add_definitions(-DEXCLUDE_MIF_SUPPORT -DEXCLUDE_PNM_SUPPORT -DEXCLUDE_BMP_SUPPORT -DEXCLUDE_RAS_SUPPORT -DEXCLUDE_JPG_SUPPORT -DEXCLUDE_PGX_SUPPORT)
-# List of C++ files:
ocv_include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-# The .cpp files:
file(GLOB lib_srcs *.c)
file(GLOB lib_hdrs *.h)
file(GLOB lib_ext_hdrs jasper/*.h)
# ----------------------------------------------------------------------------------
-# Define the library target:
+# Define the library target:
# ----------------------------------------------------------------------------------
add_library(${JASPER_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs} ${lib_ext_hdrs})
if(MSVC)
- if(NOT ENABLE_NOISY_WARNINGS)
- string(REPLACE "/W3" "/W0" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
- string(REPLACE "/W4" "/W0" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
- endif()
add_definitions(-DJAS_WIN_MSVC_BUILD)
endif()
-if(CMAKE_COMPILER_IS_GNUCXX)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-uninitialized")
-endif()
+ocv_warnings_disable(CMAKE_C_FLAGS -Wno-implicit-function-declaration -Wno-uninitialized -Wmissing-prototypes -Wmissing-declarations -Wunused -Wshadow
+ /wd4013 /wd4018 /wd4715 /wd4244 /wd4101 /wd4267)
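# --- Editor's note (illustration only, not part of the patch) ----------------
# ocv_warnings_disable() is OpenCV's build-system helper; the calls above and
# below replace the hand-written /W and -W flag surgery that is being removed.
# A minimal sketch of the idea, assuming the real macro honors
# ENABLE_NOISY_WARNINGS and separates MSVC /wdNNNN options from GCC-style -W
# options, could look like this:
macro(ocv_warnings_disable flags_var)
  if(NOT ENABLE_NOISY_WARNINGS)
    foreach(warning ${ARGN})
      if(warning MATCHES "^/wd")              # MSVC "disable warning" option
        if(MSVC)
          set(${flags_var} "${${flags_var}} ${warning}")
        endif()
      elseif(CMAKE_COMPILER_IS_GNUCXX)        # GCC-style -W option
        if(warning MATCHES "^-Wno-")
          set(_flag "${warning}")             # already negated
        else()
          string(REGEX REPLACE "^-W" "-Wno-" _flag "${warning}")
        endif()
        set(${flags_var} "${${flags_var}} ${_flag}")
      endif()
    endforeach()
  endif()
endmacro()
# With such a helper, ocv_warnings_disable(CMAKE_C_FLAGS -Wshadow /wd4244)
# would append -Wno-shadow on GCC builds and /wd4244 on MSVC builds.
# ------------------------------------------------------------------------------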
if(UNIX)
if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC)
endif()
endif()
-if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-implicit-function-declaration -Wno-unused")
-endif()
-
set_target_properties(${JASPER_LIBRARY}
- PROPERTIES
- OUTPUT_NAME ${JASPER_LIBRARY}
- DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
- ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}"
- )
-
+ PROPERTIES
+ OUTPUT_NAME ${JASPER_LIBRARY}
+ DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
+ ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}"
+ )
+
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${JASPER_LIBRARY} PROPERTIES FOLDER "3rdparty")
-endif()
-
+endif()
+
if(NOT BUILD_SHARED_LIBS)
install(TARGETS ${JASPER_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
endif()
# ----------------------------------------------------------------------------
project(${JPEG_LIBRARY})
-# List of C++ files:
-
ocv_include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-# The .cpp files:
file(GLOB lib_srcs *.c)
file(GLOB lib_hdrs *.h)
# ----------------------------------------------------------------------------------
-# Define the library target:
+# Define the library target:
# ----------------------------------------------------------------------------------
add_library(${JPEG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
-if(MSVC)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W3")
-endif()
-
if(UNIX)
if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
set_source_files_properties(jcdctmgr.c PROPERTIES COMPILE_FLAGS "-O1")
endif()
+ocv_warnings_disable(CMAKE_C_FLAGS -Wcast-align -Wshadow)
+
set_target_properties(${JPEG_LIBRARY}
- PROPERTIES OUTPUT_NAME ${JPEG_LIBRARY}
- DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
- ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}
- )
-
+ PROPERTIES OUTPUT_NAME ${JPEG_LIBRARY}
+ DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
+ ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}
+ )
+
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${JPEG_LIBRARY} PROPERTIES FOLDER "3rdparty")
-endif()
-
+endif()
+
if(NOT BUILD_SHARED_LIBS)
install(TARGETS ${JPEG_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
endif()
# ----------------------------------------------------------------------------
project(${PNG_LIBRARY})
-# List of C++ files:
-
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" ${ZLIB_INCLUDE_DIR})
file(GLOB lib_srcs *.c)
file(GLOB lib_hdrs *.h)
# ----------------------------------------------------------------------------------
-# Define the library target:
+# Define the library target:
# ----------------------------------------------------------------------------------
add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
-if(MSVC)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W3")
-endif()
-
if(UNIX)
if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
endif()
endif()
+ocv_warnings_disable(CMAKE_C_FLAGS -Wcast-align)
+
set_target_properties(${PNG_LIBRARY}
- PROPERTIES OUTPUT_NAME ${PNG_LIBRARY}
- DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
- ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}"
- )
-
+ PROPERTIES OUTPUT_NAME ${PNG_LIBRARY}
+ DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
+ ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}"
+ )
+
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${PNG_LIBRARY} PROPERTIES FOLDER "3rdparty")
endif()
-
+
if(NOT BUILD_SHARED_LIBS)
install(TARGETS ${PNG_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
endif()
ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" ${ZLIB_INCLUDE_DIR})
-# List of C++ files:
set(lib_srcs
tif_aux.c
tif_close.c
list(APPEND lib_srcs tif_win32.c)
endif(WIN32)
-if(MSVC AND NOT ENABLE_NOISY_WARNINGS)
- string(REPLACE "/W4" "/W0" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
- string(REPLACE "/W4" "/W0" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
-endif()
+ocv_warnings_disable(CMAKE_C_FLAGS -Wno-unused-but-set-variable -Wmissing-prototypes -Wmissing-declarations -Wundef
+ -Wcast-align -Wshadow -Wno-maybe-uninitialized -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast)
+ocv_warnings_disable(CMAKE_CXX_FLAGS -Wmissing-declarations /wd4100 /wd4244 /wd4706 /wd4127 /wd4701 /wd4018 /wd4267 /wd4306 /wd4305 /wd4312 /wd4311)
if(UNIX AND (CMAKE_COMPILER_IS_GNUCXX OR CV_ICC))
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
target_link_libraries(${TIFF_LIBRARY} ${ZLIB_LIBRARIES})
set_target_properties(${TIFF_LIBRARY}
- PROPERTIES
- OUTPUT_NAME "${TIFF_LIBRARY}"
- DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
- ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}"
- )
-
+ PROPERTIES
+ OUTPUT_NAME "${TIFF_LIBRARY}"
+ DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
+ ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}"
+ )
+
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${TIFF_LIBRARY} PROPERTIES FOLDER "3rdparty")
-endif()
+endif()
if(NOT BUILD_SHARED_LIBS)
install(TARGETS ${TIFF_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
/* Signed 64-bit type formatter */
/* Unsigned 64-bit type formatter */
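/* Editor's note: MinGW links against msvcrt, whose printf family expects the
   MS-style "%I64u" length modifier rather than C99 "%llu" -- presumably why
   the MinGW predefines were added to this check. */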
-#ifdef _MSC_VER
+#if defined _MSC_VER || defined __MINGW__ || defined __MINGW32__
# define TIFF_UINT64_FORMAT "%I64u"
# define TIFF_SSIZE_FORMAT "%Iu"
#else
file(REMOVE "${tbb_tarball}")
message(FATAL_ERROR "Downloaded TBB source tarball has invalid MD5 hash: ${tbb_local_md5} (expected: ${tbb_md5})")
endif()
-
+
if(EXISTS "${tbb_src_dir}")
file(REMOVE_RECURSE "${tbb_src_dir}")
endif()
add_library(tbb STATIC ${lib_srcs} ${lib_hdrs} "${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h" "${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}")
target_link_libraries(tbb c m dl)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -include \"${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h\"")
+ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations)
+string(REPLACE "-Werror=non-virtual-dtor" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -include \"${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h\"")
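# Editor's note: dropping the old blanket "-w" keeps most warnings visible and
# only the options listed above are silenced; -Werror=non-virtual-dtor is
# stripped presumably because the bundled TBB sources trigger that warning and
# would otherwise break a -Werror build.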
set_target_properties(tbb
- PROPERTIES OUTPUT_NAME tbb
- DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
- ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}"
- )
-
+ PROPERTIES OUTPUT_NAME tbb
+ DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
+ ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}"
+ )
+
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(tbb PROPERTIES FOLDER "3rdparty")
endif()
-
+
if(NOT BUILD_SHARED_LIBS)
install(TARGETS tbb ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
endif()
"TBB: BUILD_GLIBC Unknown" ENDL \
"TBB: BUILD_LD Unknown" ENDL \
"TBB: BUILD_TARGET Unknown" ENDL \
-"TBB: BUILD_COMMAND use cv::getBuildInformation() for details" ENDL \
+"TBB: BUILD_COMMAND use cv::getBuildInformation() for details" ENDL
#N": BUILD_GLIBC Unknown" ENDL \
#N": BUILD_LD Unknown" ENDL \
#N": BUILD_TARGET Unknown" ENDL \
-#N": BUILD_COMMAND use cv::getBuildInformation() for details" ENDL \
+#N": BUILD_COMMAND use cv::getBuildInformation() for details" ENDL
project(${ZLIB_LIBRARY} C)
-include(CheckTypeSize)
include(CheckFunctionExists)
include(CheckIncludeFile)
include(CheckCSourceCompiles)
#
-# Check to see if we have large file support
-#
-check_type_size(off64_t OFF64_T)
-if(HAVE_OFF64_T)
- add_definitions(-D_LARGEFILE64_SOURCE=1)
-endif()
-
-#
# Check for fseeko
#
check_function_exists(fseeko HAVE_FSEEKO)
endif()
endif()
-if(MSVC AND NOT ENABLE_NOISY_WARNINGS)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4013")
-endif()
+ocv_warnings_disable(CMAKE_C_FLAGS /wd4013 -Wattributes -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations)
set_target_properties(${ZLIB_LIBRARY} PROPERTIES
OUTPUT_NAME ${ZLIB_LIBRARY}
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(${ZLIB_LIBRARY} PROPERTIES FOLDER "3rdparty")
endif()
-
+
if(NOT BUILD_SHARED_LIBS)
install(TARGETS ${ZLIB_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main)
endif()
* both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as
* equivalently requesting no 64-bit operations
*/
-#if -_LARGEFILE64_SOURCE - -1 == 1
+#if defined _LARGEFILE64_SOURCE && -_LARGEFILE64_SOURCE - -1 == 1
# undef _LARGEFILE64_SOURCE
#endif
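/* Editor's note: "-_LARGEFILE64_SOURCE - -1 == 1" is true exactly when the
   macro expands to 0 (and, inside #if, also when it is undefined, since
   undefined identifiers evaluate to 0 there).  The added "defined" guard
   therefore leaves the behaviour unchanged while keeping -Wundef quiet, and
   the fallback 0-definitions below presumably do the same for the
   _LFS64_LARGEFILE and _FILE_OFFSET_BITS tests later in this header. */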
+#ifndef _LFS64_LARGEFILE
+# define _LFS64_LARGEFILE 0
+#endif
+
+#ifndef _FILE_OFFSET_BITS
+# define _FILE_OFFSET_BITS 0
+#endif
+
#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0
# define Z_LARGE
#endif
OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
+OCV_OPTION(OPENCV_CAN_BREAK_BINARY_COMPATIBILITY "Allow changes breaking binary compatibility with OpenCV 2.4.0" OFF )
+
# uncategorized options
# ===================================================
OCV_OPTION(CMAKE_VERBOSE "Verbose mode" OFF )
# ----------------------------------------------------------------------------
# OpenCV compiler and linker options
# ----------------------------------------------------------------------------
-include(cmake/OpenCVCompilerOptions.cmake REQUIRED)
-
# In case of Makefiles if the user does not setup CMAKE_BUILD_TYPE, assume it's Release:
if(CMAKE_GENERATOR MATCHES "Makefiles|Ninja" AND "${CMAKE_BUILD_TYPE}" STREQUAL "")
set(CMAKE_BUILD_TYPE Release)
endif()
+include(cmake/OpenCVCompilerOptions.cmake REQUIRED)
+
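# Editor's note: the compiler-options include is presumably moved below the
# CMAKE_BUILD_TYPE default so that OpenCVCompilerOptions.cmake already sees the
# final build type when it assembles the per-configuration flags.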
# ----------------------------------------------------------------------------
# Use statically or dynamically linked CRT?
endif()
endif()
+#
+# Check to see if we have large file support (needed by zlib)
+#
+include(CheckTypeSize)
+check_type_size(off64_t OFF64_T)
+if(HAVE_OFF64_T)
+ add_definitions(-D_LARGEFILE64_SOURCE=1)
+endif()
+
include(cmake/OpenCVPCHSupport.cmake REQUIRED)
include(cmake/OpenCVModule.cmake REQUIRED)
status(" Linker flags (Release):" ${CMAKE_SHARED_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS_RELEASE})
status(" Linker flags (Debug):" ${CMAKE_SHARED_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS_DEBUG})
endif()
+status(" Precompiled headers:" PCHSupport_FOUND AND ENABLE_PRECOMPILED_HEADERS THEN YES ELSE NO)
# ========================== OpenCV modules ==========================
status("")
if(TIFF_VERSION_STRING AND TIFF_FOUND)
status(" TIFF:" "${TIFF_LIBRARY} (ver ${TIFF_VERSION} - ${TIFF_VERSION_STRING})")
else()
- status(" TIFF:" TIFF_FOUND THEN "${TIFF_LIBRARY} (ver ${TIFF_VERSION})" ELSE "build (ver ${TIFF_VERSION})")
+ status(" TIFF:" TIFF_FOUND THEN "${TIFF_LIBRARY} (ver ${TIFF_VERSION})" ELSE "build (ver ${TIFF_VERSION} - ${TIFF_VERSION_STRING})")
endif()
else()
status(" TIFF:" "NO")
#ifndef __CVCOMMON_H_
#define __CVCOMMON_H_
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "cxcore.h"
#include "cv.h"
#include "cxmisc.h"
( *( (float*) (aux->data + ((int) (idx1)) * aux->step ) ) < \
*( (float*) (aux->data + ((int) (idx2)) * aux->step ) ) )
-CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_16s, short, CMP_VALUES, CvValArray* )
+static CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_16s, short, CMP_VALUES, CvValArray* )
-CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_32s, int, CMP_VALUES, CvValArray* )
+static CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_32s, int, CMP_VALUES, CvValArray* )
-CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_32f, float, CMP_VALUES, CvValArray* )
+static CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_32f, float, CMP_VALUES, CvValArray* )
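/* Editor's note: these helpers (and the CV_BOOST_IMPL functions switched to
   "static" further below) are only used inside this translation unit; giving
   them internal linkage avoids -Wmissing-prototypes / -Wmissing-declarations
   without having to add separate prototypes. */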
CV_BOOST_IMPL
void cvGetSortedIndices( CvMat* val, CvMat* idx, int sortcols )
assert( classifier != NULL );
assert( sample != NULL );
assert( CV_MAT_TYPE( sample->type ) == CV_32FC1 );
-
+
if( (CV_MAT_ELEM( (*sample), float, 0,
((CvStumpClassifier*) classifier)->compidx )) <
- ((CvStumpClassifier*) classifier)->threshold )
+ ((CvStumpClassifier*) classifier)->threshold )
return ((CvStumpClassifier*) classifier)->left;
return ((CvStumpClassifier*) classifier)->right;
}
#define ICV_DEF_FIND_STUMP_THRESHOLD( suffix, type, error ) \
-CV_BOOST_IMPL int icvFindStumpThreshold_##suffix( \
+static int icvFindStumpThreshold_##suffix( \
uchar* data, size_t datastep, \
uchar* wdata, size_t wstep, \
uchar* ydata, size_t ystep, \
float* curval = NULL; \
float curlerror = 0.0F; \
float currerror = 0.0F; \
- float wposl; \
- float wposr; \
\
int i = 0; \
int idx = 0; \
\
- wposl = wposr = 0.0F; \
if( *sumw == FLT_MAX ) \
{ \
/* calculate sums */ \
*/
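/* Editor's note: wposl/wposr are moved from the shared
   ICV_DEF_FIND_STUMP_THRESHOLD body into the error-specific macros below, so
   expansions that never touch them no longer leave set-but-unused variables
   behind (the likely source of the -Wunused hits). */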
#define ICV_DEF_FIND_STUMP_THRESHOLD_MISC( suffix, type ) \
ICV_DEF_FIND_STUMP_THRESHOLD( misc_##suffix, type, \
- wposl = 0.5F * ( wl + wyl ); \
- wposr = 0.5F * ( wr + wyr ); \
+ float wposl = 0.5F * ( wl + wyl ); \
+ float wposr = 0.5F * ( wr + wyr ); \
curleft = 0.5F * ( 1.0F + curleft ); \
curright = 0.5F * ( 1.0F + curright ); \
curlerror = MIN( wposl, wl - wposl ); \
*/
#define ICV_DEF_FIND_STUMP_THRESHOLD_GINI( suffix, type ) \
ICV_DEF_FIND_STUMP_THRESHOLD( gini_##suffix, type, \
- wposl = 0.5F * ( wl + wyl ); \
- wposr = 0.5F * ( wr + wyr ); \
+ float wposl = 0.5F * ( wl + wyl ); \
+ float wposr = 0.5F * ( wr + wyr ); \
curleft = 0.5F * ( 1.0F + curleft ); \
curright = 0.5F * ( 1.0F + curright ); \
curlerror = 2.0F * wposl * ( 1.0F - curleft ); \
*/
#define ICV_DEF_FIND_STUMP_THRESHOLD_ENTROPY( suffix, type ) \
ICV_DEF_FIND_STUMP_THRESHOLD( entropy_##suffix, type, \
- wposl = 0.5F * ( wl + wyl ); \
- wposr = 0.5F * ( wr + wyr ); \
+ float wposl = 0.5F * ( wl + wyl ); \
+ float wposr = 0.5F * ( wr + wyr ); \
curleft = 0.5F * ( 1.0F + curleft ); \
curright = 0.5F * ( 1.0F + curright ); \
curlerror = currerror = 0.0F; \
int ystep = 0;
uchar* idxdata = NULL;
int idxstep = 0;
- int l = 0; /* number of indices */
+ int l = 0; /* number of indices */
uchar* wdata = NULL;
int wstep = 0;
int* idx = NULL;
int i = 0;
-
+
float sumw = FLT_MAX;
float sumwy = FLT_MAX;
float sumwyy = FLT_MAX;
( data + i * ((size_t) cstep), sstep,
wdata, wstep, ydata, ystep, (uchar*) idx, sizeof( int ), l,
&(stump->lerror), &(stump->rerror),
- &(stump->threshold), &(stump->left), &(stump->right),
+ &(stump->threshold), &(stump->left), &(stump->right),
&sumw, &sumwy, &sumwyy ) )
{
stump->compidx = i;
size_t ystep = 0;
uchar* idxdata = NULL;
size_t idxstep = 0;
- int l = 0; /* number of indices */
+ int l = 0; /* number of indices */
uchar* wdata = NULL;
size_t wstep = 0;
char* filter = NULL;
int i = 0;
-
+
int compidx = 0;
int stumperror;
int portion;
int t_compidx;
int t_n;
-
+
int ti;
int tj;
int tk;
if( ((CvMTStumpTrainParams*) trainParams)->getTrainData != NULL )
{
n = ((CvMTStumpTrainParams*) trainParams)->numcomp;
- }
+ }
}
assert( datan <= n );
memset( (void*) stump, 0, sizeof( CvStumpClassifier ) );
portion = ((CvMTStumpTrainParams*)trainParams)->portion;
-
+
if( portion < 1 )
{
/* auto portion */
portion = n;
#ifdef _OPENMP
- portion /= omp_get_max_threads();
- #endif /* _OPENMP */
+ portion /= omp_get_max_threads();
+ #endif /* _OPENMP */
}
stump->eval = cvEvalStumpClassifier;
t_compidx = 0;
t_n = 0;
-
+
ti = 0;
tj = 0;
tk = 0;
t_idx = NULL;
mat.data.ptr = NULL;
-
+
if( datan < n )
{
/* prepare matrix for callback */
{
t_idx[ti] = ti;
}
- }
+ }
}
}
t_idx[tk++] = curidx;
}
}
- if( findStumpThreshold_32s[stumperror](
+ if( findStumpThreshold_32s[stumperror](
t_data + ti * t_cstep, t_sstep,
wdata, wstep, ydata, ystep,
(uchar*) t_idx, sizeof( int ), tk,
&lerror, &rerror,
- &threshold, &left, &right,
+ &threshold, &left, &right,
&sumw, &sumwy, &sumwyy ) )
{
optcompidx = ti;
t_idx[tk++] = curidx;
}
}
- if( findStumpThreshold_32s[stumperror](
+ if( findStumpThreshold_32s[stumperror](
t_data + ti * t_cstep, t_sstep,
wdata, wstep, ydata, ystep,
(uchar*) t_idx, sizeof( int ), tk,
&lerror, &rerror,
- &threshold, &left, &right,
+ &threshold, &left, &right,
&sumw, &sumwy, &sumwyy ) )
{
optcompidx = ti;
t_idx[tk++] = curidx;
}
}
- if( findStumpThreshold_32s[stumperror](
+ if( findStumpThreshold_32s[stumperror](
t_data + ti * t_cstep, t_sstep,
wdata, wstep, ydata, ystep,
(uchar*) t_idx, sizeof( int ), tk,
&lerror, &rerror,
- &threshold, &left, &right,
+ &threshold, &left, &right,
&sumw, &sumwy, &sumwyy ) )
{
optcompidx = ti;
case CV_16SC1:
for( ti = t_compidx; ti < MIN( sortedn, t_compidx + t_n ); ti++ )
{
- if( findStumpThreshold_16s[stumperror](
+ if( findStumpThreshold_16s[stumperror](
t_data + ti * t_cstep, t_sstep,
wdata, wstep, ydata, ystep,
sorteddata + ti * sortedcstep, sortedsstep, sortedm,
&lerror, &rerror,
- &threshold, &left, &right,
+ &threshold, &left, &right,
&sumw, &sumwy, &sumwyy ) )
{
optcompidx = ti;
case CV_32SC1:
for( ti = t_compidx; ti < MIN( sortedn, t_compidx + t_n ); ti++ )
{
- if( findStumpThreshold_32s[stumperror](
+ if( findStumpThreshold_32s[stumperror](
t_data + ti * t_cstep, t_sstep,
wdata, wstep, ydata, ystep,
sorteddata + ti * sortedcstep, sortedsstep, sortedm,
&lerror, &rerror,
- &threshold, &left, &right,
+ &threshold, &left, &right,
&sumw, &sumwy, &sumwyy ) )
{
optcompidx = ti;
case CV_32FC1:
for( ti = t_compidx; ti < MIN( sortedn, t_compidx + t_n ); ti++ )
{
- if( findStumpThreshold_32f[stumperror](
+ if( findStumpThreshold_32f[stumperror](
t_data + ti * t_cstep, t_sstep,
wdata, wstep, ydata, ystep,
sorteddata + ti * sortedcstep, sortedsstep, sortedm,
&lerror, &rerror,
- &threshold, &left, &right,
+ &threshold, &left, &right,
&sumw, &sumwy, &sumwyy ) )
{
optcompidx = ti;
va.data = t_data + ti * t_cstep;
va.step = t_sstep;
icvSortIndexedValArray_32s( t_idx, l, &va );
- if( findStumpThreshold_32s[stumperror](
+ if( findStumpThreshold_32s[stumperror](
t_data + ti * t_cstep, t_sstep,
wdata, wstep, ydata, ystep,
(uchar*)t_idx, sizeof( int ), l,
&lerror, &rerror,
- &threshold, &left, &right,
+ &threshold, &left, &right,
&sumw, &sumwy, &sumwyy ) )
{
optcompidx = ti;
{
if( (CV_MAT_ELEM( (*sample), float, 0,
((CvCARTClassifier*) classifier)->compidx[idx] )) <
- ((CvCARTClassifier*) classifier)->threshold[idx] )
+ ((CvCARTClassifier*) classifier)->threshold[idx] )
{
idx = ((CvCARTClassifier*) classifier)->left[idx];
}
{
if( (CV_MAT_ELEM( (*sample), float,
((CvCARTClassifier*) classifier)->compidx[idx], 0 )) <
- ((CvCARTClassifier*) classifier)->threshold[idx] )
+ ((CvCARTClassifier*) classifier)->threshold[idx] )
{
idx = ((CvCARTClassifier*) classifier)->left[idx];
}
idx = ((CvCARTClassifier*) classifier)->right[idx];
}
} while( idx > 0 );
- }
+ }
__END__;
return ((CvCARTClassifier*) classifier)->val[-idx];
}
-CV_BOOST_IMPL
+static
float cvEvalCARTClassifierIdx( CvClassifier* classifier, CvMat* sample )
{
CV_FUNCNAME( "cvEvalCARTClassifierIdx" );
{
if( (CV_MAT_ELEM( (*sample), float, 0,
((CvCARTClassifier*) classifier)->compidx[idx] )) <
- ((CvCARTClassifier*) classifier)->threshold[idx] )
+ ((CvCARTClassifier*) classifier)->threshold[idx] )
{
idx = ((CvCARTClassifier*) classifier)->left[idx];
}
{
if( (CV_MAT_ELEM( (*sample), float,
((CvCARTClassifier*) classifier)->compidx[idx], 0 )) <
- ((CvCARTClassifier*) classifier)->threshold[idx] )
+ ((CvCARTClassifier*) classifier)->threshold[idx] )
{
idx = ((CvCARTClassifier*) classifier)->left[idx];
}
idx = ((CvCARTClassifier*) classifier)->right[idx];
}
} while( idx > 0 );
- }
+ }
__END__;
*classifier = NULL;
}
-void CV_CDECL icvDefaultSplitIdx_R( int compidx, float threshold,
+static void CV_CDECL icvDefaultSplitIdx_R( int compidx, float threshold,
CvMat* idx, CvMat** left, CvMat** right,
void* userdata )
{
}
}
-void CV_CDECL icvDefaultSplitIdx_C( int compidx, float threshold,
+static void CV_CDECL icvDefaultSplitIdx_C( int compidx, float threshold,
CvMat* idx, CvMat** left, CvMat** right,
void* userdata )
{
int count = 0;
int i = 0;
int j = 0;
-
+
CvCARTNode* intnode = NULL;
CvCARTNode* list = NULL;
int listcount = 0;
CvMat* lidx = NULL;
CvMat* ridx = NULL;
-
+
float maxerrdrop = 0.0F;
int idx = 0;
void* userdata;
count = ((CvCARTTrainParams*) trainParams)->count;
-
+
assert( count > 0 );
- datasize = sizeof( *cart ) + (sizeof( float ) + 3 * sizeof( int )) * count +
+ datasize = sizeof( *cart ) + (sizeof( float ) + 3 * sizeof( int )) * count +
sizeof( float ) * (count + 1);
-
+
cart = (CvCARTClassifier*) cvAlloc( datasize );
memset( cart, 0, datasize );
-
+
cart->count = count;
-
+
cart->eval = cvEvalCARTClassifier;
cart->save = NULL;
cart->release = cvReleaseCARTClassifier;
/* split last added node */
splitIdxCallback( intnode[i-1].stump->compidx, intnode[i-1].stump->threshold,
intnode[i-1].sampleIdx, &lidx, &ridx, userdata );
-
+
if( intnode[i-1].stump->lerror != 0.0F )
{
list[listcount].sampleIdx = lidx;
{
cvReleaseMat( &ridx );
}
-
+
if( listcount == 0 ) break;
/* find the best node to be added to the tree */
cart->count++;
cart->compidx[i] = intnode[i].stump->compidx;
cart->threshold[i] = intnode[i].stump->threshold;
-
+
/* leaves */
if( cart->left[i] <= 0 )
{
j++;
}
}
-
+
/* CLEAN UP */
for( i = 0; i < count && (intnode[i].stump != NULL); i++ )
{
list[i].stump->release( (CvClassifier**) &(list[i].stump) );
cvReleaseMat( &(list[i].sampleIdx) );
}
-
+
cvFree( &intnode );
return (CvClassifier*) cart;
* using ANY appropriate weak classifier
*/
-CV_BOOST_IMPL
+static
CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,
CvMat* weakTrainVals,
CvMat* /*weights*/,
CV_MAT2VEC( *trainClasses, ydata, ystep, m );
CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum );
- assert( m == trainnum );
+ CV_Assert( m == trainnum );
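/* Editor's note: unlike assert(), CV_Assert() is still evaluated when NDEBUG
   is defined, so the check is not compiled away and m/trainnum do not become
   set-but-unused in release builds -- presumably the motivation for the
   assert -> CV_Assert swaps here and in the routines below. */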
idxnum = 0;
idxstep = 0;
{
CV_MAT2VEC( *sampleIdx, idxdata, idxstep, idxnum );
}
-
+
datasize = sizeof( *ptr ) + sizeof( *ptr->idx ) * idxnum;
ptr = (CvBoostTrainer*) cvAlloc( datasize );
memset( ptr, 0, datasize );
ptr->count = m;
ptr->type = type;
-
+
if( idxnum > 0 )
{
CvScalar s;
{
idx = (ptr->idx) ? ptr->idx[i] : i;
- *((float*) (traindata + idx * trainstep)) =
+ *((float*) (traindata + idx * trainstep)) =
2.0F * (*((float*) (ydata + idx * ystep))) - 1.0F;
}
* Discrete AdaBoost functions
*
*/
-CV_BOOST_IMPL
+static
float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals,
CvMat* trainClasses,
CvMat* /*weakTrainVals*/,
CV_MAT2VEC( *trainClasses, ydata, ystep, ynum );
CV_MAT2VEC( *weights, wdata, wstep, wnum );
- assert( m == ynum );
- assert( m == wnum );
+ CV_Assert( m == ynum );
+ CV_Assert( m == wnum );
sumw = 0.0F;
err = 0.0F;
sumw += *((float*) (wdata + idx*wstep));
err += (*((float*) (wdata + idx*wstep))) *
- ( (*((float*) (evaldata + idx*evalstep))) !=
+ ( (*((float*) (evaldata + idx*evalstep))) !=
2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F );
}
err /= sumw;
err = -cvLogRatio( err );
-
+
for( i = 0; i < trainer->count; i++ )
{
idx = (trainer->idx) ? trainer->idx[i] : i;
- *((float*) (wdata + idx*wstep)) *= expf( err *
- ((*((float*) (evaldata + idx*evalstep))) !=
+ *((float*) (wdata + idx*wstep)) *= expf( err *
+ ((*((float*) (evaldata + idx*evalstep))) !=
2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F) );
sumw += *((float*) (wdata + idx*wstep));
}
*((float*) (wdata + idx * wstep)) /= sumw;
}
-
+
return err;
}
* Real AdaBoost functions
*
*/
-CV_BOOST_IMPL
+static
float icvBoostNextWeakClassifierRAB( CvMat* weakEvalVals,
CvMat* trainClasses,
CvMat* /*weakTrainVals*/,
*((float*) (wdata + idx*wstep)) /= sumw;
}
-
+
return 1.0F;
}
#define CV_LB_PROB_THRESH 0.01F
#define CV_LB_WEIGHT_THRESHOLD 0.0001F
-CV_BOOST_IMPL
+static
void icvResponsesAndWeightsLB( int num, uchar* wdata, int wstep,
uchar* ydata, int ystep,
uchar* fdata, int fstep,
*((float*) (wdata + idx*wstep)) = MAX( p * (1.0F - p), CV_LB_WEIGHT_THRESHOLD );
if( *((float*) (ydata + idx*ystep)) == 1.0F )
{
- *((float*) (traindata + idx*trainstep)) =
+ *((float*) (traindata + idx*trainstep)) =
1.0F / (MAX( p, CV_LB_PROB_THRESH ));
}
else
{
- *((float*) (traindata + idx*trainstep)) =
+ *((float*) (traindata + idx*trainstep)) =
-1.0F / (MAX( 1.0F - p, CV_LB_PROB_THRESH ));
}
}
}
-CV_BOOST_IMPL
+static
CvBoostTrainer* icvBoostStartTrainingLB( CvMat* trainClasses,
CvMat* weakTrainVals,
CvMat* weights,
CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum );
CV_MAT2VEC( *weights, wdata, wstep, wnum );
- assert( m == trainnum );
- assert( m == wnum );
+ CV_Assert( m == trainnum );
+ CV_Assert( m == wnum );
idxnum = 0;
{
CV_MAT2VEC( *sampleIdx, idxdata, idxstep, idxnum );
}
-
+
datasize = sizeof( *ptr ) + sizeof( *ptr->F ) * m + sizeof( *ptr->idx ) * idxnum;
ptr = (CvBoostTrainer*) cvAlloc( datasize );
memset( ptr, 0, datasize );
ptr->count = m;
ptr->type = type;
-
+
if( idxnum > 0 )
{
CvScalar s;
return ptr;
}
-CV_BOOST_IMPL
+static
float icvBoostNextWeakClassifierLB( CvMat* weakEvalVals,
CvMat* trainClasses,
CvMat* weakTrainVals,
CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum );
CV_MAT2VEC( *weights, wdata, wstep, wnum );
- assert( m == ynum );
- assert( m == wnum );
- assert( m == trainnum );
+ CV_Assert( m == ynum );
+ CV_Assert( m == wnum );
+ CV_Assert( m == trainnum );
//assert( m == trainer->count );
for( i = 0; i < trainer->count; i++ )
trainer->F[idx] += *((float*) (evaldata + idx * evalstep));
}
-
+
icvResponsesAndWeightsLB( trainer->count, wdata, wstep, ydata, ystep,
(uchar*) trainer->F, sizeof( *trainer->F ),
traindata, trainstep, trainer->idx );
* Gentle AdaBoost
*
*/
-CV_BOOST_IMPL
+static
float icvBoostNextWeakClassifierGAB( CvMat* weakEvalVals,
CvMat* trainClasses,
CvMat* /*weakTrainVals*/,
CV_MAT2VEC( *trainClasses, ydata, ystep, ynum );
CV_MAT2VEC( *weights, wdata, wstep, wnum );
- assert( m == ynum );
- assert( m == wnum );
+ CV_Assert( m == ynum );
+ CV_Assert( m == wnum );
sumw = 0.0F;
for( i = 0; i < trainer->count; i++ )
{
idx = (trainer->idx) ? trainer->idx[i] : i;
- *((float*) (wdata + idx*wstep)) *=
+ *((float*) (wdata + idx*wstep)) *=
expf( -(*((float*) (evaldata + idx*evalstep)))
* ( 2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F ) );
sumw += *((float*) (wdata + idx*wstep));
}
-
+
for( i = 0; i < trainer->count; i++ )
{
idx = (trainer->idx) ? trainer->idx[i] : i;
typedef struct CvBtTrainer
{
- /* {{ external */
+ /* {{ external */
CvMat* trainData;
int flags;
-
+
CvMat* trainClasses;
int m;
uchar* ydata;
CvMat* sampleIdx;
int numsamples;
-
+
float param[2];
CvBoostType type;
int numclasses;
typedef void (*CvZeroApproxFunc)( float* approx, CvBtTrainer* trainer );
/* Mean zero approximation */
-void icvZeroApproxMean( float* approx, CvBtTrainer* trainer )
+static void icvZeroApproxMean( float* approx, CvBtTrainer* trainer )
{
int i;
int idx;
/*
* Median zero approximation
*/
-void icvZeroApproxMed( float* approx, CvBtTrainer* trainer )
+static void icvZeroApproxMed( float* approx, CvBtTrainer* trainer )
{
int i;
int idx;
idx = icvGetIdxAt( trainer->sampleIdx, i );
trainer->f[i] = *((float*) (trainer->ydata + idx * trainer->ystep));
}
-
+
icvSort_32f( trainer->f, trainer->numsamples, 0 );
approx[0] = trainer->f[trainer->numsamples / 2];
}
/*
* 0.5 * log( mean(y) / (1 - mean(y)) ) where y in {0, 1}
*/
-void icvZeroApproxLog( float* approx, CvBtTrainer* trainer )
+static void icvZeroApproxLog( float* approx, CvBtTrainer* trainer )
{
float y_mean;
/*
* 0 zero approximation
*/
-void icvZeroApprox0( float* approx, CvBtTrainer* trainer )
+static void icvZeroApprox0( float* approx, CvBtTrainer* trainer )
{
int i;
CV_BOOST_IMPL
void cvBtNext( CvCARTClassifier** trees, CvBtTrainer* trainer );
-CV_BOOST_IMPL
+static
CvBtTrainer* cvBtStart( CvCARTClassifier** trees,
CvMat* trainData,
int flags,
float* zero_approx;
int m;
int i, j;
-
+
if( trees == NULL )
{
CV_ERROR( CV_StsNullPtr, "Invalid trees parameter" );
}
-
- if( type < CV_DABCLASS || type > CV_MREG )
+
+ if( type < CV_DABCLASS || type > CV_MREG )
{
CV_ERROR( CV_StsUnsupportedFormat, "Unsupported type parameter" );
}
ptr->flags = flags;
ptr->trainClasses = trainClasses;
CV_MAT2VEC( *trainClasses, ptr->ydata, ptr->ystep, ptr->m );
-
+
memset( &(ptr->cartParams), 0, sizeof( ptr->cartParams ) );
memset( &(ptr->stumpParams), 0, sizeof( ptr->stumpParams ) );
ptr->sampleIdx = sampleIdx;
ptr->numsamples = ( sampleIdx == NULL ) ? ptr->m
: MAX( sampleIdx->rows, sampleIdx->cols );
-
+
ptr->weights = cvCreateMat( 1, m, CV_32FC1 );
- cvSet( ptr->weights, cvScalar( 1.0 ) );
-
+ cvSet( ptr->weights, cvScalar( 1.0 ) );
+
if( type <= CV_GABCLASS )
{
ptr->boosttrainer = cvBoostStartTraining( ptr->trainClasses, ptr->y,
{
trees[i]->val[j] += zero_approx[i];
}
- }
+ }
CV_CALL( cvFree( &zero_approx ) );
}
return ptr;
}
-void icvBtNext_LSREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_LSREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
{
int i;
/* yhat_i = y_i - F_(m-1)(x_i) */
for( i = 0; i < trainer->m; i++ )
{
- trainer->y->data.fl[i] =
+ trainer->y->data.fl[i] =
*((float*) (trainer->ydata + i * trainer->ystep)) - trainer->f[i];
}
}
-void icvBtNext_LADREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_LADREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
{
CvCARTClassifier* ptr;
int i, j;
int sample_step;
uchar* sample_data;
int index;
-
+
int data_size;
int* idx;
float* resp;
cvFree( &idx );
cvFree( &resp );
-
+
trees[0] = ptr;
}
-void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
{
CvCARTClassifier* ptr;
int i, j;
CvMat sample;
int sample_step;
uchar* sample_data;
-
+
int data_size;
int* idx;
float* resid;
/* for delta */
resp[i] = (float) fabs( resid[index] );
}
-
+
/* delta = quantile_alpha{abs(resid_i)} */
icvSort_32f( resp, trainer->numsamples, 0 );
delta = resp[(int)(trainer->param[1] * (trainer->numsamples - 1))];
trainer->y->data.fl[index] = MIN( delta, ((float) fabs( resid[index] )) ) *
CV_SIGN( resid[index] );
}
-
+
ptr = (CvCARTClassifier*) cvCreateCARTClassifier( trainer->trainData, trainer->flags,
trainer->y, NULL, NULL, NULL, trainer->sampleIdx, trainer->weights,
(CvClassifierTrainParams*) &trainer->cartParams );
/* rhat = median(y_i - F_(m-1)(x_i)) */
icvSort_32f( resp, respnum, 0 );
rhat = resp[respnum / 2];
-
+
/* val = sum{sign(r_i - rhat_i) * min(delta, abs(r_i - rhat_i)}
* r_i = y_i - F_(m-1)(x_i)
*/
cvFree( &resid );
cvFree( &resp );
cvFree( &idx );
-
+
trees[0] = ptr;
}
#define CV_LOG_VAL_MAX 18.0
-void icvBtNext_L2CLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_L2CLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
{
CvCARTClassifier* ptr;
int i, j;
CvMat sample;
int sample_step;
uchar* sample_data;
-
+
int data_size;
int* idx;
int respnum;
weights = (float*) cvAlloc( data_size );
data_size = trainer->m * sizeof( *sorted_weights );
sorted_weights = (float*) cvAlloc( data_size );
-
+
/* yhat_i = (4 * y_i - 2) / ( 1 + exp( (4 * y_i - 2) * F_(m-1)(x_i) ) ).
* y_i in {0, 1}
*/
sorted_weights[i] = weights[index];
sum_weights += sorted_weights[i];
}
-
+
trimmed_idx = NULL;
sample_idx = trainer->sampleIdx;
trimmed_num = trainer->numsamples;
if( trainer->param[1] < 1.0F )
{
/* perform weight trimming */
-
+
float threshold;
int count;
-
+
icvSort_32f( sorted_weights, trainer->numsamples, 0 );
sum_weights *= (1.0F - trainer->param[1]);
-
+
i = -1;
do { sum_weights -= sorted_weights[++i]; }
while( sum_weights > 0.0F && i < (trainer->numsamples - 1) );
-
+
threshold = sorted_weights[i];
while( i > 0 && sorted_weights[i-1] == threshold ) i--;
if( i > 0 )
{
- trimmed_num = trainer->numsamples - i;
+ trimmed_num = trainer->numsamples - i;
trimmed_idx = cvCreateMat( 1, trimmed_num, CV_32FC1 );
count = 0;
for( i = 0; i < trainer->numsamples; i++ )
count++;
}
}
-
+
assert( count == trimmed_num );
sample_idx = trimmed_idx;
- printf( "Used samples %%: %g\n",
+ printf( "Used samples %%: %g\n",
(float) trimmed_num / (float) trainer->numsamples * 100.0F );
}
}
}
ptr->val[j] = val;
}
-
+
if( trimmed_idx != NULL ) cvReleaseMat( &trimmed_idx );
cvFree( &sorted_weights );
cvFree( &weights );
cvFree( &idx );
-
+
trees[0] = ptr;
}
-void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
{
int i, j, k, kk, num;
CvMat sample;
int sample_step;
uchar* sample_data;
-
+
int data_size;
int* idx;
int respnum;
sum_exp_f += exp_f;
}
- val = (float) ( (*((float*) (trainer->ydata + index * trainer->ystep)))
+ val = (float) ( (*((float*) (trainer->ydata + index * trainer->ystep)))
== (float) k );
val -= (float) ( (sum_exp_f == CV_VAL_MAX) ? 0.0 : ( 1.0 / sum_exp_f ) );
if( trainer->param[1] < 1.0F )
{
/* perform weight trimming */
-
+
float threshold;
int count;
-
+
icvSort_32f( sorted_weights, trainer->numsamples, 0 );
sum_weights *= (1.0F - trainer->param[1]);
-
+
i = -1;
do { sum_weights -= sorted_weights[++i]; }
while( sum_weights > 0.0F && i < (trainer->numsamples - 1) );
-
+
threshold = sorted_weights[i];
while( i > 0 && sorted_weights[i-1] == threshold ) i--;
if( i > 0 )
{
- trimmed_num = trainer->numsamples - i;
+ trimmed_num = trainer->numsamples - i;
trimmed_idx->cols = trimmed_num;
count = 0;
for( i = 0; i < trainer->numsamples; i++ )
count++;
}
}
-
+
assert( count == trimmed_num );
sample_idx = trimmed_idx;
- printf( "k: %d Used samples %%: %g\n", k,
+ printf( "k: %d Used samples %%: %g\n", k,
(float) trimmed_num / (float) trainer->numsamples * 100.0F );
}
} /* weight trimming */
trees[k]->val[j] = val;
}
} /* for each class */
-
+
cvReleaseMat( &trimmed_idx );
cvFree( &sorted_weights );
cvFree( &weights );
}
-void icvBtNext_XXBCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_XXBCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
{
float alpha;
int i;
num_samples = ( sample_idx == NULL )
? trainer->m : MAX( sample_idx->rows, sample_idx->cols );
- printf( "Used samples %%: %g\n",
+ printf( "Used samples %%: %g\n",
(float) num_samples / (float) trainer->numsamples * 100.0F );
trees[0] = (CvCARTClassifier*) cvCreateCARTClassifier( trainer->trainData,
trainer->flags, trainer->y, NULL, NULL, NULL,
sample_idx, trainer->weights,
(CvClassifierTrainParams*) &trainer->cartParams );
-
+
/* evaluate samples */
CV_GET_SAMPLE( *trainer->trainData, trainer->flags, 0, sample );
CV_GET_SAMPLE_STEP( *trainer->trainData, trainer->flags, sample_step );
sample_data = sample.data.ptr;
-
+
for( i = 0; i < trainer->m; i++ )
{
sample.data.ptr = sample_data + i * sample_step;
alpha = cvBoostNextWeakClassifier( weak_eval_vals, trainer->trainClasses,
trainer->y, trainer->weights, trainer->boosttrainer );
-
+
/* multiply tree by alpha */
for( i = 0; i <= trees[0]->count; i++ )
{
trees[0]->val[i] = cvLogRatio( trees[0]->val[i] );
}
}
-
+
if( sample_idx != NULL && sample_idx != trainer->sampleIdx )
{
cvReleaseMat( &sample_idx );
int sample_step;
uchar* sample_data;
- icvBtNextFunc[trainer->type]( trees, trainer );
+ icvBtNextFunc[trainer->type]( trees, trainer );
/* shrinkage */
if( trainer->param[0] != 1.0F )
index = icvGetIdxAt( trainer->sampleIdx, i );
sample.data.ptr = sample_data + index * sample_step;
for( j = 0; j < trainer->numclasses; j++ )
- {
- trainer->f[index * trainer->numclasses + j] +=
+ {
+ trainer->f[index * trainer->numclasses + j] +=
trees[j]->eval( (CvClassifier*) (trees[j]), &sample );
}
}
}
}
-CV_BOOST_IMPL
+static
void cvBtEnd( CvBtTrainer** trainer )
{
CV_FUNCNAME( "cvBtEnd" );
-
+
__BEGIN__;
-
+
if( trainer == NULL || (*trainer) == NULL )
{
CV_ERROR( CV_StsNullPtr, "Invalid trainer parameter" );
}
-
+
if( (*trainer)->y != NULL )
{
CV_CALL( cvReleaseMat( &((*trainer)->y) ) );
* Boosted tree model as a classifier *
\****************************************************************************************/
-CV_BOOST_IMPL
+static
float cvEvalBtClassifier( CvClassifier* classifier, CvMat* sample )
{
float val;
CV_FUNCNAME( "cvEvalBtClassifier" );
__BEGIN__;
-
+
int i;
val = 0.0F;
return val;
}
-CV_BOOST_IMPL
+static
float cvEvalBtClassifier2( CvClassifier* classifier, CvMat* sample )
{
float val;
CV_FUNCNAME( "cvEvalBtClassifier2" );
__BEGIN__;
-
+
CV_CALL( val = cvEvalBtClassifier( classifier, sample ) );
__END__;
return (float) (val >= 0.0F);
}
-CV_BOOST_IMPL
+static
float cvEvalBtClassifierK( CvClassifier* classifier, CvMat* sample )
{
int cls = 0;
CV_FUNCNAME( "cvEvalBtClassifierK" );
__BEGIN__;
-
+
int i, k;
float max_val;
int numclasses;
cvEvalBtClassifier
};
-CV_BOOST_IMPL
+static
int cvSaveBtClassifier( CvClassifier* classifier, const char* filename )
{
CV_FUNCNAME( "cvSaveBtClassifier" );
CV_ASSERT( classifier );
CV_ASSERT( filename );
-
+
if( !icvMkDir( filename ) || (file = fopen( filename, "w" )) == 0 )
{
CV_ERROR( CV_StsError, "Unable to create file" );
((CvBtClassifier*) classifier)->numclasses,
((CvBtClassifier*) classifier)->numfeatures,
((CvBtClassifier*) classifier)->numiter );
-
+
for( i = 0; i < ((CvBtClassifier*) classifier)->numclasses *
((CvBtClassifier*) classifier)->numiter; i++ )
{
}
-CV_BOOST_IMPL
+static
void cvReleaseBtClassifier( CvClassifier** ptr )
{
CV_FUNCNAME( "cvReleaseBtClassifier" );
__END__;
}
-void cvTuneBtClassifier( CvClassifier* classifier, CvMat*, int flags,
+static void cvTuneBtClassifier( CvClassifier* classifier, CvMat*, int flags,
CvMat*, CvMat* , CvMat*, CvMat*, CvMat* )
{
CV_FUNCNAME( "cvTuneBtClassifier" );
((CvBtClassifier*) classifier)->seq->total;
CV_CALL( ptr = cvAlloc( data_size ) );
CV_CALL( cvCvtSeqToArray( ((CvBtClassifier*) classifier)->seq, ptr ) );
- CV_CALL( cvReleaseMemStorage(
+ CV_CALL( cvReleaseMemStorage(
&(((CvBtClassifier*) classifier)->seq->storage) ) );
((CvBtClassifier*) classifier)->trees = (CvCARTClassifier**) ptr;
classifier->flags &= ~CV_TUNABLE;
__END__;
}
-CvBtClassifier* icvAllocBtClassifier( CvBoostType type, int flags, int numclasses,
+static CvBtClassifier* icvAllocBtClassifier( CvBoostType type, int flags, int numclasses,
int numiter )
{
CvBtClassifier* ptr;
CV_ASSERT( trainParams != NULL );
type = ((CvBtClassifierTrainParams*) trainParams)->type;
-
+
if( type >= CV_DABCLASS && type <= CV_GABCLASS && sampleIdx )
{
CV_ERROR( CV_StsBadArg, "Sample indices are not supported for this type" );
cvMinMaxLoc( trainClasses, &min_val, &max_val );
num_classes = (int) (max_val + 1.0);
-
+
CV_ASSERT( num_classes >= 2 );
}
else
num_classes = 1;
}
num_iter = ((CvBtClassifierTrainParams*) trainParams)->numiter;
-
+
CV_ASSERT( num_iter > 0 );
ptr = icvAllocBtClassifier( type, CV_TUNABLE | flags, num_classes, num_iter );
ptr->numfeatures = (CV_IS_ROW_SAMPLE( flags )) ? trainData->cols : trainData->rows;
-
+
i = 0;
printf( "Iteration %d\n", 1 );
CV_CALL( cvSeqPushMulti( ptr->seq, trees, ptr->numclasses ) );
CV_CALL( cvFree( &trees ) );
ptr->numiter++;
-
+
for( i = 1; i < num_iter; i++ )
{
ptr->tune( (CvClassifier*) ptr, NULL, CV_TUNABLE, NULL, NULL, NULL, NULL, NULL );
CvBtClassifier* ptr = 0;
CV_FUNCNAME( "cvCreateBtClassifierFromFile" );
-
+
__BEGIN__;
FILE* file;
{
CV_ERROR( CV_StsError, "Unable to open file" );
}
-
+
values_read = fscanf( file, "%d %d %d %d", &type, &num_classes, &num_features, &num_classifiers );
CV_Assert(values_read == 4);
}
ptr = icvAllocBtClassifier( (CvBoostType) type, 0, num_classes, num_classifiers );
ptr->numfeatures = num_features;
-
+
for( i = 0; i < num_classes * num_classifiers; i++ )
{
int count;
count++;
}
}
-
+
assert( count == ptr->cols );
}
cvFree( &sorted_weights );
{
CV_ERROR( CV_StsNullPtr, "trainClasses must be not NULL" );
}
-
+
*trainData = NULL;
*trainClasses = NULL;
file = fopen( filename, "r" );
{
CV_CALL( *trainData = cvCreateMat( n, m, CV_32FC1 ) );
}
-
+
CV_CALL( *trainClasses = cvCreateMat( 1, m, CV_32FC1 ) );
for( i = 0; i < m; i++ )
fclose( file );
__END__;
-
+
}
CV_BOOST_IMPL
{
CV_ERROR( CV_StsUnmatchedSizes, "Incorrect trainData and trainClasses sizes" );
}
-
+
if( sampleIdx != NULL )
{
count = (sampleIdx->rows == 1) ? sampleIdx->cols : sampleIdx->rows;
{
count = m;
}
-
+
file = fopen( filename, "w" );
if( !file )
for( j = 0; j < n; j++ )
{
fprintf( file, "%g ", ( (CV_IS_ROW_SAMPLE( flags ))
- ? CV_MAT_ELEM( *trainData, float, idx, j )
+ ? CV_MAT_ELEM( *trainData, float, idx, j )
: CV_MAT_ELEM( *trainData, float, j, idx ) ) );
}
fprintf( file, "%g\n", ( (clsrow)
}
fclose( file );
-
+
__END__;
}
#define ICV_RAND_SHUFFLE( suffix, type ) \
-void icvRandShuffle_##suffix( uchar* data, size_t step, int num ) \
+static void icvRandShuffle_##suffix( uchar* data, size_t step, int num ) \
{ \
time_t seed; \
type tmp; \
-CvIntHaarClassifier* icvLoadCARTStageHaarClassifierF( FILE* file, int step )
+static CvIntHaarClassifier* icvLoadCARTStageHaarClassifierF( FILE* file, int step )
{
CvStageHaarClassifier* ptr = NULL;
sum_type* tilted, float normfactor )
{
CvTreeCascadeNode* ptr;
- CvTreeCascadeClassifier* tree;
+ //CvTreeCascadeClassifier* tree;
- tree = (CvTreeCascadeClassifier*) classifier;
+ //tree = (CvTreeCascadeClassifier*) classifier;
/*
- * get sum image offsets for <rect> corner points
+ * get sum image offsets for <rect> corner points
* step - row step (measured in image pixels!) of sum image
*/
#define CV_SUM_OFFSETS( p0, p1, p2, p3, rect, step ) \
(p3) = (rect).x + (rect).width + (step) * ((rect).y + (rect).height);
/*
- * get tilted image offsets for <rect> corner points
+ * get tilted image offsets for <rect> corner points
* step - row step (measured in image pixels!) of tilted image
*/
#define CV_TILTED_OFFSETS( p0, p1, p2, p3, rect, step ) \
{
CvIntHaarFeatures* features = NULL;
CvTHaarFeature haarFeature;
-
+
CvMemStorage* storage = NULL;
CvSeq* seq = NULL;
CvSeqWriter writer;
int dx = 0;
int dy = 0;
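/* Editor's note: the #if 0 block is extended upwards so that it also covers
   the unused "factor" computation; the dead code then no longer produces a
   set-but-unused variable warning. */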
+#if 0
float factor = 1.0F;
factor = ((float) winsize.width) * winsize.height / (24 * 24);
-#if 0
+
s0 = (int) (s0 * factor);
s1 = (int) (s1 * factor);
s2 = (int) (s2 * factor);
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
-
+
// haar_y4
if ( (x+dx <= winsize.width ) && (y+dy*4 <= winsize.height) ) {
if (dx*4*dy < s0) continue;
}
}
- if (mode != 0 /*BASIC*/) {
+ if (mode != 0 /*BASIC*/) {
// point
if ( (x+dx*3 <= winsize.width) && (y+dy*3 <= winsize.height) ) {
if (dx*9*dy < s0) continue;
}
}
}
-
- if (mode == 2 /*ALL*/) {
+
+ if (mode == 2 /*ALL*/) {
// tilted haar_x2 (x, y, w, h, b, weight)
if ( (x+2*dx <= winsize.width) && (y+2*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*2*dy < s1) continue;
-
+
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x2",
x, y, dx*2, dy, -1,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
-
+
// tilted haar_y2 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+2*dy <= winsize.height) && (x-2*dy>= 0) ) {
if (dx*2*dy < s1) continue;
-
+
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y2",
x, y, dx, 2*dy, -1,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
-
+
// tilted haar_x3 (x, y, w, h, b, weight)
if ( (x+3*dx <= winsize.width) && (y+3*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*3*dy < s2) continue;
-
+
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x3",
x, y, dx*3, dy, -1,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
-
+
// tilted haar_y3 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+3*dy <= winsize.height) && (x-3*dy>= 0) ) {
if (dx*3*dy < s2) continue;
-
+
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y3",
x, y, dx, 3*dy, -1,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
-
-
+
+
// tilted haar_x4 (x, y, w, h, b, weight)
if ( (x+4*dx <= winsize.width) && (y+4*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*4*dy < s3) continue;
-
+
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x4",
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
-
+
// tilted haar_y4 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+4*dy <= winsize.height) && (x-4*dy>= 0) ) {
if (dx*4*dy < s3) continue;
-
+
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y4",
x, y, dx, 4*dy, -1,
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
-
+
/*
-
+
// tilted point
if ( (x+dx*3 <= winsize.width - 1) && (y+dy*3 <= winsize.height - 1) && (x-3*dy>= 0)) {
if (dx*9*dy < 36) continue;
features->winsize = winsize;
cvCvtSeqToArray( seq, (CvArr*) features->feature );
cvReleaseMemStorage( &storage );
-
+
icvConvertToFastHaarFeature( features->feature, features->fastfeature,
features->count, (winsize.width + 1) );
-
+
return features;
}
fastHaarFeature[i].rect[j].p3,
haarFeature[i].rect[j].r, step )
}
-
+
}
else
{
CvHaarTrainigData* icvCreateHaarTrainingData( CvSize winsize, int maxnumsamples )
{
CvHaarTrainigData* data;
-
+
CV_FUNCNAME( "icvCreateHaarTrainingData" );
-
+
__BEGIN__;
data = NULL;
uchar* ptr = NULL;
size_t datasize = 0;
-
+
datasize = sizeof( CvHaarTrainigData ) +
/* sum and tilted */
( 2 * (winsize.width + 1) * (winsize.height + 1) * sizeof( sum_type ) +
int j = 0;
float val = 0.0F;
float normfactor = 0.0F;
-
+
CvHaarTrainingData* training_data;
CvIntHaarFeatures* haar_features;
#if 0 /*def CV_VERBOSE*/
if( first % 5000 == 0 )
{
- fprintf( stderr, "%3d%%\r", (int) (100.0 * first /
+ fprintf( stderr, "%3d%%\r", (int) (100.0 * first /
haar_features->count) );
fflush( stderr );
}
t_data = *data->valcache;
t_idx = *data->idxcache;
t_portion = MIN( portion, (numprecalculated - first) );
-
+
/* indices */
t_idx.rows = t_portion;
t_idx.data.ptr = data->idxcache->data.ptr + first * ((size_t)t_idx.step);
{
if( cvEvalFastHaarFeature( fastfeature,
(sum_type*) (data->sum.data.ptr + i * data->sum.step),
- (sum_type*) (data->tilted.data.ptr + i * data->tilted.step) )
+ (sum_type*) (data->tilted.data.ptr + i * data->tilted.step) )
< threshold * data->normfactor.data.fl[i] )
{
(*left)->data.fl[(*left)->cols++] = (float) i;
index = (int) *((float*) (idxdata + i * idxstep));
if( cvEvalFastHaarFeature( fastfeature,
(sum_type*) (data->sum.data.ptr + index * data->sum.step),
- (sum_type*) (data->tilted.data.ptr + index * data->tilted.step) )
+ (sum_type*) (data->tilted.data.ptr + index * data->tilted.step) )
< threshold * data->normfactor.data.fl[index] )
{
(*left)->data.fl[(*left)->cols++] = (float) index;
float sum_stage = 0.0F;
float threshold = 0.0F;
float falsealarm = 0.0F;
-
+
//CvMat* sampleIdx = NULL;
CvMat* trimmedIdx;
//float* idxdata = NULL;
int idx;
int numsamples;
int numtrimmed;
-
+
CvCARTHaarClassifier* classifier;
CvSeq* seq = NULL;
CvMemStorage* storage = NULL;
printf( "| N |%%SMP|F| ST.THR | HR | FA | EXP. ERR|\n" );
printf( "+----+----+-+---------+---------+---------+---------+\n" );
#endif /* CV_VERBOSE */
-
+
n = haarFeatures->count;
m = data->sum.rows;
numsamples = (sampleIdx) ? MAX( sampleIdx->rows, sampleIdx->cols ) : m;
trainParams.userdata = &userdata;
eval = cvMat( 1, m, CV_32FC1, cvAlloc( sizeof( float ) * m ) );
-
+
storage = cvCreateMemStorage();
seq = cvCreateSeq( 0, sizeof( *seq ), sizeof( classifier ), storage );
num_splits = 0;
sumalpha = 0.0F;
do
- {
+ {
#ifdef CV_VERBOSE
int v_wt = 0;
num_splits += classifier->count;
cart->release( (CvClassifier**) &cart );
-
+
if( symmetric && (seq->total % 2) )
{
float normfactor = 0.0F;
CvStumpClassifier* stump;
-
+
/* flip haar features */
for( i = 0; i < classifier->count; i++ )
{
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
- classifier->feature[i].rect[j].r.x = data->winsize.width -
+ classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x -
- classifier->feature[i].rect[j].r.width;
+ classifier->feature[i].rect[j].r.width;
}
}
else
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
- classifier->feature[i].rect[j].r.x = data->winsize.width -
+ classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x;
CV_SWAP( classifier->feature[i].rect[j].r.width,
classifier->feature[i].rect[j].r.height, tmp );
weakTrainVals, 0, 0, 0, trimmedIdx,
&(data->weights),
trainParams.stumpTrainParams );
-
+
classifier->threshold[i] = stump->threshold;
if( classifier->left[i] <= 0 )
{
classifier->val[-classifier->right[i]] = stump->right;
}
- stump->release( (CvClassifier**) &stump );
-
+ stump->release( (CvClassifier**) &stump );
+
}
stumpTrainParams.getTrainData = icvGetTrainingDataCallback;
cvReleaseMat( &trimmedIdx );
trimmedIdx = NULL;
}
-
+
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
alpha = cvBoostNextWeakClassifier( &eval, &data->cls, weakTrainVals,
&data->weights, trainer );
sumalpha += alpha;
-
+
for( i = 0; i <= classifier->count; i++ )
{
- if( boosttype == CV_RABCLASS )
+ if( boosttype == CV_RABCLASS )
{
classifier->val[i] = cvLogRatio( classifier->val[i] );
}
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
- eval.data.fl[numpos] += classifier->eval(
+ eval.data.fl[numpos] += classifier->eval(
(CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
fflush( stdout );
}
#endif /* CV_VERBOSE */
-
+
} while( falsealarm > maxfalsealarm && (!maxsplits || (num_splits < maxsplits) ) );
cvBoostEndTraining( &trainer );
threshold );
cvCvtSeqToArray( seq, (CvArr*) stage->classifier );
}
-
+
/* CLEANUP */
cvReleaseMemStorage( &storage );
cvReleaseMat( &weakTrainVals );
cvFree( &(eval.data.ptr) );
-
+
return (CvIntHaarClassifier*) stage;
}
{
CvBackgroundData* data = NULL;
- const char* dir = NULL;
+ const char* dir = NULL;
char full[PATH_MAX];
char* imgfilename = NULL;
size_t datasize = 0;
int len = 0;
assert( filename != NULL );
-
+
dir = strrchr( filename, '\\' );
if( dir == NULL )
{
{
count = 0;
datasize = 0;
-
+
/* count */
while( !feof( input ) )
{
while( !feof( input ) )
{
*imgfilename = '\0';
- if( !fgets( imgfilename, PATH_MAX - (int)(imgfilename - full) - 1, input ))
+ if( !fgets( imgfilename, PATH_MAX - (int)(imgfilename - full) - 1, input ))
break;
len = (int)strlen( imgfilename );
- if( len > 0 && imgfilename[len-1] == '\n' )
- imgfilename[len-1] = 0, len--;
+ if( len > 0 && imgfilename[len-1] == '\n' )
+ imgfilename[len-1] = 0, len--;
if( len > 0 )
{
if( (*imgfilename) == '#' ) continue; /* comment */
{
round = data->round;
-//#ifdef CV_VERBOSE
+//#ifdef CV_VERBOSE
// printf( "Open background image: %s\n", data->filename[data->last] );
//#endif /* CV_VERBOSE */
-
+
data->last = rand() % data->count;
data->last %= data->count;
img = cvLoadImage( data->filename[data->last], 0 );
- if( !img )
+ if( !img )
continue;
data->round += data->last / data->count;
data->round = data->round % (data->winsize.width * data->winsize.height);
offset.x = MIN( offset.x, img->width - data->winsize.width );
offset.y = MIN( offset.y, img->height - data->winsize.height );
-
+
if( img != NULL && img->depth == IPL_DEPTH_8U && img->nChannels == 1 &&
offset.x >= 0 && offset.y >= 0 )
{
reader->scale = MAX(
((float) data->winsize.width + reader->point.x) / ((float) reader->src.cols),
((float) data->winsize.height + reader->point.y) / ((float) reader->src.rows) );
-
+
reader->img = cvMat( (int) (reader->scale * reader->src.rows + 0.5F),
(int) (reader->scale * reader->src.cols + 0.5F),
CV_8UC1, (void*) cvAlloc( datasize ) );
sum_type valsum = 0;
sqsum_type valsqsum = 0;
double area = 0.0;
-
+
cvIntegral( img, sum, sqsum, tilted );
normrect = cvRect( 1, 1, img->cols - 2, img->rows - 2 );
CV_SUM_OFFSETS( p0, p1, p2, p3, normrect, img->cols + 1 )
-
+
area = normrect.width * normrect.height;
valsum = ((sum_type*) (sum->data.ptr))[p0] - ((sum_type*) (sum->data.ptr))[p1]
- ((sum_type*) (sum->data.ptr))[p2] + ((sum_type*) (sum->data.ptr))[p3];
int i = 0;
ccounter_t getcount = 0;
ccounter_t thread_getcount = 0;
- ccounter_t consumed_count;
+ ccounter_t consumed_count;
ccounter_t thread_consumed_count;
-
+
/* private variables */
CvMat img;
CvMat sum;
CvMat tilted;
CvMat sqsum;
-
+
sum_type* sumdata;
sum_type* tilteddata;
float* normfactor;
-
+
/* end private variables */
-
+
assert( data != NULL );
assert( first + count <= data->maxnum );
assert( cascade != NULL );
assert( callback != NULL );
-
+
// if( !cvbgdata ) return 0; this check needs to be done in the callback for BG
-
+
CCOUNTER_SET_ZERO(getcount);
CCOUNTER_SET_ZERO(thread_getcount);
CCOUNTER_SET_ZERO(consumed_count);
normfactor = data->normfactor.data.fl + i;
sum.data.ptr = (uchar*) sumdata;
tilted.data.ptr = (uchar*) tilteddata;
- icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );
+ icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );
if( cascade->eval( cascade, sumdata, tilteddata, *normfactor ) != 0.0F )
{
CCOUNTER_INC(thread_getcount);
break;
}
}
-
+
#ifdef CV_VERBOSE
if( (i - first) % 500 == 0 )
{
CCOUNTER_ADD(consumed_count, thread_consumed_count);
}
} /* omp parallel */
-
+
if( consumed != NULL )
{
*consumed = (int)consumed_count;
/* *acceptance_ratio = ((double) count) / consumed_count; */
*acceptance_ratio = CCOUNTER_DIV(count, consumed_count);
}
-
+
return static_cast<int>(getcount);
}
// CV_SQSUM_MAT_TYPE,
// cvAlloc( sizeof( sqsum_type ) * (data->winsize.height + 1)
// * (data->winsize.width + 1) ) );
-//
+//
// #ifdef CV_OPENMP
// #pragma omp for schedule(static, 1)
// #endif /* CV_OPENMP */
// for( ; ; )
// {
// icvGetBackgroundImage( cvbgdata, cvbgreader, &img );
-//
+//
// CCOUNTER_INC(thread_consumed_count);
//
// sumdata = (sum_type*) (data->sum.data.ptr + i * data->sum.step);
// normfactor = data->normfactor.data.fl + i;
// sum.data.ptr = (uchar*) sumdata;
// tilted.data.ptr = (uchar*) tilteddata;
-// icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );
+// icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );
// if( cascade->eval( cascade, sumdata, tilteddata, *normfactor ) != 0.0F )
// {
// break;
// fflush( stderr );
// }
//#endif /* CV_VERBOSE */
-//
+//
// }
//
// cvFree( &(img.data.ptr) );
// /* *acceptance_ratio = ((double) count) / consumed_count; */
// *acceptance_ratio = CCOUNTER_DIV(count, consumed_count);
// }
-//
+//
// return count;
//}
int c = 0;
assert( img->rows * img->cols == ((CvVecFile*) userdata)->vecsize );
-
+
size_t elements_read = fread( &tmp, sizeof( tmp ), 1, ((CvVecFile*) userdata)->input );
CV_Assert(elements_read == 1);
elements_read = fread( ((CvVecFile*) userdata)->vector, sizeof( short ),
((CvVecFile*) userdata)->vecsize, ((CvVecFile*) userdata)->input );
CV_Assert(elements_read == (size_t)((CvVecFile*) userdata)->vecsize);
-
- if( feof( ((CvVecFile*) userdata)->input ) ||
+
+ if( feof( ((CvVecFile*) userdata)->input ) ||
(((CvVecFile*) userdata)->last)++ >= ((CvVecFile*) userdata)->count )
{
return 0;
}
-
+
for( r = 0; r < img->rows; r++ )
{
for( c = 0; c < img->cols; c++ )
{
- CV_MAT_ELEM( *img, uchar, r, c ) =
+ CV_MAT_ELEM( *img, uchar, r, c ) =
(uchar) ( ((CvVecFile*) userdata)->vector[r * img->cols + c] );
}
}
return 1;
}
-int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
+static int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
{
if (! cvbgdata)
return 0;
-
+
if (! cvbgreader)
return 0;
-
+
// just in case icvGetBackgroundImage is not thread-safe ...
#ifdef CV_OPENMP
#pragma omp critical (get_background_image_callback)
{
icvGetBackgroundImage( cvbgdata, cvbgreader, img );
}
-
+
return 1;
}
* Get training data from .vec file
*/
static
-int icvGetHaarTrainingDataFromVec( CvHaarTrainingData* data, int first, int count,
+int icvGetHaarTrainingDataFromVec( CvHaarTrainingData* data, int first, int count,
CvIntHaarClassifier* cascade,
const char* filename,
int* consumed )
__BEGIN__;
CvVecFile file;
- short tmp = 0;
-
+ short tmp = 0;
+
file.input = NULL;
if( filename ) file.input = fopen( filename, "rb" );
if (filename)
{
CvVecFile file;
- short tmp = 0;
-
+ short tmp = 0;
+
file.input = NULL;
if( filename ) file.input = fopen( filename, "rb" );
void cvCreateCascadeClassifier( const char* dirname,
const char* vecfilename,
- const char* bgfilename,
+ const char* bgfilename,
int npos, int nneg, int nstages,
int numprecalculated,
int numsplits,
cascade = (CvCascadeHaarClassifier*) icvCreateCascadeHaarClassifier( nstages );
cascade->count = 0;
-
+
if( icvInitBackgroundReaders( bgfilename, winsize ) )
{
data = icvCreateHaarTrainingData( winsize, npos + nneg );
for( i = 0; i < nstages; i++, cascade->count++ )
{
sprintf( stagename, "%s%d/%s", dirname, i, CV_STAGE_CART_FILE_NAME );
- cascade->classifier[i] =
+ cascade->classifier[i] =
icvLoadCARTStageHaarClassifier( stagename, winsize.width + 1 );
if( !icvMkDir( stagename ) )
data->sum.rows = data->tilted.rows = poscount + negcount;
data->normfactor.cols = data->weights.cols = data->cls.cols =
poscount + negcount;
-
+
posweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F / poscount);
negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F / negcount);
for( j = 0; j < poscount; j++ )
file = fopen( stagename, "w" );
if( file != NULL )
{
- cascade->classifier[i]->save(
+ cascade->classifier[i]->save(
(CvIntHaarClassifier*) cascade->classifier[i], file );
fclose( file );
}
{
char xml_path[1024];
int len = (int)strlen(dirname);
- CvHaarClassifierCascade* cascade = 0;
+ CvHaarClassifierCascade* cascade1 = 0;
strcpy( xml_path, dirname );
if( xml_path[len-1] == '\\' || xml_path[len-1] == '/' )
len--;
strcpy( xml_path + len, ".xml" );
- cascade = cvLoadHaarClassifierCascade( dirname, cvSize(winwidth,winheight) );
- if( cascade )
- cvSave( xml_path, cascade );
- cvReleaseHaarClassifierCascade( &cascade );
+ cascade1 = cvLoadHaarClassifierCascade( dirname, cvSize(winwidth,winheight) );
+ if( cascade1 )
+ cvSave( xml_path, cascade1 );
+ cvReleaseHaarClassifierCascade( &cascade1 );
}
}
else
printf( "FAILED TO INITIALIZE BACKGROUND READERS\n" );
#endif /* CV_VERBOSE */
}
-
+
/* CLEAN UP */
icvDestroyBackgroundReaders();
cascade->release( (CvIntHaarClassifier**) &cascade );
/* tree cascade classifier */
-int icvNumSplits( CvStageHaarClassifier* stage )
+static int icvNumSplits( CvStageHaarClassifier* stage )
{
int i;
int num;
return num;
}
-void icvSetNumSamples( CvHaarTrainingData* training_data, int num )
+static void icvSetNumSamples( CvHaarTrainingData* training_data, int num )
{
assert( num <= training_data->maxnum );
training_data->cls.cols = training_data->weights.cols = num;
}
-void icvSetWeightsAndClasses( CvHaarTrainingData* training_data,
+static void icvSetWeightsAndClasses( CvHaarTrainingData* training_data,
int num1, float weight1, float cls1,
int num2, float weight2, float cls2 )
{
}
}
-CvMat* icvGetUsedValues( CvHaarTrainingData* training_data,
+static CvMat* icvGetUsedValues( CvHaarTrainingData* training_data,
int start, int num,
CvIntHaarFeatures* haar_features,
CvStageHaarClassifier* stage )
}
total = last + 1;
CV_CALL( ptr = cvCreateMat( num, total, CV_32FC1 ) );
-
+
#ifdef CV_OPENMP
#pragma omp parallel for
void cvCreateTreeCascadeClassifier( const char* dirname,
const char* vecfilename,
- const char* bgfilename,
+ const char* bgfilename,
int npos, int nneg, int nstages,
int numprecalculated,
int numsplits,
sprintf( stage_name, "%s/", dirname );
suffix = stage_name + strlen( stage_name );
-
+
if (! bg_vecfile)
if( !icvInitBackgroundReaders( bgfilename, winsize ) && nstages > 0 )
CV_ERROR( CV_StsError, "Unable to read negative images" );
-
+
if( nstages > 0 )
{
/* width-first search in the tree */
CvSplit* first_split;
CvSplit* last_split;
CvSplit* cur_split;
-
+
CvTreeCascadeNode* parent;
CvTreeCascadeNode* cur_node;
CvTreeCascadeNode* last_node;
parent = leaves;
leaves = NULL;
do
- {
+ {
int best_clusters; /* best selected number of clusters */
float posweight, negweight;
double leaf_fa_rate;
{
CvTreeCascadeNode* single_cluster;
CvTreeCascadeNode* multiple_clusters;
- CvSplit* cur_split;
int single_num;
icvSetNumSamples( training_data, poscount + negcount );
multiple_clusters = NULL;
printf( "Number of used features: %d\n", single_num );
-
+
if( maxtreesplits >= 0 )
{
max_clusters = MIN( max_clusters, maxtreesplits - total_splits + 1 );
printf( "Clusters are too small. Clustering aborted.\n" );
break;
}
-
+
cur_num = 0;
cur_node = last_node = NULL;
for( cluster = 0; (cluster < k) && (cur_num < best_num); cluster++ )
} /* try different number of clusters */
cvReleaseMat( &vals );
- CV_CALL( cur_split = (CvSplit*) cvAlloc( sizeof( *cur_split ) ) );
- CV_ZERO_OBJ( cur_split );
-
- if( last_split ) last_split->next = cur_split;
- else first_split = cur_split;
- last_split = cur_split;
-
- cur_split->single_cluster = single_cluster;
- cur_split->multiple_clusters = multiple_clusters;
- cur_split->num_clusters = best_clusters;
- cur_split->parent = parent;
- cur_split->single_multiple_ratio = (float) single_num / best_num;
+ CvSplit* curSplit;
+ CV_CALL( curSplit = (CvSplit*) cvAlloc( sizeof( *curSplit ) ) );
+ CV_ZERO_OBJ( curSplit );
+
+ if( last_split ) last_split->next = curSplit;
+ else first_split = curSplit;
+ last_split = curSplit;
+
+ curSplit->single_cluster = single_cluster;
+ curSplit->multiple_clusters = multiple_clusters;
+ curSplit->num_clusters = best_clusters;
+ curSplit->parent = parent;
+ curSplit->single_multiple_ratio = (float) single_num / best_num;
}
if( parent ) parent = parent->next_same_level;
? last_split->multiple_clusters : last_split->single_cluster;
parent = last_split->parent;
if( parent ) parent->child = cur_node;
-
+
/* connect leaves via next_same_level and save them */
for( ; cur_node; cur_node = cur_node->next )
{
printf( "\nParent node: %s\n", buf );
printf( "Chosen number of splits: %d\n\n", (last_split->multiple_clusters)
? (last_split->num_clusters - 1) : 0 );
-
+
cur_split = last_split;
last_split = last_split->next;
cvFree( &cur_split );
} /* for each split point */
printf( "Total number of splits: %d\n", total_splits );
-
+
if( !(tcc->root) ) tcc->root = leaves;
CV_CALL( icvPrintTreeCascade( tcc->root ) );
inverse = (rand() > (RAND_MAX/2));
}
icvPlaceDistortedSample( &sample, inverse, maxintensitydev,
- maxxangle, maxyangle, maxzangle,
+ maxxangle, maxyangle, maxzangle,
0 /* nonzero means placing image without cut offs */,
                             0.0 /* nonzero adds random shifting */,
                             0.0 /* nonzero adds random scaling */,
cvFree( &(sample.data.ptr) );
fclose( output );
} /* if( output != NULL ) */
-
+
icvEndSampleDistortion( &data );
}
-
+
#ifdef CV_VERBOSE
printf( "\r \r" );
-#endif /* CV_VERBOSE */
+#endif /* CV_VERBOSE */
}
{
cvNamedWindow( "Image", CV_WINDOW_AUTOSIZE );
}
-
+
info = fopen( infoname, "w" );
strcpy( fullname, infoname );
filename = strrchr( fullname, '\\' );
for( i = 0; i < count; i++ )
{
icvGetNextFromBackgroundData( cvbgdata, cvbgreader );
-
+
maxscale = MIN( 0.7F * cvbgreader->src.cols / winwidth,
0.7F * cvbgreader->src.rows / winheight );
if( maxscale < 1.0F ) continue;
inverse = (rand() > (RAND_MAX/2));
}
icvPlaceDistortedSample( &win, inverse, maxintensitydev,
- maxxangle, maxyangle, maxzangle,
+ maxxangle, maxyangle, maxzangle,
1, 0.0, 0.0, &data );
-
-
+
+
sprintf( filename, "%04d_%04d_%04d_%04d_%04d.jpg",
(i + 1), x, y, width, height );
-
- if( info )
+
+ if( info )
{
fprintf( info, "%s %d %d %d %d %d\n",
filename, 1, x, y, width, height );
* cij - coeffs[i][j], coeffs[2][2] = 1
* (ui, vi) - rectangle vertices
*/
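/* For reference, a sketch of the relation being solved here (the standard planar
 * perspective map; which points act as source vs. destination corners is an
 * assumption, not stated by the code):
 *     ui = (c00*xi + c01*yi + c02) / (c20*xi + c21*yi + 1)
 *     vi = (c10*xi + c11*yi + c12) / (c20*xi + c21*yi + 1)
 * Clearing the common denominator gives eight linear equations in the eight
 * unknown coefficients cij, with c22 fixed to 1 as noted above. */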
-void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
+static void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
double coeffs[3][3] )
{
//CV_FUNCNAME( "cvWarpPerspective" );
}
/* Warps source into destination by a perspective transform */
-void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
+static void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
{
CV_FUNCNAME( "cvWarpPerspective" );
int i00, i10, i01, i11;
i00 = i10 = i01 = i11 = (int) fill_value;
- double i = fill_value;
-
/* linear interpolation using 2x2 neighborhood */
if( isrc_x >= 0 && isrc_x <= src_size.width &&
isrc_y >= 0 && isrc_y <= src_size.height )
double i0 = i00 + (i10 - i00)*delta_x;
double i1 = i01 + (i11 - i01)*delta_x;
- i = i0 + (i1 - i0)*delta_y;
- ((uchar*)(dst_data + y * dst_step))[x] = (uchar) i;
+ ((uchar*)(dst_data + y * dst_step))[x] = (uchar) (i0 + (i1 - i0)*delta_y);
}
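            /* Note: the two x-interpolations above followed by the final y-interpolation
               are the usual bilinear blend,
                   I = (1-dy)*((1-dx)*i00 + dx*i10) + dy*((1-dx)*i01 + dx*i11)
               with dx = delta_x and dy = delta_y. */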
x_min += k_left;
x_max += k_right;
*
* Measure performance of classifier
*/
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "cv.h"
#include "highgui.h"
totaltime = 0.0;
if( info != NULL )
{
- int x, y, width, height;
+ int x, y;
IplImage* img;
int hits, missed, falseAlarms;
int totalHits, totalMissed, totalFalseAlarms;
ref = (ObjectPos*) cvAlloc( refcount * sizeof( *ref ) );
for( i = 0; i < refcount; i++ )
{
- error = (fscanf( info, "%d %d %d %d", &x, &y, &width, &height ) != 4);
+ int w, h;
+ error = (fscanf( info, "%d %d %d %d", &x, &y, &w, &h ) != 4);
if( error ) break;
- ref[i].x = 0.5F * width + x;
- ref[i].y = 0.5F * height + y;
- ref[i].width = sqrtf( 0.5F * (width * width + height * height) );
+ ref[i].x = 0.5F * w + x;
+ ref[i].y = 0.5F * h + y;
+ ref[i].width = sqrtf( 0.5F * (w * w + h * h) );
ref[i].found = 0;
ref[i].neghbors = 0;
}
+#include "opencv2/core/core.hpp"\r
+#include "opencv2/core/internal.hpp"\r
+\r
#include "HOGfeatures.h"\r
#include "cascadeclassifier.h"\r
\r
features[featIdx].write( fs, componentIdx );\r
fs << "}";\r
}\r
- fs << "]"; \r
+ fs << "]";\r
}\r
\r
void CvHOGEvaluator::generateFeatures()\r
}\r
}\r
w = 4*t;\r
- h = 2*t; \r
+ h = 2*t;\r
for (x = 0; x <= winSize.width - w; x += blockStep.width)\r
{\r
for (y = 0; y <= winSize.height - h; y += blockStep.height)\r
- { \r
+ {\r
features.push_back(Feature(offset, x, y, 2*t, t));\r
}\r
}\r
// int cellIdx = featComponent / N_BINS;\r
// int binIdx = featComponent % N_BINS;\r
//\r
-// fs << CC_RECTS << "[:" << rect[cellIdx].x << rect[cellIdx].y << \r
+// fs << CC_RECTS << "[:" << rect[cellIdx].x << rect[cellIdx].y <<\r
// rect[cellIdx].width << rect[cellIdx].height << binIdx << "]";\r
//}\r
\r
//The whole block is necessary for block normalization\r
void CvHOGEvaluator::Feature::write(FileStorage &fs, int featComponentIdx) const\r
{\r
- fs << CC_RECT << "[:" << rect[0].x << rect[0].y << \r
+ fs << CC_RECT << "[:" << rect[0].x << rect[0].y <<\r
rect[0].width << rect[0].height << featComponentIdx << "]";\r
}\r
\r
memset( histBuf, 0, histSize.width * sizeof(histBuf[0]) );\r
histBuf += histStep + 1;\r
for( y = 0; y < qangle.rows; y++ )\r
- { \r
+ {\r
histBuf[-1] = 0.f;\r
float strSum = 0.f;\r
for( x = 0; x < qangle.cols; x++ )\r
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "boost.h"
#include "cascadeclassifier.h"
#include <queue>
//----------------------------- CascadeBoostParams -------------------------------------------------
CvCascadeBoostParams::CvCascadeBoostParams() : minHitRate( 0.995F), maxFalseAlarm( 0.5F )
-{
+{
boost_type = CvBoost::GENTLE;
use_surrogates = use_1se_rule = truncate_pruned_tree = false;
}
void CvCascadeBoostParams::write( FileStorage &fs ) const
{
- String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST :
+ String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST :
boost_type == CvBoost::REAL ? CC_REAL_BOOST :
boost_type == CvBoost::LOGIT ? CC_LOGIT_BOOST :
boost_type == CvBoost::GENTLE ? CC_GENTLE_BOOST : String();
void CvCascadeBoostParams::printDefaults() const
{
cout << "--boostParams--" << endl;
- cout << " [-bt <{" << CC_DISCRETE_BOOST << ", "
+ cout << " [-bt <{" << CC_DISCRETE_BOOST << ", "
<< CC_REAL_BOOST << ", "
<< CC_LOGIT_BOOST ", "
<< CC_GENTLE_BOOST << "(default)}>]" << endl;
void CvCascadeBoostParams::printAttrs() const
{
- String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST :
+ String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST :
boost_type == CvBoost::REAL ? CC_REAL_BOOST :
boost_type == CvBoost::LOGIT ? CC_LOGIT_BOOST :
boost_type == CvBoost::GENTLE ? CC_GENTLE_BOOST : String();
else
res = false;
- return res;
+ return res;
}
CvDTreeNode* CvCascadeBoostTrainData::subsample_data( const CvMat* _subsample_idx )
set_params( _params );
max_c_count = MAX( 2, featureEvaluator->getMaxCatCount() );
var_type = cvCreateMat( 1, var_count + 2, CV_32SC1 );
- if ( featureEvaluator->getMaxCatCount() > 0 )
+ if ( featureEvaluator->getMaxCatCount() > 0 )
{
numPrecalcIdx = 0;
cat_var_count = var_count;
for( int vi = 0; vi < var_count; vi++ )
{
var_type->data.i[vi] = vi;
- }
+ }
}
else
{
for( int vi = 1; vi <= var_count; vi++ )
{
var_type->data.i[vi-1] = -vi;
- }
- }
+ }
+ }
var_type->data.i[var_count] = cat_var_count;
var_type->data.i[var_count+1] = cat_var_count+1;
treeBlockSize = MAX(treeBlockSize + BlockSizeDelta, MinBlockSize);
tree_storage = cvCreateMemStorage( treeBlockSize );
node_heap = cvCreateSet( 0, sizeof(node_heap[0]), sizeof(CvDTreeNode), tree_storage );
- split_heap = cvCreateSet( 0, sizeof(split_heap[0]), maxSplitSize, tree_storage );
+ split_heap = cvCreateSet( 0, sizeof(split_heap[0]), maxSplitSize, tree_storage );
}
CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _featureEvaluator,
{
setData( _featureEvaluator, _numSamples, _precalcValBufSize, _precalcIdxBufSize, _params );
}
-
+
void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluator,
int _numSamples,
int _precalcValBufSize, int _precalcIdxBufSize,
- const CvDTreeParams& _params )
-{
+ const CvDTreeParams& _params )
+{
int* idst = 0;
unsigned short* udst = 0;
-
+
clear();
shared = true;
have_labels = true;
_resp = featureEvaluator->getCls();
responses = &_resp;
// TODO: check responses: elements must be 0 or 1
-
- if( _precalcValBufSize < 0 || _precalcIdxBufSize < 0)
+
+ if( _precalcValBufSize < 0 || _precalcIdxBufSize < 0)
CV_Error( CV_StsOutOfRange, "_numPrecalcVal and _numPrecalcIdx must be positive or 0" );
- var_count = var_all = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize();
+ var_count = var_all = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize();
sample_count = _numSamples;
-
- is_buf_16u = false;
- if (sample_count < 65536)
- is_buf_16u = true;
+
+ is_buf_16u = false;
+ if (sample_count < 65536)
+ is_buf_16u = true;
numPrecalcVal = min( cvRound((double)_precalcValBufSize*1048576. / (sizeof(float)*sample_count)), var_count );
numPrecalcIdx = min( cvRound((double)_precalcIdxBufSize*1048576. /
valCache.create( numPrecalcVal, sample_count, CV_32FC1 );
var_type = cvCreateMat( 1, var_count + 2, CV_32SC1 );
-
- if ( featureEvaluator->getMaxCatCount() > 0 )
+
+ if ( featureEvaluator->getMaxCatCount() > 0 )
{
numPrecalcIdx = 0;
cat_var_count = var_count;
for( int vi = 0; vi < var_count; vi++ )
{
var_type->data.i[vi] = vi;
- }
+ }
}
else
{
for( int vi = 1; vi <= var_count; vi++ )
{
var_type->data.i[vi-1] = -vi;
- }
+ }
}
var_type->data.i[var_count] = cat_var_count;
var_type->data.i[var_count+1] = cat_var_count+1;
work_var_count = ( cat_var_count ? 0 : numPrecalcIdx ) + 1/*cv_lables*/;
buf_size = (work_var_count + 1) * sample_count/*sample_indices*/;
buf_count = 2;
-
+
if ( is_buf_16u )
buf = cvCreateMat( buf_count, buf_size, CV_16UC1 );
else
cat_count = cvCreateMat( 1, cat_var_count + 1, CV_32SC1 );
// precalculate valCache and set indices in buf
- precalculate();
+ precalculate();
// now calculate the maximum size of split,
// create memory storage that will keep nodes and splits of the decision tree
tempBlockSize = MAX( tempBlockSize + BlockSizeDelta, MinBlockSize );
temp_storage = cvCreateMemStorage( tempBlockSize );
nv_heap = cvCreateSet( 0, sizeof(*nv_heap), nvSize, temp_storage );
-
+
data_root = new_node( 0, sample_count, 0, 0 );
// set sample labels
const int* CvCascadeBoostTrainData::get_class_labels( CvDTreeNode* n, int* labelsBuf)
{
- int nodeSampleCount = n->sample_count;
+ int nodeSampleCount = n->sample_count;
int rStep = CV_IS_MAT_CONT( responses->type ) ? 1 : responses->step / CV_ELEM_SIZE( responses->type );
int* sampleIndicesBuf = labelsBuf; //
{
int sidx = sampleIndices[si];
labelsBuf[si] = (int)responses->data.fl[sidx*rStep];
- }
+ }
return labelsBuf;
}
void CvCascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* ordValuesBuf, int* sortedIndicesBuf,
const float** ordValues, const int** sortedIndices, int* sampleIndicesBuf )
{
- int nodeSampleCount = n->sample_count;
+ int nodeSampleCount = n->sample_count;
const int* sampleIndices = get_sample_indices(n, sampleIndicesBuf);
-
+
if ( vi < numPrecalcIdx )
{
if( !is_buf_16u )
*sortedIndices = sortedIndicesBuf;
}
-
+
if( vi < numPrecalcVal )
{
for( int i = 0; i < nodeSampleCount; i++ )
ordValuesBuf[i] = (&sampleValues[0])[sortedIndicesBuf[i]];
*sortedIndices = sortedIndicesBuf;
}
-
+
*ordValues = ordValuesBuf;
}
-
+
const int* CvCascadeBoostTrainData::get_cat_var_data( CvDTreeNode* n, int vi, int* catValuesBuf )
{
int nodeSampleCount = n->sample_count;
float CvCascadeBoostTrainData::getVarValue( int vi, int si )
{
if ( vi < numPrecalcVal && !valCache.empty() )
- return valCache.at<float>( vi, si );
- return (*featureEvaluator)( vi, si );
+ return valCache.at<float>( vi, si );
+ return (*featureEvaluator)( vi, si );
}
CvDTreeNode* node = root;
if( !node )
CV_Error( CV_StsError, "The tree has not been trained yet" );
-
+
if ( ((CvCascadeBoostTrainData*)data)->featureEvaluator->getMaxCatCount() == 0 ) // ordered
{
while( node->left )
int maxCatCount = ((CvCascadeBoostTrainData*)_data)->featureEvaluator->getMaxCatCount();
int subsetN = (maxCatCount + 31)/32;
int step = 3 + ( maxCatCount>0 ? subsetN : 1 );
-
+
queue<CvDTreeNode*> internalNodesQueue;
FileNodeIterator internalNodesIt, leafValsuesIt;
CvDTreeNode* prntNode, *cldNode;
{
prntNode->right = cldNode = data->new_node( 0, 0, 0, 0 );
*leafValsuesIt >> cldNode->value; leafValsuesIt--;
- cldNode->parent = prntNode;
+ cldNode->parent = prntNode;
}
else
{
- prntNode->right = internalNodesQueue.front();
+ prntNode->right = internalNodesQueue.front();
prntNode->right->parent = prntNode;
internalNodesQueue.pop();
}
{
prntNode->left = cldNode = data->new_node( 0, 0, 0, 0 );
*leafValsuesIt >> cldNode->value; leafValsuesIt--;
- cldNode->parent = prntNode;
+ cldNode->parent = prntNode;
}
else
{
}
}
CV_Assert( n1 == n );
- }
+ }
else
{
int *ldst, *rdst;
}
}
CV_Assert( n1 == n );
- }
+ }
}
// split cv_labels using newIdx relocation table
}
}
}
-
+
// split sample indices
int *sampleIdx_src_buf = tempBuf + n;
const int* sampleIdx_src = data->get_sample_indices(node, sampleIdx_src_buf);
if (data->is_buf_16u)
{
- unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
+ unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
workVarCount*scount + left->offset);
- unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
+ unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
workVarCount*scount + right->offset);
for (int i = 0; i < n; i++)
{
}
else
{
- int* ldst = buf->data.i + left->buf_idx*buf->cols +
+ int* ldst = buf->data.i + left->buf_idx*buf->cols +
workVarCount*scount + left->offset;
- int* rdst = buf->data.i + right->buf_idx*buf->cols +
+ int* rdst = buf->data.i + right->buf_idx*buf->cols +
workVarCount*scount + right->offset;
for (int i = 0; i < n; i++)
{
}
// deallocate the parent node data that is not needed anymore
- data->free_node_data(node);
+ data->free_node_data(node);
}
-void auxMarkFeaturesInMap( const CvDTreeNode* node, Mat& featureMap)
+static void auxMarkFeaturesInMap( const CvDTreeNode* node, Mat& featureMap)
{
if ( node && node->split )
{
set_params( _params );
if ( (_params.boost_type == LOGIT) || (_params.boost_type == GENTLE) )
data->do_responses_copy();
-
+
update_weights( 0 );
cout << "+----+---------+---------+" << endl;
minHitRate = ((CvCascadeBoostParams&)_params).minHitRate;
maxFalseAlarm = ((CvCascadeBoostParams&)_params).maxFalseAlarm;
return ( ( minHitRate > 0 ) && ( minHitRate < 1) &&
- ( maxFalseAlarm > 0 ) && ( maxFalseAlarm < 1) &&
+ ( maxFalseAlarm > 0 ) && ( maxFalseAlarm < 1) &&
CvBoost::set_params( _params ));
}
if (data->is_buf_16u)
{
- unsigned short* labels = (unsigned short*)(buf->data.s + data->data_root->buf_idx*buf->cols +
+ unsigned short* labels = (unsigned short*)(buf->data.s + data->data_root->buf_idx*buf->cols +
data->data_root->offset + (data->work_var_count-1)*data->sample_count);
for( int i = 0; i < n; i++ )
{
}
else
{
- int* labels = buf->data.i + data->data_root->buf_idx*buf->cols +
+ int* labels = buf->data.i + data->data_root->buf_idx*buf->cols +
data->data_root->offset + (data->work_var_count-1)*data->sample_count;
for( int i = 0; i < n; i++ )
{
// invert the subsample mask
cvXorS( subsample_mask, cvScalar(1.), subsample_mask );
-
+
// run tree through all the non-processed samples
for( int i = 0; i < n; i++ )
if( subsample_mask->data.ptr[i] )
int sCount = data->sample_count,
numPos = 0, numNeg = 0, numFalse = 0, numPosTrue = 0;
vector<float> eval(sCount);
-
+
for( int i = 0; i < sCount; i++ )
if( ((CvCascadeBoostTrainData*)data)->featureEvaluator->getCls( i ) == 1.0F )
eval[numPos++] = predict( i, true );
set_params( _params );
node[CC_STAGE_THRESHOLD] >> threshold;
- FileNode rnode = node[CC_WEAK_CLASSIFIERS];
+ FileNode rnode = node[CC_WEAK_CLASSIFIERS];
storage = cvCreateMemStorage();
weak = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvBoostTree*), storage );
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "cascadeclassifier.h"
#include <queue>
static const char* stageTypes[] = { CC_BOOST };
static const char* featureTypes[] = { CC_HAAR, CC_LBP, CC_HOG };
-CvCascadeParams::CvCascadeParams() : stageType( defaultStageType ),
+CvCascadeParams::CvCascadeParams() : stageType( defaultStageType ),
featureType( defaultFeatureType ), winSize( cvSize(24, 24) )
-{
- name = CC_CASCADE_PARAMS;
+{
+ name = CC_CASCADE_PARAMS;
}
CvCascadeParams::CvCascadeParams( int _stageType, int _featureType ) : stageType( _stageType ),
featureType( _featureType ), winSize( cvSize(24, 24) )
-{
+{
name = CC_CASCADE_PARAMS;
}
CV_Assert( !stageTypeStr.empty() );
fs << CC_STAGE_TYPE << stageTypeStr;
String featureTypeStr = featureType == CvFeatureParams::HAAR ? CC_HAAR :
- featureType == CvFeatureParams::LBP ? CC_LBP :
+ featureType == CvFeatureParams::LBP ? CC_LBP :
featureType == CvFeatureParams::HOG ? CC_HOG :
0;
CV_Assert( !stageTypeStr.empty() );
return false;
rnode >> featureTypeStr;
featureType = !featureTypeStr.compare( CC_HAAR ) ? CvFeatureParams::HAAR :
- !featureTypeStr.compare( CC_LBP ) ? CvFeatureParams::LBP :
+ !featureTypeStr.compare( CC_LBP ) ? CvFeatureParams::LBP :
!featureTypeStr.compare( CC_HOG ) ? CvFeatureParams::HOG :
-1;
if (featureType == -1)
bool CvCascadeClassifier::train( const String _cascadeDirName,
const String _posFilename,
- const String _negFilename,
- int _numPos, int _numNeg,
+ const String _negFilename,
+ int _numPos, int _numNeg,
int _precalcValBufSize, int _precalcIdxBufSize,
int _numStages,
const CvCascadeParams& _cascadeParams,
const CvFeatureParams& _featureParams,
const CvCascadeBoostParams& _stageParams,
bool baseFormatSave )
-{
+{
if( _cascadeDirName.empty() || _posFilename.empty() || _negFilename.empty() )
CV_Error( CV_StsBadArg, "_cascadeDirName or _bgfileName or _vecFileName is NULL" );
cout << endl << "Stages 0-" << startNumStages-1 << " are loaded" << endl;
else if ( startNumStages == 1)
cout << endl << "Stage 0 is loaded" << endl;
-
+
double requiredLeafFARate = pow( (double) stageParams->maxFalseAlarm, (double) numStages ) /
(double)stageParams->max_depth;
double tempLeafFARate;
-
+
for( int i = startNumStages; i < numStages; i++ )
{
cout << endl << "===== TRAINING " << i << "-stage =====" << endl;
cout << "<BEGIN" << endl;
- if ( !updateTrainingSet( tempLeafFARate ) )
+ if ( !updateTrainingSet( tempLeafFARate ) )
{
cout << "Train dataset for temp stage can not be filled. "
"Branch training terminated." << endl;
stageClassifiers.push_back( tempStage );
cout << "END>" << endl;
-
+
// save params
String filename;
- if ( i == 0)
+ if ( i == 0)
{
filename = dirName + CC_PARAMS_FILENAME;
FileStorage fs( filename, FileStorage::WRITE);
{
bool isGetImg = isPositive ? imgReader.getPos( img ) :
imgReader.getNeg( img );
- if( !isGetImg )
+ if( !isGetImg )
return getcount;
consumed++;
void CvCascadeClassifier::writeFeatures( FileStorage &fs, const Mat& featureMap ) const
{
- ((CvFeatureEvaluator*)((Ptr<CvFeatureEvaluator>)featureEvaluator))->writeFeatures( fs, featureMap );
+ ((CvFeatureEvaluator*)((Ptr<CvFeatureEvaluator>)featureEvaluator))->writeFeatures( fs, featureMap );
}
void CvCascadeClassifier::writeStages( FileStorage &fs, const Mat& featureMap ) const
{
char cmnt[30];
int i = 0;
- fs << CC_STAGES << "[";
+ fs << CC_STAGES << "[";
for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin();
it != stageClassifiers.end(); it++, i++ )
{
{
if ( !node.isMap() || !cascadeParams.read( node ) )
return false;
-
+
stageParams = new CvCascadeBoostParams;
FileNode rnode = node[CC_STAGE_PARAMS];
if ( !stageParams->read( rnode ) )
return false;
-
+
featureParams = CvFeatureParams::create(cascadeParams.featureType);
rnode = node[CC_FEATURE_PARAMS];
if ( !featureParams->read( rnode ) )
return false;
- return true;
+ return true;
}
bool CvCascadeClassifier::readStages( const FileNode &node)
fs << FileStorage::getDefaultObjectName(filename) << "{";
if ( !baseFormat )
{
- Mat featureMap;
+ Mat featureMap;
getUsedFeaturesIdxMap( featureMap );
writeParams( fs );
fs << CC_STAGE_NUM << (int)stageClassifiers.size();
CvSeq* weak;
if ( cascadeParams.featureType != CvFeatureParams::HAAR )
CV_Error( CV_StsBadFunc, "old file format is used for Haar-like features only");
- fs << ICV_HAAR_SIZE_NAME << "[:" << cascadeParams.winSize.width <<
+ fs << ICV_HAAR_SIZE_NAME << "[:" << cascadeParams.winSize.width <<
cascadeParams.winSize.height << "]";
fs << ICV_HAAR_STAGES_NAME << "[";
for( size_t si = 0; si < stageClassifiers.size(); si++ )
int inner_node_idx = -1, total_inner_node_idx = -1;
queue<const CvDTreeNode*> inner_nodes_queue;
CvCascadeBoostTree* tree = *((CvCascadeBoostTree**) cvGetSeqElem( weak, wi ));
-
+
fs << "[";
/*sprintf( buf, "tree %d", wi );
CV_CALL( cvWriteComment( fs, buf, 1 ) );*/
const CvDTreeNode* tempNode;
-
+
inner_nodes_queue.push( tree->get_root() );
total_inner_node_idx++;
-
+
while (!inner_nodes_queue.empty())
{
tempNode = inner_nodes_queue.front();
node = fs.getFirstTopLevelNode();
if ( !fs.isOpened() )
break;
- CvCascadeBoost *tempStage = new CvCascadeBoost;
+ CvCascadeBoost *tempStage = new CvCascadeBoost;
if ( !tempStage->read( node, (CvFeatureEvaluator*)featureEvaluator, *((CvCascadeBoostParams*)stageParams )) )
{
int varCount = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize();
featureMap.create( 1, varCount, CV_32SC1 );
featureMap.setTo(Scalar(-1));
-
+
for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin();
it != stageClassifiers.end(); it++ )
((CvCascadeBoost*)((Ptr<CvCascadeBoost>)(*it)))->markUsedFeaturesInMap( featureMap );
-
+
for( int fi = 0, idx = 0; fi < varCount; fi++ )
if ( featureMap.at<int>(0, fi) >= 0 )
featureMap.ptr<int>(0)[fi] = idx++;
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "traincascade_features.h"
#include "cascadeclassifier.h"
CvFeatureParams::CvFeatureParams() : maxCatCount( 0 ), featSize( 1 )
{
- name = CC_FEATURE_PARAMS;
+ name = CC_FEATURE_PARAMS;
}
void CvFeatureParams::init( const CvFeatureParams& fp )
Ptr<CvFeatureParams> CvFeatureParams::create( int featureType )
{
return featureType == HAAR ? Ptr<CvFeatureParams>(new CvHaarFeatureParams) :
- featureType == LBP ? Ptr<CvFeatureParams>(new CvLBPFeatureParams) :
+ featureType == LBP ? Ptr<CvFeatureParams>(new CvLBPFeatureParams) :
featureType == HOG ? Ptr<CvFeatureParams>(new CvHOGFeatureParams) :
Ptr<CvFeatureParams>();
}
Ptr<CvFeatureEvaluator> CvFeatureEvaluator::create(int type)
{
return type == CvFeatureParams::HAAR ? Ptr<CvFeatureEvaluator>(new CvHaarEvaluator) :
- type == CvFeatureParams::LBP ? Ptr<CvFeatureEvaluator>(new CvLBPEvaluator) :
+ type == CvFeatureParams::LBP ? Ptr<CvFeatureEvaluator>(new CvLBPEvaluator) :
type == CvFeatureParams::HOG ? Ptr<CvFeatureEvaluator>(new CvHOGEvaluator) :
Ptr<CvFeatureEvaluator>();
}
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "haarfeatures.h"
#include "cascadeclassifier.h"
using namespace std;
CvHaarFeatureParams::CvHaarFeatureParams() : mode(BASIC)
-{
+{
name = HFP_NAME;
}
CvHaarFeatureParams::CvHaarFeatureParams( int _mode ) : mode( _mode )
{
- name = HFP_NAME;
+ name = HFP_NAME;
}
void CvHaarFeatureParams::init( const CvFeatureParams& fp )
void CvHaarFeatureParams::write( FileStorage &fs ) const
{
CvFeatureParams::write( fs );
- String modeStr = mode == BASIC ? CC_MODE_BASIC :
+ String modeStr = mode == BASIC ? CC_MODE_BASIC :
mode == CORE ? CC_MODE_CORE :
mode == ALL ? CC_MODE_ALL : String();
CV_Assert( !modeStr.empty() );
void CvHaarFeatureParams::printAttrs() const
{
CvFeatureParams::printAttrs();
- String mode_str = mode == BASIC ? CC_MODE_BASIC :
+ String mode_str = mode == BASIC ? CC_MODE_BASIC :
mode == CORE ? CC_MODE_CORE :
mode == ALL ? CC_MODE_ALL : 0;
cout << "mode: " << mode_str << endl;
if( mode != CvHaarFeatureParams::BASIC )
{
// haar_x4
- if ( (x+dx*4 <= winSize.width) && (y+dy <= winSize.height) )
+ if ( (x+dx*4 <= winSize.width) && (y+dy <= winSize.height) )
{
features.push_back( Feature( offset, false,
x, y, dx*4, dy, -1,
}
}
// x2_y2
- if ( (x+dx*2 <= winSize.width) && (y+dy*2 <= winSize.height) )
+ if ( (x+dx*2 <= winSize.width) && (y+dy*2 <= winSize.height) )
{
features.push_back( Feature( offset, false,
x, y, dx*2, dy*2, -1,
x, y, dx, dy, +2,
x+dx, y+dy, dx, dy, +2 ) );
}
- if (mode != CvHaarFeatureParams::BASIC)
- {
- if ( (x+dx*3 <= winSize.width) && (y+dy*3 <= winSize.height) )
+ if (mode != CvHaarFeatureParams::BASIC)
+ {
+ if ( (x+dx*3 <= winSize.width) && (y+dy*3 <= winSize.height) )
{
features.push_back( Feature( offset, false,
x , y , dx*3, dy*3, -1,
x+dx, y+dy, dx , dy , +9) );
}
}
- if (mode == CvHaarFeatureParams::ALL)
- {
+ if (mode == CvHaarFeatureParams::ALL)
+ {
// tilted haar_x2
- if ( (x+2*dx <= winSize.width) && (y+2*dx+dy <= winSize.height) && (x-dy>= 0) )
+ if ( (x+2*dx <= winSize.width) && (y+2*dx+dy <= winSize.height) && (x-dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx*2, dy, -1,
x, y, dx, dy, +2 ) );
}
// tilted haar_y2
- if ( (x+dx <= winSize.width) && (y+dx+2*dy <= winSize.height) && (x-2*dy>= 0) )
+ if ( (x+dx <= winSize.width) && (y+dx+2*dy <= winSize.height) && (x-2*dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx, 2*dy, -1,
x, y, dx, dy, +2 ) );
}
// tilted haar_x3
- if ( (x+3*dx <= winSize.width) && (y+3*dx+dy <= winSize.height) && (x-dy>= 0) )
+ if ( (x+3*dx <= winSize.width) && (y+3*dx+dy <= winSize.height) && (x-dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx*3, dy, -1,
x+dx, y+dx, dx, dy, +3 ) );
}
// tilted haar_y3
- if ( (x+dx <= winSize.width) && (y+dx+3*dy <= winSize.height) && (x-3*dy>= 0) )
+ if ( (x+dx <= winSize.width) && (y+dx+3*dy <= winSize.height) && (x-3*dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx, 3*dy, -1,
x-dy, y+dy, dx, dy, +3 ) );
}
// tilted haar_x4
- if ( (x+4*dx <= winSize.width) && (y+4*dx+dy <= winSize.height) && (x-dy>= 0) )
+ if ( (x+4*dx <= winSize.width) && (y+4*dx+dy <= winSize.height) && (x-dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx*4, dy, -1,
x+dx, y+dx, dx*2, dy, +2 ) );
}
// tilted haar_y4
- if ( (x+dx <= winSize.width) && (y+dx+4*dy <= winSize.height) && (x-4*dy>= 0) )
+ if ( (x+dx <= winSize.width) && (y+dx+4*dy <= winSize.height) && (x-4*dy>= 0) )
{
features.push_back( Feature( offset, true,
x, y, dx, 4*dy, -1,
fs << CC_RECTS << "[";
for( int ri = 0; ri < CV_HAAR_FEATURE_MAX && rect[ri].r.width != 0; ++ri )
{
- fs << "[:" << rect[ri].r.x << rect[ri].r.y <<
+ fs << "[:" << rect[ri].r.x << rect[ri].r.y <<
rect[ri].r.width << rect[ri].r.height << rect[ri].weight << "]";
}
fs << "]" << CC_TILTED << tilted;
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "cv.h"
#include "imagestorage.h"
#include <stdio.h>
for( size_t i = 0; i < count; i++ )
{
src = imread( imgFilenames[last++], 0 );
- if( src.empty() )
+ if( src.empty() )
continue;
round += last / count;
round = round % (winSize.width * winSize.height);
_offset.x = min( (int)round % winSize.width, src.cols - winSize.width );
_offset.y = min( (int)round / winSize.width, src.rows - winSize.height );
- if( !src.empty() && src.type() == CV_8UC1
+ if( !src.empty() && src.type() == CV_8UC1
&& offset.x >= 0 && offset.y >= 0 )
break;
}
point = offset = _offset;
scale = max( ((float)winSize.width + point.x) / ((float)src.cols),
((float)winSize.height + point.y) / ((float)src.rows) );
-
+
Size sz( (int)(scale*src.cols + 0.5F), (int)(scale*src.rows + 0.5F) );
resize( src, img, sz );
return true;
CV_Assert( _img.rows == winSize.height );
if( img.empty() )
- if ( !nextImg() )
+ if ( !nextImg() )
return false;
Mat mat( winSize.height, winSize.width, CV_8UC1,
resize( src, img, Size( (int)(scale*src.cols), (int)(scale*src.rows) ) );
else
{
- if ( !nextImg() )
+ if ( !nextImg() )
return false;
}
}
if( !file )
return false;
- short tmp = 0;
+ short tmp = 0;
if( fread( &count, sizeof( count ), 1, file ) != 1 ||
fread( &vecSize, sizeof( vecSize ), 1, file ) != 1 ||
fread( &tmp, sizeof( tmp ), 1, file ) != 1 ||
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "lbpfeatures.h"
#include "cascadeclassifier.h"
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
#include "cv.h"
#include "cascadeclassifier.h"
int precalcValBufSize = 256,
precalcIdxBufSize = 256;
bool baseFormatSave = false;
-
+
CvCascadeParams cascadeParams;
CvCascadeBoostParams stageParams;
Ptr<CvFeatureParams> featureParams[] = { Ptr<CvFeatureParams>(new CvHaarFeatureParams),
- Ptr<CvFeatureParams>(new CvLBPFeatureParams),
+ Ptr<CvFeatureParams>(new CvLBPFeatureParams),
Ptr<CvFeatureParams>(new CvHOGFeatureParams)
};
int fc = sizeof(featureParams)/sizeof(featureParams[0]);
{
for( int fi = 0; fi < fc; fi++ )
{
- set = featureParams[fi]->scanAttr(argv[i], argv[i+1]);
+ set = featureParams[fi]->scanAttr(argv[i], argv[i+1]);
if ( !set )
{
i++;
}
}
}
-
+
classifier.train( cascadeDirName,
vecName,
- bgName,
- numPos, numNeg,
+ bgName,
+ numPos, numNeg,
precalcValBufSize, precalcIdxBufSize,
numStages,
cascadeParams,
endif()
if(MSVC)
+ string(REGEX REPLACE "^ *| * $" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ string(REGEX REPLACE "^ *| * $" "" CMAKE_CXX_FLAGS_INIT "${CMAKE_CXX_FLAGS_INIT}")
if(CMAKE_CXX_FLAGS STREQUAL CMAKE_CXX_FLAGS_INIT)
# override cmake default exception handling option
string(REPLACE "/EHsc" "/EHa" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
endif()
+set(OPENCV_EXTRA_FLAGS "")
set(OPENCV_EXTRA_C_FLAGS "")
-set(OPENCV_EXTRA_C_FLAGS_RELEASE "")
-set(OPENCV_EXTRA_C_FLAGS_DEBUG "")
+set(OPENCV_EXTRA_CXX_FLAGS "")
+set(OPENCV_EXTRA_FLAGS_RELEASE "")
+set(OPENCV_EXTRA_FLAGS_DEBUG "")
set(OPENCV_EXTRA_EXE_LINKER_FLAGS "")
set(OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE "")
set(OPENCV_EXTRA_EXE_LINKER_FLAGS_DEBUG "")
+macro(add_extra_compiler_option option)
+ if(CMAKE_BUILD_TYPE)
+ set(CMAKE_TRY_COMPILE_CONFIGURATION ${CMAKE_BUILD_TYPE})
+ endif()
+ ocv_check_flag_support(CXX "${option}" _varname "${OPENCV_EXTRA_CXX_FLAGS} ${ARGN}")
+ if(${_varname})
+ set(OPENCV_EXTRA_CXX_FLAGS "${OPENCV_EXTRA_CXX_FLAGS} ${option}")
+ endif()
+
+ ocv_check_flag_support(C "${option}" _varname "${OPENCV_EXTRA_C_FLAGS} ${ARGN}")
+ if(${_varname})
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} ${option}")
+ endif()
+endmacro()
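# Usage sketch (hypothetical options, shown for illustration only): the macro
# appends an option to OPENCV_EXTRA_CXX_FLAGS and/or OPENCV_EXTRA_C_FLAGS only
# when the corresponding compiler accepts it, so unsupported flags are skipped:
#   add_extra_compiler_option(-Wextra)
#   add_extra_compiler_option(-fno-strict-overflow)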
+
if(MINGW)
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=40838
# here we are trying to workaround the problem
- include(CheckCXXCompilerFlag)
- CHECK_CXX_COMPILER_FLAG(-mstackrealign HAVE_STACKREALIGN_FLAG)
- if(HAVE_STACKREALIGN_FLAG)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mstackrealign")
- else()
- CHECK_CXX_COMPILER_FLAG(-mpreferred-stack-boundary=2 HAVE_PREFERRED_STACKBOUNDARY_FLAG)
- if(HAVE_PREFERRED_STACKBOUNDARY_FLAG)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mstackrealign")
- endif()
+ add_extra_compiler_option(-mstackrealign)
+ if(NOT HAVE_CXX_MSTACKREALIGN)
+ add_extra_compiler_option(-mpreferred-stack-boundary=2)
endif()
endif()
+if(OPENCV_CAN_BREAK_BINARY_COMPATIBILITY)
+ add_definitions(-DOPENCV_CAN_BREAK_BINARY_COMPATIBILITY)
+endif()
+
if(CMAKE_COMPILER_IS_GNUCXX)
# High level of warnings.
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wall")
+ add_extra_compiler_option(-Wall)
+ add_extra_compiler_option(-Werror=return-type)
+ if(OPENCV_CAN_BREAK_BINARY_COMPATIBILITY)
+ add_extra_compiler_option(-Werror=non-virtual-dtor)
+ endif()
+ add_extra_compiler_option(-Werror=address)
+ add_extra_compiler_option(-Werror=sequence-point)
+ add_extra_compiler_option(-Wformat)
+ add_extra_compiler_option(-Werror=format-security -Wformat)
+ add_extra_compiler_option(-Wmissing-declarations)
+ add_extra_compiler_option(-Wmissing-prototypes)
+ add_extra_compiler_option(-Wstrict-prototypes)
+ add_extra_compiler_option(-Wundef)
+ add_extra_compiler_option(-Winit-self)
+ add_extra_compiler_option(-Wpointer-arith)
+ add_extra_compiler_option(-Wshadow)
+
+ if(ENABLE_NOISY_WARNINGS)
+ add_extra_compiler_option(-Wcast-align)
+ add_extra_compiler_option(-Wstrict-aliasing=2)
+ else()
+ add_extra_compiler_option(-Wno-narrowing)
+ add_extra_compiler_option(-Wno-delete-non-virtual-dtor)
+ add_extra_compiler_option(-Wno-unnamed-type-template-args)
+ endif()
  # The -Wno-long-long is required on 64-bit systems when including system headers.
if(X86_64)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wno-long-long")
+ add_extra_compiler_option(-Wno-long-long)
endif()
# We need pthread's
if(UNIX AND NOT ANDROID)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -pthread")
+ add_extra_compiler_option(-pthread)
endif()
if(OPENCV_WARNINGS_ARE_ERRORS)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Werror")
+ add_extra_compiler_option(-Werror)
endif()
if(X86 AND NOT MINGW64 AND NOT X86_64 AND NOT APPLE)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -march=i686")
+ add_extra_compiler_option(-march=i686)
endif()
# Other optimizations
if(ENABLE_OMIT_FRAME_POINTER)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -fomit-frame-pointer")
+ add_extra_compiler_option(-fomit-frame-pointer)
else()
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -fno-omit-frame-pointer")
+ add_extra_compiler_option(-fno-omit-frame-pointer)
endif()
if(ENABLE_FAST_MATH)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -ffast-math")
+ add_extra_compiler_option(-ffast-math)
endif()
if(ENABLE_POWERPC)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mcpu=G3 -mtune=G5")
+ add_extra_compiler_option("-mcpu=G3 -mtune=G5")
endif()
if(ENABLE_SSE)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse")
+ add_extra_compiler_option(-msse)
endif()
if(ENABLE_SSE2)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse2")
+ add_extra_compiler_option(-msse2)
endif()
# SSE3 and further should be disabled under MingW because it generates compiler errors
if(NOT MINGW)
if(ENABLE_SSE3)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse3")
+ add_extra_compiler_option(-msse3)
endif()
if(${CMAKE_OPENCV_GCC_VERSION_NUM} GREATER 402)
if(HAVE_GCC42_OR_NEWER OR APPLE)
if(ENABLE_SSSE3)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mssse3")
+ add_extra_compiler_option(-mssse3)
endif()
if(HAVE_GCC43_OR_NEWER)
if(ENABLE_SSE41)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse4.1")
+ add_extra_compiler_option(-msse4.1)
endif()
if(ENABLE_SSE42)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse4.2")
+ add_extra_compiler_option(-msse4.2)
endif()
endif()
endif()
if(X86 OR X86_64)
if(NOT APPLE AND CMAKE_SIZEOF_VOID_P EQUAL 4)
- if(ENABLE_SSE2)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mfpmath=sse")# !! important - be on the same wave with x64 compilers
- else()
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mfpmath=387")
- endif()
+ if(ENABLE_SSE2)
+ add_extra_compiler_option(-mfpmath=sse)# !! important - be on the same wave with x64 compilers
+ else()
+ add_extra_compiler_option(-mfpmath=387)
+ endif()
endif()
endif()
# Profiling?
if(ENABLE_PROFILING)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -pg -g")
+ add_extra_compiler_option("-pg -g")
# turn off incompatible options
- foreach(flags CMAKE_CXX_FLAGS CMAKE_C_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG OPENCV_EXTRA_C_FLAGS_RELEASE)
+ foreach(flags CMAKE_CXX_FLAGS CMAKE_C_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG
+ OPENCV_EXTRA_FLAGS_RELEASE OPENCV_EXTRA_FLAGS_DEBUG OPENCV_EXTRA_C_FLAGS OPENCV_EXTRA_CXX_FLAGS)
string(REPLACE "-fomit-frame-pointer" "" ${flags} "${${flags}}")
string(REPLACE "-ffunction-sections" "" ${flags} "${${flags}}")
endforeach()
elseif(NOT APPLE AND NOT ANDROID)
# Remove unreferenced functions: function level linking
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -ffunction-sections")
+ add_extra_compiler_option(-ffunction-sections)
endif()
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -DNDEBUG")
- set(OPENCV_EXTRA_C_FLAGS_DEBUG "${OPENCV_EXTRA_C_FLAGS_DEBUG} -O0 -DDEBUG -D_DEBUG")
+ set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} -DNDEBUG")
+ set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG} -O0 -DDEBUG -D_DEBUG")
if(BUILD_WITH_DEBUG_INFO)
- set(OPENCV_EXTRA_C_FLAGS_DEBUG "${OPENCV_EXTRA_C_FLAGS_DEBUG} -ggdb3")
+ set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG} -ggdb3")
endif()
endif()
if(MSVC)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /D _CRT_SECURE_NO_DEPRECATE /D _CRT_NONSTDC_NO_DEPRECATE /D _SCL_SECURE_NO_WARNINGS")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /D _CRT_SECURE_NO_DEPRECATE /D _CRT_NONSTDC_NO_DEPRECATE /D _SCL_SECURE_NO_WARNINGS")
# 64-bit portability warnings, in MSVC80
if(MSVC80)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Wp64")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /Wp64")
endif()
if(BUILD_WITH_DEBUG_INFO)
endif()
# Remove unreferenced functions: function level linking
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Gy")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /Gy")
if(NOT MSVC_VERSION LESS 1400)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /bigobj")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /bigobj")
endif()
if(BUILD_WITH_DEBUG_INFO)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} /Zi")
+ set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE} /Zi")
endif()
if(NOT MSVC64)
# 64-bit MSVC compiler uses SSE/SSE2 by default
if(ENABLE_SSE)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:SSE")
endif()
if(ENABLE_SSE2)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE2")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:SSE2")
endif()
endif()
-
+
if(ENABLE_SSE3)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE3")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:SSE3")
endif()
if(ENABLE_SSE4_1)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE4.1")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /arch:SSE4.1")
endif()
-
+
if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Oi")
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /Oi")
endif()
-
+
if(X86 OR X86_64)
if(CMAKE_SIZEOF_VOID_P EQUAL 4 AND ENABLE_SSE2)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /fp:fast")# !! important - be on the same wave with x64 compilers
+ set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS} /fp:fast")# !! important - be on the same wave with x64 compilers
endif()
endif()
endif()
if(NOT BUILD_SHARED_LIBS AND CMAKE_COMPILER_IS_GNUCXX AND NOT ANDROID)
# Android does not need these settings because they are already set by toolchain file
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} stdc++)
- set(OPENCV_EXTRA_C_FLAGS "-fPIC ${OPENCV_EXTRA_C_FLAGS}")
+ set(OPENCV_EXTRA_FLAGS "-fPIC ${OPENCV_EXTRA_FLAGS}")
endif()
# Add user supplied extra options (optimization, etc...)
# ==========================================================
-set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS}" CACHE INTERNAL "Extra compiler options")
-set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE}" CACHE INTERNAL "Extra compiler options for Release build")
-set(OPENCV_EXTRA_C_FLAGS_DEBUG "${OPENCV_EXTRA_C_FLAGS_DEBUG}" CACHE INTERNAL "Extra compiler options for Debug build")
+set(OPENCV_EXTRA_FLAGS "${OPENCV_EXTRA_FLAGS}" CACHE INTERNAL "Extra compiler options")
+set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS}" CACHE INTERNAL "Extra compiler options for C sources")
+set(OPENCV_EXTRA_CXX_FLAGS "${OPENCV_EXTRA_CXX_FLAGS}" CACHE INTERNAL "Extra compiler options for C++ sources")
+set(OPENCV_EXTRA_FLAGS_RELEASE "${OPENCV_EXTRA_FLAGS_RELEASE}" CACHE INTERNAL "Extra compiler options for Release build")
+set(OPENCV_EXTRA_FLAGS_DEBUG "${OPENCV_EXTRA_FLAGS_DEBUG}" CACHE INTERNAL "Extra compiler options for Debug build")
set(OPENCV_EXTRA_EXE_LINKER_FLAGS "${OPENCV_EXTRA_EXE_LINKER_FLAGS}" CACHE INTERNAL "Extra linker flags")
set(OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE "${OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE}" CACHE INTERNAL "Extra linker flags for Release build")
set(OPENCV_EXTRA_EXE_LINKER_FLAGS_DEBUG "${OPENCV_EXTRA_EXE_LINKER_FLAGS_DEBUG}" CACHE INTERNAL "Extra linker flags for Debug build")
#combine all "extra" options
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENCV_EXTRA_C_FLAGS}")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENCV_EXTRA_C_FLAGS}")
-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${OPENCV_EXTRA_C_FLAGS_RELEASE}")
-set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${OPENCV_EXTRA_C_FLAGS_RELEASE}")
-set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${OPENCV_EXTRA_C_FLAGS_DEBUG}")
-set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${OPENCV_EXTRA_C_FLAGS_DEBUG}")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENCV_EXTRA_FLAGS} ${OPENCV_EXTRA_C_FLAGS}")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPENCV_EXTRA_FLAGS} ${OPENCV_EXTRA_CXX_FLAGS}")
+set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${OPENCV_EXTRA_FLAGS_RELEASE}")
+set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${OPENCV_EXTRA_FLAGS_RELEASE}")
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${OPENCV_EXTRA_FLAGS_DEBUG}")
+set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${OPENCV_EXTRA_FLAGS_DEBUG}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OPENCV_EXTRA_EXE_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} ${OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE}")
set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} ${OPENCV_EXTRA_EXE_LINKER_FLAGS_DEBUG}")
string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}")
string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}")
-
+
+ if(NOT ENABLE_NOISY_WARNINGS AND MSVC_VERSION EQUAL 1400)
+ ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4510 /wd4610 /wd4312 /wd4201 /wd4244 /wd4328 /wd4267)
+ endif()
+
# allow extern "C" functions throw exceptions
foreach(flags CMAKE_C_FLAGS CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
string(REPLACE "/EHsc-" "/EHs" ${flags} "${${flags}}")
string(REPLACE "/EHsc" "/EHs" ${flags} "${${flags}}")
-
+
string(REPLACE "/Zm1000" "" ${flags} "${${flags}}")
endforeach()
set(CV_ICC __INTEL_COMPILER_FOR_WINDOWS)
endif()
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR (UNIX AND CV_ICC))
+ set(CV_COMPILER_IS_GNU TRUE)
+else()
+ set(CV_COMPILER_IS_GNU FALSE)
+endif()
+
# ----------------------------------------------------------------------------
# Detect GNU version:
# ----------------------------------------------------------------------------
if(ANDROID)
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/tbb")
- ocv_include_directories(${TBB_INCLUDE_DIRS})
+ include_directories(SYSTEM ${TBB_INCLUDE_DIRS})
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} tbb)
add_definitions(-DTBB_USE_GCC_BUILTINS=1 -D__TBB_GCC_BUILTIN_ATOMICS_PRESENT=1 -D__TBB_USE_GENERIC_DWORD_LOAD_STORE=1)
set(HAVE_TBB 1)
set(TIFF_VERSION_BIG ${TIFF_BIGTIFF_VERSION})
endif()
+if(NOT TIFF_VERSION_STRING AND TIFF_INCLUDE_DIR)
+ list(GET TIFF_INCLUDE_DIR 0 _TIFF_INCLUDE_DIR)
+ if(EXISTS "${_TIFF_INCLUDE_DIR}/tiffvers.h")
+ file(STRINGS "${_TIFF_INCLUDE_DIR}/tiffvers.h" tiff_version_str REGEX "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version .*")
+ string(REGEX REPLACE "^#define[\t ]+TIFFLIB_VERSION_STR[\t ]+\"LIBTIFF, Version +([^ \\n]*).*" "\\1" TIFF_VERSION_STRING "${tiff_version_str}")
+ unset(tiff_version_str)
+ endif()
+ unset(_TIFF_INCLUDE_DIR)
+endif()
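# For illustration, with an assumed tiffvers.h line such as
#   #define TIFFLIB_VERSION_STR "LIBTIFF, Version 3.9.5 ..."
# the REGEX REPLACE above yields TIFF_VERSION_STRING == "3.9.5".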
+
# --- libjpeg (optional) ---
if(WITH_JPEG)
if(BUILD_JPEG)
# Try to find the XIMEA API path in registry.
GET_FILENAME_COMPONENT(XIMEA_PATH "[HKEY_CURRENT_USER\\Software\\XIMEA\\CamSupport\\API;Path]" ABSOLUTE)
- if(XIMEA_PATH)
+  if(EXISTS ${XIMEA_PATH})
set(XIMEA_FOUND 1)
# set LIB folders
ocv_list_sort(OPENCV_MOD_LIST)
foreach(m ${OPENCV_MOD_LIST})
string(TOUPPER "${m}" m)
- set(OPENCV_MODULE_DEFINITIONS_CONFIGMAKE "${OPENCV_MODULE_DEFINITIONS_CONFIGMAKE}#define HAVE_${m} 1\n")
+ set(OPENCV_MODULE_DEFINITIONS_CONFIGMAKE "${OPENCV_MODULE_DEFINITIONS_CONFIGMAKE}#define HAVE_${m}\n")
endforeach()
set(OPENCV_MODULE_DEFINITIONS_CONFIGMAKE "${OPENCV_MODULE_DEFINITIONS_CONFIGMAKE}\n")
-set(OPENCV_MOD_LIST ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MODULES_DISABLED_AUTO})
-ocv_list_sort(OPENCV_MOD_LIST)
-foreach(m ${OPENCV_MOD_LIST})
- string(TOUPPER "${m}" m)
- set(OPENCV_MODULE_DEFINITIONS_CONFIGMAKE "${OPENCV_MODULE_DEFINITIONS_CONFIGMAKE}#undef HAVE_${m}\n")
-endforeach()
+#set(OPENCV_MOD_LIST ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MODULES_DISABLED_AUTO} ${OPENCV_MODULES_DISABLED_FORCE})
+#ocv_list_sort(OPENCV_MOD_LIST)
+#foreach(m ${OPENCV_MOD_LIST})
+# string(TOUPPER "${m}" m)
+# set(OPENCV_MODULE_DEFINITIONS_CONFIGMAKE "${OPENCV_MODULE_DEFINITIONS_CONFIGMAKE}#undef HAVE_${m}\n")
+#endforeach()
configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/opencv_modules.hpp.in" "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp")
install(FILES "${OPENCV_CONFIG_FILE_INCLUDE_DIR}/opencv2/opencv_modules.hpp" DESTINATION ${OPENCV_INCLUDE_PREFIX}/opencv2 COMPONENT main)
set(OPENCV_MODULES_PUBLIC ${OPENCV_MODULES_PUBLIC} "${the_module}" CACHE INTERNAL "List of OpenCV modules marked for export")
endif()
endif()
-
+
# add self to the world dependencies
if(NOT DEFINED OPENCV_MODULE_IS_PART_OF_WORLD AND NOT OPENCV_MODULE_${the_module}_CLASS STREQUAL "BINDINGS" OR OPENCV_MODULE_IS_PART_OF_WORLD)
ocv_add_dependencies(opencv_world OPTIONAL ${the_module})
macro(ocv_add_precompiled_headers the_target)
if("${the_target}" MATCHES "^opencv_test_.*$")
SET(pch_path "test/test_")
+ elseif("${the_target}" MATCHES "opencv_perf_gpu_cpu")
+ SET(pch_path "perf_cpu/perf_cpu_")
elseif("${the_target}" MATCHES "^opencv_perf_.*$")
SET(pch_path "perf/perf_")
else()
ENDIF()
SET(_PCH_include_prefix "-I")
+ SET(_PCH_isystem_prefix "-isystem")
ELSEIF(WIN32)
SET(PCHSupport_FOUND TRUE) # for experimental msvc support
SET(_PCH_include_prefix "/I")
+ SET(_PCH_isystem_prefix "/I")
ELSE()
SET(PCHSupport_FOUND FALSE)
ENDIF()
GET_DIRECTORY_PROPERTY(DIRINC INCLUDE_DIRECTORIES )
FOREACH(item ${DIRINC})
- LIST(APPEND ${_out_compile_flags} "${_PCH_include_prefix}\"${item}\"")
+ if(item MATCHES "^${OpenCV_SOURCE_DIR}/modules/")
+ LIST(APPEND ${_out_compile_flags} "${_PCH_include_prefix}\"${item}\"")
+ else()
+ LIST(APPEND ${_out_compile_flags} "${_PCH_isystem_prefix}\"${item}\"")
+ endif()
ENDFOREACH(item)
GET_DIRECTORY_PROPERTY(_directory_flags DEFINITIONS)
ADD_CUSTOM_COMMAND(
OUTPUT "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "#include \\\"${_include_file}\\\"" > "${${_dephelp}}"
+ COMMAND ${CMAKE_COMMAND} -E echo "int testfunction();" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "int testfunction()" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "{" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo " return 0;" >> "${${_dephelp}}"
ADD_CUSTOM_COMMAND(
OUTPUT "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${_include_file}\\\"" > "${${_dephelp}}"
+ COMMAND ${CMAKE_COMMAND} -E echo "int testfunction\\(\\)\\;" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "int testfunction\\(\\)" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo "{" >> "${${_dephelp}}"
COMMAND ${CMAKE_COMMAND} -E echo " \\return 0\\;" >> "${${_dephelp}}"
if("${__abs_dir}" MATCHES "^${OpenCV_SOURCE_DIR}" OR "${__abs_dir}" MATCHES "^${OpenCV_BINARY_DIR}")
list(APPEND __add_before "${dir}")
else()
- include_directories(AFTER "${dir}")
+ include_directories(AFTER SYSTEM "${dir}")
endif()
endforeach()
include_directories(BEFORE ${__add_before})
endforeach()
endmacro()
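# Note: directories outside the OpenCV source/binary trees are added as SYSTEM
# include paths (e.g. -isystem with GCC/Clang), so warnings coming from
# third-party headers do not clutter the build output.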
+set(OCV_COMPILER_FAIL_REGEX
+ "command line option .* is valid for .* but not for C\\+\\+" # GNU
+ "unrecognized .*option" # GNU
+ "unknown .*option" # Clang
+ "ignoring unknown option" # MSVC
+ "warning D9002" # MSVC, any lang
+ "option .*not supported" # Intel
+ "[Uu]nknown option" # HP
+ "[Ww]arning: [Oo]ption" # SunPro
+ "command option .* is not recognized" # XL
+ "not supported in this configuration; ignored" # AIX
+ "File with unknown suffix passed to linker" # PGI
+ "WARNING: unknown flag:" # Open64
+ )
+
+MACRO(ocv_check_compiler_flag LANG FLAG RESULT)
+ if(NOT DEFINED ${RESULT})
+ if("_${LANG}_" MATCHES "_CXX_")
+ set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/src.cxx")
+ if("${CMAKE_CXX_FLAGS} ${FLAG} " MATCHES "-Werror " OR "${CMAKE_CXX_FLAGS} ${FLAG} " MATCHES "-Werror=unknown-pragmas ")
+ FILE(WRITE "${_fname}" "int main() { return 0; }\n")
+ else()
+ FILE(WRITE "${_fname}" "#pragma\nint main() { return 0; }\n")
+ endif()
+ elseif("_${LANG}_" MATCHES "_C_")
+ set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/src.c")
+ if("${CMAKE_C_FLAGS} ${FLAG} " MATCHES "-Werror " OR "${CMAKE_C_FLAGS} ${FLAG} " MATCHES "-Werror=unknown-pragmas ")
+ FILE(WRITE "${_fname}" "int main(void) { return 0; }\n")
+ else()
+ FILE(WRITE "${_fname}" "#pragma\nint main(void) { return 0; }\n")
+ endif()
+ else()
+ unset(_fname)
+ endif()
+ if(_fname)
+ MESSAGE(STATUS "Performing Test ${RESULT}")
+ TRY_COMPILE(${RESULT}
+ ${CMAKE_BINARY_DIR}
+ "${_fname}"
+ COMPILE_DEFINITIONS "${FLAG}"
+ OUTPUT_VARIABLE OUTPUT)
+
+ FOREACH(_regex ${OCV_COMPILER_FAIL_REGEX})
+ IF("${OUTPUT}" MATCHES "${_regex}")
+ SET(${RESULT} 0)
+ break()
+ ENDIF()
+ ENDFOREACH()
+
+ IF(${RESULT})
+ SET(${RESULT} 1 CACHE INTERNAL "Test ${RESULT}")
+ MESSAGE(STATUS "Performing Test ${RESULT} - Success")
+ ELSE(${RESULT})
+ MESSAGE(STATUS "Performing Test ${RESULT} - Failed")
+ SET(${RESULT} "" CACHE INTERNAL "Test ${RESULT}")
+ ENDIF(${RESULT})
+ else()
+ SET(${RESULT} 0)
+ endif()
+ endif()
+ENDMACRO()
+
+macro(ocv_check_flag_support lang flag varname)
+ if("_${lang}_" MATCHES "_CXX_")
+ set(_lang CXX)
+ elseif("_${lang}_" MATCHES "_C_")
+ set(_lang C)
+ else()
+ set(_lang ${lang})
+ endif()
+
+ string(TOUPPER "${flag}" ${varname})
+ string(REGEX REPLACE "^(/|-)" "HAVE_${_lang}_" ${varname} "${${varname}}")
+ string(REGEX REPLACE " -|-|=| |\\." "_" ${varname} "${${varname}}")
+
+ ocv_check_compiler_flag("${_lang}" "${ARGN} ${flag}" ${${varname}})
+endmacro()
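# How the two macros above cooperate (illustrative, with assumed flags):
# ocv_check_compiler_flag() test-compiles a tiny source file with the candidate
# option and also matches the build output against OCV_COMPILER_FAIL_REGEX,
# because several compilers merely warn about unknown options instead of failing.
# ocv_check_flag_support() derives the cached result name from the flag itself:
#   ocv_check_flag_support(CXX -Wno-long-long _var)   # result cached as HAVE_CXX_WNO_LONG_LONG
#   ocv_check_flag_support(C   /wd4244        _var)   # result cached as HAVE_C_WD4244
# afterwards _var holds that variable name, so callers test it with if(${_var}).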
+
+# turns off warnings
+macro(ocv_warnings_disable)
+ if(NOT ENABLE_NOISY_WARNINGS)
+ set(_flag_vars "")
+ set(_msvc_warnings "")
+ set(_gxx_warnings "")
+ foreach(arg ${ARGN})
+ if(arg MATCHES "^CMAKE_")
+ list(APPEND _flag_vars ${arg})
+ elseif(arg MATCHES "^/wd")
+ list(APPEND _msvc_warnings ${arg})
+ elseif(arg MATCHES "^-W")
+ list(APPEND _gxx_warnings ${arg})
+ endif()
+ endforeach()
+ if(MSVC AND _msvc_warnings AND _flag_vars)
+ foreach(var ${_flag_vars})
+ foreach(warning ${_msvc_warnings})
+ set(${var} "${${var}} ${warning}")
+ endforeach()
+ endforeach()
+ elseif(CV_COMPILER_IS_GNU AND _gxx_warnings AND _flag_vars)
+ foreach(var ${_flag_vars})
+ foreach(warning ${_gxx_warnings})
+ if(NOT warning MATCHES "^-Wno-")
+ string(REPLACE "${warning}" "" ${var} "${${var}}")
+ string(REPLACE "-W" "-Wno-" warning "${warning}")
+ endif()
+ ocv_check_flag_support(${var} "${warning}" _varname)
+ if(${_varname})
+ set(${var} "${${var}} ${warning}")
+ endif()
+ endforeach()
+ endforeach()
+ endif()
+ unset(_flag_vars)
+ unset(_msvc_warnings)
+ unset(_gxx_warnings)
+ endif(NOT ENABLE_NOISY_WARNINGS)
+endmacro()
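# Behaviour sketch (hypothetical arguments): for MSVC the listed /wdNNNN options
# are appended verbatim to every named flag variable; for GCC-compatible
# compilers each listed -Wfoo is stripped from the variable and re-added as
# -Wno-foo when the compiler supports it, e.g.
#   ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshadow /wd4127)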
+
# Provides an option that the user can optionally select.
# Can accept condition to control when option is available for user.
# Usage:
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
int found = 0;
CvCBQuad *quads = 0, **quad_group = 0;
CvCBCorner *corners = 0, **corner_group = 0;
-
+
try
{
int k = 0;
if( out_corner_count )
*out_corner_count = 0;
-
+
IplImage _img;
int check_chessboard_result;
- int quad_count = 0, group_idx = 0, i = 0, dilations = 0;
-
+ int quad_count = 0, group_idx = 0, dilations = 0;
+
img = cvGetMat( img, &stub );
//debug_img = img;
for( dilations = min_dilations; dilations <= max_dilations; dilations++ )
{
if (found)
- break; // already found it
-
+ break; // already found it
+
cvFree(&quads);
cvFree(&corners);
cvCopy(dbg_img, dbg1_img);
cvNamedWindow("all_quads", 1);
// copy corners to temp array
- for( i = 0; i < quad_count; i++ )
+ for(int i = 0; i < quad_count; i++ )
{
for (int k=0; k<4; k++)
{
cvCopy(dbg_img,dbg2_img);
cvNamedWindow("connected_group", 1);
// copy corners to temp array
- for( i = 0; i < quad_count; i++ )
+ for(int i = 0; i < quad_count; i++ )
{
if (quads[i].group_idx == group_idx)
for (int k=0; k<4; k++)
#endif
if (count == 0)
- continue; // haven't found inner quads
+ continue; // haven't found inner quads
// If count is more than it should be, this will remove those quads
float sum_dist = 0;
int total = 0;
- for( i = 0; i < n; i++ )
+ for(int i = 0; i < n; i++ )
{
int ni = 0;
float avgi = corner_group[i]->meanDist(&ni);
if( count > 0 || (out_corner_count && -count > *out_corner_count) )
{
// copy corners to output array
- for( i = 0; i < n; i++ )
+ for(int i = 0; i < n; i++ )
out_corners[i] = corner_group[i]->pt;
if( out_corner_count )
if( found )
found = icvCheckBoardMonotony( out_corners, pattern_size );
- // check that none of the found corners is too close to the image boundary
+ // check that none of the found corners is too close to the image boundary
if( found )
- {
- const int BORDER = 8;
- for( k = 0; k < pattern_size.width*pattern_size.height; k++ )
- {
- if( out_corners[k].x <= BORDER || out_corners[k].x > img->cols - BORDER ||
- out_corners[k].y <= BORDER || out_corners[k].y > img->rows - BORDER )
- break;
- }
-
- found = k == pattern_size.width*pattern_size.height;
- }
+ {
+ const int BORDER = 8;
+ for( k = 0; k < pattern_size.width*pattern_size.height; k++ )
+ {
+ if( out_corners[k].x <= BORDER || out_corners[k].x > img->cols - BORDER ||
+ out_corners[k].y <= BORDER || out_corners[k].y > img->rows - BORDER )
+ break;
+ }
+
+ found = k == pattern_size.width*pattern_size.height;
+ }
if( found && pattern_size.height % 2 == 0 && pattern_size.width % 2 == 0 )
{
double dy0 = out_corners[last_row].y - out_corners[0].y;
if( dy0 < 0 )
{
- int i, n = pattern_size.width*pattern_size.height;
- for( i = 0; i < n/2; i++ )
+ int n = pattern_size.width*pattern_size.height;
+ for(int i = 0; i < n/2; i++ )
{
CvPoint2D32f temp;
CV_SWAP(out_corners[i], out_corners[n-i-1], temp);
cvFree(&corner_group);
throw;
}
-
+
cvFree(&quads);
cvFree(&corners);
cvFree(&quad_group);
icvCheckBoardMonotony( CvPoint2D32f* corners, CvSize pattern_size )
{
int i, j, k;
-
+
for( k = 0; k < 2; k++ )
{
for( i = 0; i < (k == 0 ? pattern_size.height : pattern_size.width); i++ )
{
cv::Ptr<CvMemStorage> temp_storage = cvCreateChildMemStorage( storage );
CvSeq* stack = cvCreateSeq( 0, sizeof(*stack), sizeof(void*), temp_storage );
- int i;
// first find an interior quad
CvCBQuad *start = NULL;
- for (i=0; i<quad_count; i++)
+ for (int i=0; i<quad_count; i++)
{
if (quads[i]->count == 4)
{
case 1:
col += 2; break;
case 2:
- row += 2; break;
+ row += 2; break;
case 3:
col -= 2; break;
}
}
}
- for (i=col_min; i<=col_max; i++)
+ for (int i=col_min; i<=col_max; i++)
PRINTF("HIST[%d] = %d\n", i, col_hist[i]);
// analyze inner quad structure
// if there is an outer quad missing, fill it in
// first order all inner quads
int found = 0;
- for (i=0; i<quad_count; i++)
+ for (int i=0; i<quad_count; i++)
{
if (quads[i]->count == 4)
{ // ok, look at neighbors
case 1:
col += 2; break;
case 2:
- row += 2; break;
+ row += 2; break;
case 3:
col -= 2; break;
}
// final trimming of outer quads
- if (dcol == w && drow == h) // found correct inner quads
+ if (dcol == w && drow == h) // found correct inner quads
{
PRINTF("Inner bounds ok, check outer quads\n");
int rcount = quad_count;
if (quads[i]->neighbors[j] && quads[i]->neighbors[j]->ordered)
outer = true;
}
- if (!outer) // not an outer quad, eliminate
+ if (!outer) // not an outer quad, eliminate
{
PRINTF("Removing quad %d\n", i);
icvRemoveQuadFromGroup(quads,rcount,quads[i]);
quad->count += 1;
q->neighbors[j] = quad;
q->group_idx = quad->group_idx;
- q->count = 1; // number of neighbors
+ q->count = 1; // number of neighbors
q->ordered = false;
q->edge_len = quad->edge_len;
int width = 0, height = 0;
int hist[5] = {0,0,0,0,0};
CvCBCorner* first = 0, *first2 = 0, *right, *cur, *below, *c;
-
+
// build dual graph, which vertices are internal quad corners
// and two vertices are connected iff they lie on the same quad edge
for( i = 0; i < quad_count; i++ )
result = corner_count;
finalize:
-
+
if( result <= 0 )
{
corner_count = MIN( corner_count, pattern_size.width*pattern_size.height );
CV_POLY_APPROX_DP, (float)approx_level );
if( dst_contour->total == 4 )
break;
-
+
// we call this again on its own output, because sometimes
// cvApproxPoly() does not simplify as much as it should.
dst_contour = cvApproxPoly( dst_contour, sizeof(CvContour), temp_storage,
#endif
if (isFound)
{
- switch(parameters.gridType)
- {
+ switch(parameters.gridType)
+ {
case CirclesGridFinderParameters::SYMMETRIC_GRID:
boxFinder.getHoles(centers);
break;
case CirclesGridFinderParameters::ASYMMETRIC_GRID:
- boxFinder.getAsymmetricHoles(centers);
- break;
+ boxFinder.getAsymmetricHoles(centers);
+ break;
default:
        CV_Error(CV_StsBadArg, "Unknown pattern type");
- }
+ }
if (i != 0)
{
Mat(centers).copyTo(_centers);
return true;
}
-
+
boxFinder.getHoles(centers);
if (i != attempts - 1)
{
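
// A minimal caller-side sketch of cv::findCirclesGrid, whose grid-type dispatch is
// shown in the switch above; the file name and the 4x11 asymmetric pattern size are
// assumptions, not values taken from this patch.
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("asym_circles.png", 0);    // load as grayscale
    if (img.empty())
        return 1;
    std::vector<cv::Point2f> centers;
    bool found = cv::findCirclesGrid(img, cv::Size(4, 11), centers,
                                     cv::CALIB_CB_ASYMMETRIC_GRID);
    return found ? 0 : 1;
}
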
int useExtrinsicGuess )
{
const int max_iter = 20;
- Ptr<CvMat> matM, _Mxy, _m, _mn, matL, matJ;
+ Ptr<CvMat> matM, _Mxy, _m, _mn, matL;
int i, count;
double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];
# endif
#endif
-void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, int> >& quads, int class_id)
+static void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, int> >& quads, int class_id)
{
const float min_aspect_ratio = 0.3f;
const float max_aspect_ratio = 3.0f;
const float min_box_size = 10.0f;
-
+
for(CvSeq* seq = contours; seq != NULL; seq = seq->h_next)
{
CvBox2D box = cvMinAreaRect2(seq);
{
continue;
}
-
+
quads.push_back(std::pair<float, int>(box_size, class_id));
}
}
-void countClasses(const std::vector<std::pair<float, int> >& pairs, size_t idx1, size_t idx2, std::vector<int>& counts)
+static void countClasses(const std::vector<std::pair<float, int> >& pairs, size_t idx1, size_t idx2, std::vector<int>& counts)
{
counts.assign(2, 0);
for(size_t i = idx1; i != idx2; i++)
}
}
-bool less_pred(const std::pair<float, int>& p1, const std::pair<float, int>& p2)
+inline bool less_pred(const std::pair<float, int>& p1, const std::pair<float, int>& p2)
{
return p1.first < p2.first;
}
-// does a fast check if a chessboard is in the input image. This is a workaround to
+// does a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
-// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
+// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
int cvCheckChessboard(IplImage* src, CvSize size)
{
if(src->nChannels > 1)
{
- cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only",
+ cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only",
__FILE__, __LINE__);
}
-
+
if(src->depth != 8)
{
- cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only",
+ cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only",
__FILE__, __LINE__);
}
-
+
const int erosion_count = 1;
const float black_level = 20.f;
const float white_level = 130.f;
const float black_white_gap = 70.f;
-
+
#if defined(DEBUG_WINDOWS)
cvNamedWindow("1", 1);
cvShowImage("1", src);
#endif //DEBUG_WINDOWS
CvMemStorage* storage = cvCreateMemStorage();
-
+
IplImage* white = cvCloneImage(src);
IplImage* black = cvCloneImage(src);
-
+
cvErode(white, white, NULL, erosion_count);
cvDilate(black, black, NULL, erosion_count);
IplImage* thresh = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
-
+
int result = 0;
for(float thresh_level = black_level; thresh_level < white_level && !result; thresh_level += 20.0f)
{
cvThreshold(white, thresh, thresh_level + black_white_gap, 255, CV_THRESH_BINARY);
-
+
#if defined(DEBUG_WINDOWS)
cvShowImage("1", thresh);
cvWaitKey(0);
#endif //DEBUG_WINDOWS
-
+
CvSeq* first = 0;
std::vector<std::pair<float, int> > quads;
- cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
+ cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
icvGetQuadrangleHypotheses(first, quads, 1);
-
+
cvThreshold(black, thresh, thresh_level, 255, CV_THRESH_BINARY_INV);
-
+
#if defined(DEBUG_WINDOWS)
cvShowImage("1", thresh);
cvWaitKey(0);
#endif //DEBUG_WINDOWS
-
+
cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
icvGetQuadrangleHypotheses(first, quads, 0);
-
+
const size_t min_quads_count = size.width*size.height/2;
std::sort(quads.begin(), quads.end(), less_pred);
-
+
// now check if there are many hypotheses with similar sizes
// do this by floodfill-style algorithm
const float size_rel_dev = 0.4f;
-
+
for(size_t i = 0; i < quads.size(); i++)
{
size_t j = i + 1;
break;
}
}
-
+
if(j + 1 > min_quads_count + i)
{
// check the number of black and white squares
}
}
}
-
-
+
+
cvReleaseImage(&thresh);
cvReleaseImage(&white);
cvReleaseImage(&black);
cvReleaseMemStorage(&storage);
-
+
return result;
}
}
#endif
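
// A minimal sketch of using the fast pre-check implemented above before the full
// (slow) corner search; the image path and the 9x6 pattern are assumptions, and the
// cvCheckChessboard declaration is assumed to be reachable through the calib3d headers.
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    IplImage* gray = cvLoadImage("board.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (!gray)
        return 1;
    CvSize pattern_size = cvSize(9, 6);                 // inner corners per row/column
    int ok = 0;
    if (cvCheckChessboard(gray, pattern_size) == 1)     // 1: a board may be present
    {
        std::vector<CvPoint2D32f> corners(pattern_size.width * pattern_size.height);
        int corner_count = 0;
        ok = cvFindChessboardCorners(gray, pattern_size, &corners[0], &corner_count);
    }
    cvReleaseImage(&gray);
    return ok > 0 ? 0 : 1;
}
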
-void CirclesGridClusterFinder::hierarchicalClustering(const vector<Point2f> points, const Size &patternSize, vector<Point2f> &patternPoints)
+void CirclesGridClusterFinder::hierarchicalClustering(const vector<Point2f> points, const Size &patternSz, vector<Point2f> &patternPoints)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
- if(tegra::hierarchicalClustering(points, patternSize, patternPoints))
+ if(tegra::hierarchicalClustering(points, patternSz, patternPoints))
return;
#endif
- int i, j, n = (int)points.size();
- size_t pn = static_cast<size_t>(patternSize.area());
+ int j, n = (int)points.size();
+ size_t pn = static_cast<size_t>(patternSz.area());
patternPoints.clear();
if (pn >= points.size())
Mat dists(n, n, CV_32FC1, Scalar(0));
Mat distsMask(dists.size(), CV_8UC1, Scalar(0));
- for(i = 0; i < n; i++)
+ for(int i = 0; i < n; i++)
{
for(j = i+1; j < n; j++)
{
}
//the largest cluster can have more than pn points -- we need to filter out such situations
- if(clusters[patternClusterIdx].size() != static_cast<size_t>(patternSize.area()))
+ if(clusters[patternClusterIdx].size() != static_cast<size_t>(patternSz.area()))
{
return;
}
{
for (Vertices::const_iterator it3 = vertices.begin(); it3 != vertices.end(); it3++)
{
- int i1 = (int)it1->first, i2 = (int)it2->first, i3 = (int)it3->first;
+ int i1 = (int)it1->first, i2 = (int)it2->first, i3 = (int)it3->first;
int val1 = distanceMatrix.at<int> (i2, i3);
int val2;
if (distanceMatrix.at<int> (i2, i1) == infinity ||
- distanceMatrix.at<int> (i1, i3) == infinity)
+ distanceMatrix.at<int> (i1, i3) == infinity)
val2 = val1;
else
{
}
}
-void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, vector<size_t> &path)
+static void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, vector<size_t> &path)
{
if (predecessorMatrix.at<int> ((int)v1, (int)v2) < 0)
{
}
}
-bool areIndicesCorrect(Point pos, vector<vector<size_t> > *points)
+static bool areIndicesCorrect(Point pos, vector<vector<size_t> > *points)
{
if (pos.y < 0 || pos.x < 0)
return false;
if (cameraMatrix.depth() == CV_32F)\r
init_camera_parameters<float>(cameraMatrix);\r
else\r
- init_camera_parameters<double>(cameraMatrix);\r
+ init_camera_parameters<double>(cameraMatrix);\r
\r
number_of_correspondences = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));\r
\r
pws.resize(3 * number_of_correspondences);\r
- us.resize(2 * number_of_correspondences); \r
- \r
+ us.resize(2 * number_of_correspondences);\r
+\r
if (opoints.depth() == ipoints.depth())\r
{\r
- if (opoints.depth() == CV_32F)\r
- init_points<cv::Point3f,cv::Point2f>(opoints, ipoints);\r
- else\r
- init_points<cv::Point3d,cv::Point2d>(opoints, ipoints);\r
+ if (opoints.depth() == CV_32F)\r
+ init_points<cv::Point3f,cv::Point2f>(opoints, ipoints);\r
+ else\r
+ init_points<cv::Point3d,cv::Point2d>(opoints, ipoints);\r
}\r
else if (opoints.depth() == CV_32F)\r
- init_points<cv::Point3f,cv::Point2d>(opoints, ipoints);\r
+ init_points<cv::Point3f,cv::Point2d>(opoints, ipoints);\r
else\r
- init_points<cv::Point3d,cv::Point2f>(opoints, ipoints);\r
+ init_points<cv::Point3d,cv::Point2f>(opoints, ipoints);\r
\r
- alphas.resize(4 * number_of_correspondences); \r
+ alphas.resize(4 * number_of_correspondences);\r
pcs.resize(3 * number_of_correspondences);\r
\r
max_nr = 0;\r
\r
for(int j = 0; j < 3; j++)\r
a[1 + j] =\r
- ci[3 * j ] * (pi[0] - cws[0][0]) +\r
- ci[3 * j + 1] * (pi[1] - cws[0][1]) +\r
- ci[3 * j + 2] * (pi[2] - cws[0][2]);\r
+ ci[3 * j ] * (pi[0] - cws[0][0]) +\r
+ ci[3 * j + 1] * (pi[1] - cws[0][1]) +\r
+ ci[3 * j + 2] * (pi[2] - cws[0][2]);\r
a[0] = 1.0f - a[1] - a[2] - a[3];\r
}\r
}\r
\r
void epnp::fill_M(CvMat * M,\r
- const int row, const double * as, const double u, const double v)\r
+ const int row, const double * as, const double u, const double v)\r
{\r
double * M1 = M->data.db + row * 12;\r
double * M2 = M1 + 12;\r
const double * v = ut + 12 * (11 - i);\r
for(int j = 0; j < 4; j++)\r
for(int k = 0; k < 3; k++)\r
- ccs[j][k] += betas[i] * v[3 * j + k];\r
+ ccs[j][k] += betas[i] * v[3 * j + k];\r
}\r
}\r
\r
}\r
\r
void epnp::copy_R_and_t(const double R_src[3][3], const double t_src[3],\r
- double R_dst[3][3], double t_dst[3])\r
+ double R_dst[3][3], double t_dst[3])\r
{\r
for(int i = 0; i < 3; i++) {\r
for(int j = 0; j < 3; j++)\r
if (pcs[2] < 0.0) {\r
for(int i = 0; i < 4; i++)\r
for(int j = 0; j < 3; j++)\r
- ccs[i][j] = -ccs[i][j];\r
+ ccs[i][j] = -ccs[i][j];\r
\r
for(int i = 0; i < number_of_correspondences; i++) {\r
pcs[3 * i ] = -pcs[3 * i];\r
}\r
\r
double epnp::compute_R_and_t(const double * ut, const double * betas,\r
- double R[3][3], double t[3])\r
+ double R[3][3], double t[3])\r
{\r
compute_ccs(betas, ut);\r
compute_pcs();\r
}\r
\r
return sum2 / number_of_correspondences;\r
-} \r
+}\r
\r
// betas10 = [B11 B12 B22 B13 B23 B33 B14 B24 B34 B44]\r
// betas_approx_1 = [B11 B12 B13 B14]\r
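// Background, stated as a summary of the EPnP formulation rather than of this exact
// implementation: the camera-frame control points are a weighted sum of null-space
// vectors of M, ccs = sum_i betas[i] * v_i.  Requiring the six pairwise control-point
// distances to match their world-frame values yields six equations quadratic in the
// betas; collecting the products B_ij = beta_i * beta_j turns them into the linear
// system L_6x10 * betas10 = Rho, and the find_betas_approx_* routines below solve
// truncated versions of it to seed the Gauss-Newton refinement.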
\r
void epnp::find_betas_approx_1(const CvMat * L_6x10, const CvMat * Rho,\r
- double * betas)\r
+ double * betas)\r
{\r
double l_6x4[6 * 4], b4[4];\r
CvMat L_6x4 = cvMat(6, 4, CV_64F, l_6x4);\r
// betas_approx_2 = [B11 B12 B22 ]\r
\r
void epnp::find_betas_approx_2(const CvMat * L_6x10, const CvMat * Rho,\r
- double * betas)\r
+ double * betas)\r
{\r
double l_6x3[6 * 3], b3[3];\r
CvMat L_6x3 = cvMat(6, 3, CV_64F, l_6x3);\r
// betas_approx_3 = [B11 B12 B22 B13 B23 ]\r
\r
void epnp::find_betas_approx_3(const CvMat * L_6x10, const CvMat * Rho,\r
- double * betas)\r
+ double * betas)\r
{\r
double l_6x5[6 * 5], b5[5];\r
CvMat L_6x5 = cvMat(6, 5, CV_64F, l_6x5);\r
\r
b++;\r
if (b > 3) {\r
- a++;\r
- b = a + 1;\r
+ a++;\r
+ b = a + 1;\r
}\r
}\r
}\r
}\r
\r
void epnp::compute_A_and_b_gauss_newton(const double * l_6x10, const double * rho,\r
- const double betas[4], CvMat * A, CvMat * b)\r
+ const double betas[4], CvMat * A, CvMat * b)\r
{\r
for(int i = 0; i < 6; i++) {\r
const double * rowL = l_6x10 + i * 10;\r
rowA[3] = rowL[6] * betas[0] + rowL[7] * betas[1] + rowL[8] * betas[2] + 2 * rowL[9] * betas[3];\r
\r
cvmSet(b, i, 0, rho[i] -\r
- (\r
- rowL[0] * betas[0] * betas[0] +\r
- rowL[1] * betas[0] * betas[1] +\r
- rowL[2] * betas[1] * betas[1] +\r
- rowL[3] * betas[0] * betas[2] +\r
- rowL[4] * betas[1] * betas[2] +\r
- rowL[5] * betas[2] * betas[2] +\r
- rowL[6] * betas[0] * betas[3] +\r
- rowL[7] * betas[1] * betas[3] +\r
- rowL[8] * betas[2] * betas[3] +\r
- rowL[9] * betas[3] * betas[3]\r
- ));\r
+ (\r
+ rowL[0] * betas[0] * betas[0] +\r
+ rowL[1] * betas[0] * betas[1] +\r
+ rowL[2] * betas[1] * betas[1] +\r
+ rowL[3] * betas[0] * betas[2] +\r
+ rowL[4] * betas[1] * betas[2] +\r
+ rowL[5] * betas[2] * betas[2] +\r
+ rowL[6] * betas[0] * betas[3] +\r
+ rowL[7] * betas[1] * betas[3] +\r
+ rowL[8] * betas[2] * betas[3] +\r
+ rowL[9] * betas[3] * betas[3]\r
+ ));\r
}\r
}\r
\r
-void epnp::gauss_newton(const CvMat * L_6x10, const CvMat * Rho,\r
- double betas[4])\r
+void epnp::gauss_newton(const CvMat * L_6x10, const CvMat * Rho, double betas[4])\r
{\r
const int iterations_number = 5;\r
\r
CvMat B = cvMat(6, 1, CV_64F, b);\r
CvMat X = cvMat(4, 1, CV_64F, x);\r
\r
- for(int k = 0; k < iterations_number; k++) {\r
+ for(int k = 0; k < iterations_number; k++)\r
+ {\r
compute_A_and_b_gauss_newton(L_6x10->data.db, Rho->data.db,\r
- betas, &A, &B);\r
+ betas, &A, &B);\r
qr_solve(&A, &B, &X);\r
for(int i = 0; i < 4; i++)\r
- betas[i] += x[i];\r
+ betas[i] += x[i];\r
}\r
}\r
\r
const int nr = A->rows;\r
const int nc = A->cols;\r
\r
- if (max_nr != 0 && max_nr < nr) {\r
+ if (max_nr != 0 && max_nr < nr)\r
+ {\r
delete [] A1;\r
delete [] A2;\r
}\r
- if (max_nr < nr) {\r
+ if (max_nr < nr)\r
+ {\r
max_nr = nr;\r
A1 = new double[nr];\r
A2 = new double[nr];\r
}\r
\r
double * pA = A->data.db, * ppAkk = pA;\r
- for(int k = 0; k < nc; k++) {\r
- double * ppAik = ppAkk, eta = fabs(*ppAik);\r
- for(int i = k + 1; i < nr; i++) {\r
- double elt = fabs(*ppAik);\r
+ for(int k = 0; k < nc; k++)\r
+ {\r
+ double * ppAik1 = ppAkk, eta = fabs(*ppAik1);\r
+ for(int i = k + 1; i < nr; i++)\r
+ {\r
+ double elt = fabs(*ppAik1);\r
if (eta < elt) eta = elt;\r
- ppAik += nc;\r
+ ppAik1 += nc;\r
}\r
- if (eta == 0) {\r
+ if (eta == 0)\r
+ {\r
A1[k] = A2[k] = 0.0;\r
//cerr << "God damnit, A is singular, this shouldn't happen." << endl;\r
return;\r
- } else {\r
- double * ppAik = ppAkk, sum = 0.0, inv_eta = 1. / eta;\r
- for(int i = k; i < nr; i++) {\r
- *ppAik *= inv_eta;\r
- sum += *ppAik * *ppAik;\r
- ppAik += nc;\r
+ }\r
+ else\r
+ {\r
+ double * ppAik2 = ppAkk, sum2 = 0.0, inv_eta = 1. / eta;\r
+ for(int i = k; i < nr; i++)\r
+ {\r
+ *ppAik2 *= inv_eta;\r
+ sum2 += *ppAik2 * *ppAik2;\r
+ ppAik2 += nc;\r
}\r
- double sigma = sqrt(sum);\r
+ double sigma = sqrt(sum2);\r
if (*ppAkk < 0)\r
- sigma = -sigma;\r
+ sigma = -sigma;\r
*ppAkk += sigma;\r
A1[k] = sigma * *ppAkk;\r
A2[k] = -eta * sigma;\r
- for(int j = k + 1; j < nc; j++) {\r
- double * ppAik = ppAkk, sum = 0;\r
- for(int i = k; i < nr; i++) {\r
- sum += *ppAik * ppAik[j - k];\r
- ppAik += nc;\r
- }\r
- double tau = sum / A1[k];\r
- ppAik = ppAkk;\r
- for(int i = k; i < nr; i++) {\r
- ppAik[j - k] -= tau * *ppAik;\r
- ppAik += nc;\r
- }\r
+ for(int j = k + 1; j < nc; j++)\r
+ {\r
+ double * ppAik = ppAkk, sum = 0;\r
+ for(int i = k; i < nr; i++)\r
+ {\r
+ sum += *ppAik * ppAik[j - k];\r
+ ppAik += nc;\r
+ }\r
+ double tau = sum / A1[k];\r
+ ppAik = ppAkk;\r
+ for(int i = k; i < nr; i++)\r
+ {\r
+ ppAik[j - k] -= tau * *ppAik;\r
+ ppAik += nc;\r
+ }\r
}\r
}\r
ppAkk += nc + 1;\r
\r
// b <- Qt b\r
double * ppAjj = pA, * pb = b->data.db;\r
- for(int j = 0; j < nc; j++) {\r
+ for(int j = 0; j < nc; j++)\r
+ {\r
double * ppAij = ppAjj, tau = 0;\r
- for(int i = j; i < nr; i++) {\r
+ for(int i = j; i < nr; i++)\r
+ {\r
tau += *ppAij * pb[i];\r
ppAij += nc;\r
}\r
tau /= A1[j];\r
ppAij = ppAjj;\r
- for(int i = j; i < nr; i++) {\r
+ for(int i = j; i < nr; i++)\r
+ {\r
pb[i] -= tau * *ppAij;\r
ppAij += nc;\r
}\r
// X = R-1 b\r
double * pX = X->data.db;\r
pX[nc - 1] = pb[nc - 1] / A2[nc - 1];\r
- for(int i = nc - 2; i >= 0; i--) {\r
+ for(int i = nc - 2; i >= 0; i--)\r
+ {\r
double * ppAij = pA + i * nc + (i + 1), sum = 0;\r
\r
- for(int j = i + 1; j < nc; j++) {\r
+ for(int j = i + 1; j < nc; j++)\r
+ {\r
sum += *ppAij * pX[j];\r
ppAij++;\r
}\r
void p3p::init_inverse_parameters()
{
- inv_fx = 1. / fx;
- inv_fy = 1. / fy;
- cx_fx = cx / fx;
- cy_fy = cy / fy;
+ inv_fx = 1. / fx;
+ inv_fy = 1. / fy;
+ cx_fx = cx / fx;
+ cy_fy = cy / fy;
}
p3p::p3p(cv::Mat cameraMatrix)
{
- if (cameraMatrix.depth() == CV_32F)\r
- init_camera_parameters<float>(cameraMatrix);\r
- else\r
- init_camera_parameters<double>(cameraMatrix);
- init_inverse_parameters();
+ if (cameraMatrix.depth() == CV_32F)
+ init_camera_parameters<float>(cameraMatrix);
+ else
+ init_camera_parameters<double>(cameraMatrix);
+ init_inverse_parameters();
}
p3p::p3p(double _fx, double _fy, double _cx, double _cy)
{
- fx = _fx;
- fy = _fy;
- cx = _cx;
- cy = _cy;
- init_inverse_parameters();
+ fx = _fx;
+ fy = _fy;
+ cx = _cx;
+ cy = _cy;
+ init_inverse_parameters();
}
bool p3p::solve(cv::Mat& R, cv::Mat& tvec, const cv::Mat& opoints, const cv::Mat& ipoints)
{
- double rotation_matrix[3][3], translation[3];
- std::vector<double> points;
- if (opoints.depth() == ipoints.depth())\r
- {\r
- if (opoints.depth() == CV_32F)\r
- extract_points<cv::Point3f,cv::Point2f>(opoints, ipoints, points);\r
- else\r
- extract_points<cv::Point3d,cv::Point2d>(opoints, ipoints, points);\r
- }\r
- else if (opoints.depth() == CV_32F)\r
- extract_points<cv::Point3f,cv::Point2d>(opoints, ipoints, points);\r
- else\r
- extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points);
-
- bool result = solve(rotation_matrix, translation, points[0], points[1], points[2], points[3], points[4], points[5],
- points[6], points[7], points[8], points[9], points[10], points[11], points[12], points[13], points[14],
- points[15], points[16], points[17], points[18], points[19]);
- cv::Mat(3, 1, CV_64F, translation).copyTo(tvec);\r
+ double rotation_matrix[3][3], translation[3];
+ std::vector<double> points;
+ if (opoints.depth() == ipoints.depth())
+ {
+ if (opoints.depth() == CV_32F)
+ extract_points<cv::Point3f,cv::Point2f>(opoints, ipoints, points);
+ else
+ extract_points<cv::Point3d,cv::Point2d>(opoints, ipoints, points);
+ }
+ else if (opoints.depth() == CV_32F)
+ extract_points<cv::Point3f,cv::Point2d>(opoints, ipoints, points);
+ else
+ extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points);
+
+ bool result = solve(rotation_matrix, translation, points[0], points[1], points[2], points[3], points[4], points[5],
+ points[6], points[7], points[8], points[9], points[10], points[11], points[12], points[13], points[14],
+ points[15], points[16], points[17], points[18], points[19]);
+ cv::Mat(3, 1, CV_64F, translation).copyTo(tvec);
cv::Mat(3, 3, CV_64F, rotation_matrix).copyTo(R);
- return result;
+ return result;
}
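
// A minimal sketch of calling the wrapper above directly; the p3p class is internal
// to this module, so the "p3p.h" include and all numeric values here are assumptions
// for illustration only.
#include <opencv2/core/core.hpp>
#include "p3p.h"

static bool p3p_demo()
{
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320,
                                             0, 800, 240,
                                             0,   0,   1);
    cv::Mat opoints(1, 4, CV_32FC3);    // four object points (X, Y, Z)
    cv::Mat ipoints(1, 4, CV_32FC2);    // their image projections (u, v)
    opoints.setTo(cv::Scalar::all(0));  // fill with real correspondences in practice
    ipoints.setTo(cv::Scalar::all(0));
    p3p solver(K);
    cv::Mat R, tvec;                    // 3x3 rotation and 3x1 translation on success
    return solver.solve(R, tvec, opoints, ipoints);
}
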
bool p3p::solve(double R[3][3], double t[3],
- double mu0, double mv0, double X0, double Y0, double Z0,
- double mu1, double mv1, double X1, double Y1, double Z1,
- double mu2, double mv2, double X2, double Y2, double Z2,
- double mu3, double mv3, double X3, double Y3, double Z3)
+ double mu0, double mv0, double X0, double Y0, double Z0,
+ double mu1, double mv1, double X1, double Y1, double Z1,
+ double mu2, double mv2, double X2, double Y2, double Z2,
+ double mu3, double mv3, double X3, double Y3, double Z3)
{
- double Rs[4][3][3], ts[4][3];
-
- int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2);
-
- if (n == 0)
- return false;
-
- int ns = 0;
- double min_reproj = 0;
- for(int i = 0; i < n; i++) {
- double X3p = Rs[i][0][0] * X3 + Rs[i][0][1] * Y3 + Rs[i][0][2] * Z3 + ts[i][0];
- double Y3p = Rs[i][1][0] * X3 + Rs[i][1][1] * Y3 + Rs[i][1][2] * Z3 + ts[i][1];
- double Z3p = Rs[i][2][0] * X3 + Rs[i][2][1] * Y3 + Rs[i][2][2] * Z3 + ts[i][2];
- double mu3p = cx + fx * X3p / Z3p;
- double mv3p = cy + fy * Y3p / Z3p;
- double reproj = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
- if (i == 0 || min_reproj > reproj) {
- ns = i;
- min_reproj = reproj;
- }
- }
-
- for(int i = 0; i < 3; i++) {
- for(int j = 0; j < 3; j++)
- R[i][j] = Rs[ns][i][j];
- t[i] = ts[ns][i];
- }
-
- return true;
+ double Rs[4][3][3], ts[4][3];
+
+ int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2);
+
+ if (n == 0)
+ return false;
+
+ int ns = 0;
+ double min_reproj = 0;
+ for(int i = 0; i < n; i++) {
+ double X3p = Rs[i][0][0] * X3 + Rs[i][0][1] * Y3 + Rs[i][0][2] * Z3 + ts[i][0];
+ double Y3p = Rs[i][1][0] * X3 + Rs[i][1][1] * Y3 + Rs[i][1][2] * Z3 + ts[i][1];
+ double Z3p = Rs[i][2][0] * X3 + Rs[i][2][1] * Y3 + Rs[i][2][2] * Z3 + ts[i][2];
+ double mu3p = cx + fx * X3p / Z3p;
+ double mv3p = cy + fy * Y3p / Z3p;
+ double reproj = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
+ if (i == 0 || min_reproj > reproj) {
+ ns = i;
+ min_reproj = reproj;
+ }
+ }
+
+ for(int i = 0; i < 3; i++) {
+ for(int j = 0; j < 3; j++)
+ R[i][j] = Rs[ns][i][j];
+ t[i] = ts[ns][i];
+ }
+
+ return true;
}
int p3p::solve(double R[4][3][3], double t[4][3],
- double mu0, double mv0, double X0, double Y0, double Z0,
- double mu1, double mv1, double X1, double Y1, double Z1,
- double mu2, double mv2, double X2, double Y2, double Z2)
+ double mu0, double mv0, double X0, double Y0, double Z0,
+ double mu1, double mv1, double X1, double Y1, double Z1,
+ double mu2, double mv2, double X2, double Y2, double Z2)
{
- double mk0, mk1, mk2;
- double norm;
+ double mk0, mk1, mk2;
+ double norm;
- mu0 = inv_fx * mu0 - cx_fx;
- mv0 = inv_fy * mv0 - cy_fy;
- norm = sqrt(mu0 * mu0 + mv0 * mv0 + 1);
- mk0 = 1. / norm; mu0 *= mk0; mv0 *= mk0;
+ mu0 = inv_fx * mu0 - cx_fx;
+ mv0 = inv_fy * mv0 - cy_fy;
+ norm = sqrt(mu0 * mu0 + mv0 * mv0 + 1);
+ mk0 = 1. / norm; mu0 *= mk0; mv0 *= mk0;
- mu1 = inv_fx * mu1 - cx_fx;
- mv1 = inv_fy * mv1 - cy_fy;
- norm = sqrt(mu1 * mu1 + mv1 * mv1 + 1);
- mk1 = 1. / norm; mu1 *= mk1; mv1 *= mk1;
+ mu1 = inv_fx * mu1 - cx_fx;
+ mv1 = inv_fy * mv1 - cy_fy;
+ norm = sqrt(mu1 * mu1 + mv1 * mv1 + 1);
+ mk1 = 1. / norm; mu1 *= mk1; mv1 *= mk1;
- mu2 = inv_fx * mu2 - cx_fx;
- mv2 = inv_fy * mv2 - cy_fy;
- norm = sqrt(mu2 * mu2 + mv2 * mv2 + 1);
- mk2 = 1. / norm; mu2 *= mk2; mv2 *= mk2;
+ mu2 = inv_fx * mu2 - cx_fx;
+ mv2 = inv_fy * mv2 - cy_fy;
+ norm = sqrt(mu2 * mu2 + mv2 * mv2 + 1);
+ mk2 = 1. / norm; mu2 *= mk2; mv2 *= mk2;
- double distances[3];
- distances[0] = sqrt( (X1 - X2) * (X1 - X2) + (Y1 - Y2) * (Y1 - Y2) + (Z1 - Z2) * (Z1 - Z2) );
- distances[1] = sqrt( (X0 - X2) * (X0 - X2) + (Y0 - Y2) * (Y0 - Y2) + (Z0 - Z2) * (Z0 - Z2) );
- distances[2] = sqrt( (X0 - X1) * (X0 - X1) + (Y0 - Y1) * (Y0 - Y1) + (Z0 - Z1) * (Z0 - Z1) );
+ double distances[3];
+ distances[0] = sqrt( (X1 - X2) * (X1 - X2) + (Y1 - Y2) * (Y1 - Y2) + (Z1 - Z2) * (Z1 - Z2) );
+ distances[1] = sqrt( (X0 - X2) * (X0 - X2) + (Y0 - Y2) * (Y0 - Y2) + (Z0 - Z2) * (Z0 - Z2) );
+ distances[2] = sqrt( (X0 - X1) * (X0 - X1) + (Y0 - Y1) * (Y0 - Y1) + (Z0 - Z1) * (Z0 - Z1) );
- // Calculate angles
- double cosines[3];
- cosines[0] = mu1 * mu2 + mv1 * mv2 + mk1 * mk2;
- cosines[1] = mu0 * mu2 + mv0 * mv2 + mk0 * mk2;
- cosines[2] = mu0 * mu1 + mv0 * mv1 + mk0 * mk1;
+ // Calculate angles
+ double cosines[3];
+ cosines[0] = mu1 * mu2 + mv1 * mv2 + mk1 * mk2;
+ cosines[1] = mu0 * mu2 + mv0 * mv2 + mk0 * mk2;
+ cosines[2] = mu0 * mu1 + mv0 * mv1 + mk0 * mk1;
- double lengths[4][3];
- int n = solve_for_lengths(lengths, distances, cosines);
+ double lengths[4][3];
+ int n = solve_for_lengths(lengths, distances, cosines);
- int nb_solutions = 0;
- for(int i = 0; i < n; i++) {
- double M_orig[3][3];
+ int nb_solutions = 0;
+ for(int i = 0; i < n; i++) {
+ double M_orig[3][3];
- M_orig[0][0] = lengths[i][0] * mu0;
- M_orig[0][1] = lengths[i][0] * mv0;
- M_orig[0][2] = lengths[i][0] * mk0;
+ M_orig[0][0] = lengths[i][0] * mu0;
+ M_orig[0][1] = lengths[i][0] * mv0;
+ M_orig[0][2] = lengths[i][0] * mk0;
- M_orig[1][0] = lengths[i][1] * mu1;
- M_orig[1][1] = lengths[i][1] * mv1;
- M_orig[1][2] = lengths[i][1] * mk1;
+ M_orig[1][0] = lengths[i][1] * mu1;
+ M_orig[1][1] = lengths[i][1] * mv1;
+ M_orig[1][2] = lengths[i][1] * mk1;
- M_orig[2][0] = lengths[i][2] * mu2;
- M_orig[2][1] = lengths[i][2] * mv2;
- M_orig[2][2] = lengths[i][2] * mk2;
+ M_orig[2][0] = lengths[i][2] * mu2;
+ M_orig[2][1] = lengths[i][2] * mv2;
+ M_orig[2][2] = lengths[i][2] * mk2;
- if (!align(M_orig, X0, Y0, Z0, X1, Y1, Z1, X2, Y2, Z2, R[nb_solutions], t[nb_solutions]))
- continue;
+ if (!align(M_orig, X0, Y0, Z0, X1, Y1, Z1, X2, Y2, Z2, R[nb_solutions], t[nb_solutions]))
+ continue;
- nb_solutions++;
- }
+ nb_solutions++;
+ }
- return nb_solutions;
+ return nb_solutions;
}
/// Given 3D distances between three points and cosines of 3 angles at the apex, calculates
int p3p::solve_for_lengths(double lengths[4][3], double distances[3], double cosines[3])
{
- double p = cosines[0] * 2;
- double q = cosines[1] * 2;
- double r = cosines[2] * 2;
+ double p = cosines[0] * 2;
+ double q = cosines[1] * 2;
+ double r = cosines[2] * 2;
- double inv_d22 = 1. / (distances[2] * distances[2]);
- double a = inv_d22 * (distances[0] * distances[0]);
- double b = inv_d22 * (distances[1] * distances[1]);
+ double inv_d22 = 1. / (distances[2] * distances[2]);
+ double a = inv_d22 * (distances[0] * distances[0]);
+ double b = inv_d22 * (distances[1] * distances[1]);
- double a2 = a * a, b2 = b * b, p2 = p * p, q2 = q * q, r2 = r * r;
- double pr = p * r, pqr = q * pr;
+ double a2 = a * a, b2 = b * b, p2 = p * p, q2 = q * q, r2 = r * r;
+ double pr = p * r, pqr = q * pr;
- // Check reality condition (the four points should not be coplanar)
- if (p2 + q2 + r2 - pqr - 1 == 0)
- return 0;
+ // Check reality condition (the four points should not be coplanar)
+ if (p2 + q2 + r2 - pqr - 1 == 0)
+ return 0;
- double ab = a * b, a_2 = 2*a;
+ double ab = a * b, a_2 = 2*a;
- double A = -2 * b + b2 + a2 + 1 + ab*(2 - r2) - a_2;
+ double A = -2 * b + b2 + a2 + 1 + ab*(2 - r2) - a_2;
- // Check reality condition
- if (A == 0) return 0;
+ // Check reality condition
+ if (A == 0) return 0;
- double a_4 = 4*a;
+ double a_4 = 4*a;
- double B = q*(-2*(ab + a2 + 1 - b) + r2*ab + a_4) + pr*(b - b2 + ab);
- double C = q2 + b2*(r2 + p2 - 2) - b*(p2 + pqr) - ab*(r2 + pqr) + (a2 - a_2)*(2 + q2) + 2;
- double D = pr*(ab-b2+b) + q*((p2-2)*b + 2 * (ab - a2) + a_4 - 2);
- double E = 1 + 2*(b - a - ab) + b2 - b*p2 + a2;
+ double B = q*(-2*(ab + a2 + 1 - b) + r2*ab + a_4) + pr*(b - b2 + ab);
+ double C = q2 + b2*(r2 + p2 - 2) - b*(p2 + pqr) - ab*(r2 + pqr) + (a2 - a_2)*(2 + q2) + 2;
+ double D = pr*(ab-b2+b) + q*((p2-2)*b + 2 * (ab - a2) + a_4 - 2);
+ double E = 1 + 2*(b - a - ab) + b2 - b*p2 + a2;
- double temp = (p2*(a-1+b) + r2*(a-1-b) + pqr - a*pqr);
- double b0 = b * temp * temp;
- // Check reality condition
- if (b0 == 0)
- return 0;
+ double temp = (p2*(a-1+b) + r2*(a-1-b) + pqr - a*pqr);
+ double b0 = b * temp * temp;
+ // Check reality condition
+ if (b0 == 0)
+ return 0;
- double real_roots[4];
- int n = solve_deg4(A, B, C, D, E, real_roots[0], real_roots[1], real_roots[2], real_roots[3]);
+ double real_roots[4];
+ int n = solve_deg4(A, B, C, D, E, real_roots[0], real_roots[1], real_roots[2], real_roots[3]);
- if (n == 0)
- return 0;
+ if (n == 0)
+ return 0;
- int nb_solutions = 0;
- double r3 = r2*r, pr2 = p*r2, r3q = r3 * q;
- double inv_b0 = 1. / b0;
+ int nb_solutions = 0;
+ double r3 = r2*r, pr2 = p*r2, r3q = r3 * q;
+ double inv_b0 = 1. / b0;
- // For each solution of x
- for(int i = 0; i < n; i++) {
- double x = real_roots[i];
+ // For each solution of x
+ for(int i = 0; i < n; i++) {
+ double x = real_roots[i];
- // Check reality condition
- if (x <= 0)
- continue;
+ // Check reality condition
+ if (x <= 0)
+ continue;
- double x2 = x*x;
+ double x2 = x*x;
- double b1 =
- ((1-a-b)*x2 + (q*a-q)*x + 1 - a + b) *
- (((r3*(a2 + ab*(2 - r2) - a_2 + b2 - 2*b + 1)) * x +
+ double b1 =
+ ((1-a-b)*x2 + (q*a-q)*x + 1 - a + b) *
+ (((r3*(a2 + ab*(2 - r2) - a_2 + b2 - 2*b + 1)) * x +
- (r3q*(2*(b-a2) + a_4 + ab*(r2 - 2) - 2) + pr2*(1 + a2 + 2*(ab-a-b) + r2*(b - b2) + b2))) * x2 +
+ (r3q*(2*(b-a2) + a_4 + ab*(r2 - 2) - 2) + pr2*(1 + a2 + 2*(ab-a-b) + r2*(b - b2) + b2))) * x2 +
- (r3*(q2*(1-2*a+a2) + r2*(b2-ab) - a_4 + 2*(a2 - b2) + 2) + r*p2*(b2 + 2*(ab - b - a) + 1 + a2) + pr2*q*(a_4 + 2*(b - ab - a2) - 2 - r2*b)) * x +
+ (r3*(q2*(1-2*a+a2) + r2*(b2-ab) - a_4 + 2*(a2 - b2) + 2) + r*p2*(b2 + 2*(ab - b - a) + 1 + a2) + pr2*q*(a_4 + 2*(b - ab - a2) - 2 - r2*b)) * x +
- 2*r3q*(a_2 - b - a2 + ab - 1) + pr2*(q2 - a_4 + 2*(a2 - b2) + r2*b + q2*(a2 - a_2) + 2) +
- p2*(p*(2*(ab - a - b) + a2 + b2 + 1) + 2*q*r*(b + a_2 - a2 - ab - 1)));
+ 2*r3q*(a_2 - b - a2 + ab - 1) + pr2*(q2 - a_4 + 2*(a2 - b2) + r2*b + q2*(a2 - a_2) + 2) +
+ p2*(p*(2*(ab - a - b) + a2 + b2 + 1) + 2*q*r*(b + a_2 - a2 - ab - 1)));
- // Check reality condition
- if (b1 <= 0)
- continue;
+ // Check reality condition
+ if (b1 <= 0)
+ continue;
- double y = inv_b0 * b1;
- double v = x2 + y*y - x*y*r;
+ double y = inv_b0 * b1;
+ double v = x2 + y*y - x*y*r;
- if (v <= 0)
- continue;
+ if (v <= 0)
+ continue;
- double Z = distances[2] / sqrt(v);
- double X = x * Z;
- double Y = y * Z;
+ double Z = distances[2] / sqrt(v);
+ double X = x * Z;
+ double Y = y * Z;
- lengths[nb_solutions][0] = X;
- lengths[nb_solutions][1] = Y;
- lengths[nb_solutions][2] = Z;
+ lengths[nb_solutions][0] = X;
+ lengths[nb_solutions][1] = Y;
+ lengths[nb_solutions][2] = Z;
- nb_solutions++;
- }
+ nb_solutions++;
+ }
- return nb_solutions;
+ return nb_solutions;
}
bool p3p::align(double M_end[3][3],
- double X0, double Y0, double Z0,
- double X1, double Y1, double Z1,
- double X2, double Y2, double Z2,
- double R[3][3], double T[3])
+ double X0, double Y0, double Z0,
+ double X1, double Y1, double Z1,
+ double X2, double Y2, double Z2,
+ double R[3][3], double T[3])
{
- // Centroids:
- double C_start[3], C_end[3];
- for(int i = 0; i < 3; i++) C_end[i] = (M_end[0][i] + M_end[1][i] + M_end[2][i]) / 3;
- C_start[0] = (X0 + X1 + X2) / 3;
- C_start[1] = (Y0 + Y1 + Y2) / 3;
- C_start[2] = (Z0 + Z1 + Z2) / 3;
-
- // Covariance matrix s:
- double s[3 * 3];
- for(int j = 0; j < 3; j++) {
- s[0 * 3 + j] = (X0 * M_end[0][j] + X1 * M_end[1][j] + X2 * M_end[2][j]) / 3 - C_end[j] * C_start[0];
- s[1 * 3 + j] = (Y0 * M_end[0][j] + Y1 * M_end[1][j] + Y2 * M_end[2][j]) / 3 - C_end[j] * C_start[1];
- s[2 * 3 + j] = (Z0 * M_end[0][j] + Z1 * M_end[1][j] + Z2 * M_end[2][j]) / 3 - C_end[j] * C_start[2];
- }
-
- double Qs[16], evs[4], U[16];
-
- Qs[0 * 4 + 0] = s[0 * 3 + 0] + s[1 * 3 + 1] + s[2 * 3 + 2];
- Qs[1 * 4 + 1] = s[0 * 3 + 0] - s[1 * 3 + 1] - s[2 * 3 + 2];
- Qs[2 * 4 + 2] = s[1 * 3 + 1] - s[2 * 3 + 2] - s[0 * 3 + 0];
- Qs[3 * 4 + 3] = s[2 * 3 + 2] - s[0 * 3 + 0] - s[1 * 3 + 1];
-
- Qs[1 * 4 + 0] = Qs[0 * 4 + 1] = s[1 * 3 + 2] - s[2 * 3 + 1];
- Qs[2 * 4 + 0] = Qs[0 * 4 + 2] = s[2 * 3 + 0] - s[0 * 3 + 2];
- Qs[3 * 4 + 0] = Qs[0 * 4 + 3] = s[0 * 3 + 1] - s[1 * 3 + 0];
- Qs[2 * 4 + 1] = Qs[1 * 4 + 2] = s[1 * 3 + 0] + s[0 * 3 + 1];
- Qs[3 * 4 + 1] = Qs[1 * 4 + 3] = s[2 * 3 + 0] + s[0 * 3 + 2];
- Qs[3 * 4 + 2] = Qs[2 * 4 + 3] = s[2 * 3 + 1] + s[1 * 3 + 2];
-
- jacobi_4x4(Qs, evs, U);
-
- // Looking for the largest eigen value:
- int i_ev = 0;
- double ev_max = evs[i_ev];
- for(int i = 1; i < 4; i++)
- if (evs[i] > ev_max)
- ev_max = evs[i_ev = i];
-
- // Quaternion:
- double q[4];
- for(int i = 0; i < 4; i++)
- q[i] = U[i * 4 + i_ev];
-
- double q02 = q[0] * q[0], q12 = q[1] * q[1], q22 = q[2] * q[2], q32 = q[3] * q[3];
- double q0_1 = q[0] * q[1], q0_2 = q[0] * q[2], q0_3 = q[0] * q[3];
- double q1_2 = q[1] * q[2], q1_3 = q[1] * q[3];
- double q2_3 = q[2] * q[3];
-
- R[0][0] = q02 + q12 - q22 - q32;
- R[0][1] = 2. * (q1_2 - q0_3);
- R[0][2] = 2. * (q1_3 + q0_2);
-
- R[1][0] = 2. * (q1_2 + q0_3);
- R[1][1] = q02 + q22 - q12 - q32;
- R[1][2] = 2. * (q2_3 - q0_1);
-
- R[2][0] = 2. * (q1_3 - q0_2);
- R[2][1] = 2. * (q2_3 + q0_1);
- R[2][2] = q02 + q32 - q12 - q22;
-
- for(int i = 0; i < 3; i++)
- T[i] = C_end[i] - (R[i][0] * C_start[0] + R[i][1] * C_start[1] + R[i][2] * C_start[2]);
-
- return true;
+ // Centroids:
+ double C_start[3], C_end[3];
+ for(int i = 0; i < 3; i++) C_end[i] = (M_end[0][i] + M_end[1][i] + M_end[2][i]) / 3;
+ C_start[0] = (X0 + X1 + X2) / 3;
+ C_start[1] = (Y0 + Y1 + Y2) / 3;
+ C_start[2] = (Z0 + Z1 + Z2) / 3;
+
+ // Covariance matrix s:
+ double s[3 * 3];
+ for(int j = 0; j < 3; j++) {
+ s[0 * 3 + j] = (X0 * M_end[0][j] + X1 * M_end[1][j] + X2 * M_end[2][j]) / 3 - C_end[j] * C_start[0];
+ s[1 * 3 + j] = (Y0 * M_end[0][j] + Y1 * M_end[1][j] + Y2 * M_end[2][j]) / 3 - C_end[j] * C_start[1];
+ s[2 * 3 + j] = (Z0 * M_end[0][j] + Z1 * M_end[1][j] + Z2 * M_end[2][j]) / 3 - C_end[j] * C_start[2];
+ }
+
+ double Qs[16], evs[4], U[16];
+
+ Qs[0 * 4 + 0] = s[0 * 3 + 0] + s[1 * 3 + 1] + s[2 * 3 + 2];
+ Qs[1 * 4 + 1] = s[0 * 3 + 0] - s[1 * 3 + 1] - s[2 * 3 + 2];
+ Qs[2 * 4 + 2] = s[1 * 3 + 1] - s[2 * 3 + 2] - s[0 * 3 + 0];
+ Qs[3 * 4 + 3] = s[2 * 3 + 2] - s[0 * 3 + 0] - s[1 * 3 + 1];
+
+ Qs[1 * 4 + 0] = Qs[0 * 4 + 1] = s[1 * 3 + 2] - s[2 * 3 + 1];
+ Qs[2 * 4 + 0] = Qs[0 * 4 + 2] = s[2 * 3 + 0] - s[0 * 3 + 2];
+ Qs[3 * 4 + 0] = Qs[0 * 4 + 3] = s[0 * 3 + 1] - s[1 * 3 + 0];
+ Qs[2 * 4 + 1] = Qs[1 * 4 + 2] = s[1 * 3 + 0] + s[0 * 3 + 1];
+ Qs[3 * 4 + 1] = Qs[1 * 4 + 3] = s[2 * 3 + 0] + s[0 * 3 + 2];
+ Qs[3 * 4 + 2] = Qs[2 * 4 + 3] = s[2 * 3 + 1] + s[1 * 3 + 2];
+
+ jacobi_4x4(Qs, evs, U);
+
+ // Looking for the largest eigen value:
+ int i_ev = 0;
+ double ev_max = evs[i_ev];
+ for(int i = 1; i < 4; i++)
+ if (evs[i] > ev_max)
+ ev_max = evs[i_ev = i];
+
+ // Quaternion:
+ double q[4];
+ for(int i = 0; i < 4; i++)
+ q[i] = U[i * 4 + i_ev];
+
+ double q02 = q[0] * q[0], q12 = q[1] * q[1], q22 = q[2] * q[2], q32 = q[3] * q[3];
+ double q0_1 = q[0] * q[1], q0_2 = q[0] * q[2], q0_3 = q[0] * q[3];
+ double q1_2 = q[1] * q[2], q1_3 = q[1] * q[3];
+ double q2_3 = q[2] * q[3];
+
+ R[0][0] = q02 + q12 - q22 - q32;
+ R[0][1] = 2. * (q1_2 - q0_3);
+ R[0][2] = 2. * (q1_3 + q0_2);
+
+ R[1][0] = 2. * (q1_2 + q0_3);
+ R[1][1] = q02 + q22 - q12 - q32;
+ R[1][2] = 2. * (q2_3 - q0_1);
+
+ R[2][0] = 2. * (q1_3 - q0_2);
+ R[2][1] = 2. * (q2_3 + q0_1);
+ R[2][2] = q02 + q32 - q12 - q22;
+
+ for(int i = 0; i < 3; i++)
+ T[i] = C_end[i] - (R[i][0] * C_start[0] + R[i][1] * C_start[1] + R[i][2] * C_start[2]);
+
+ return true;
}
bool p3p::jacobi_4x4(double * A, double * D, double * U)
{
- double B[4], Z[4];
- double Id[16] = {1., 0., 0., 0.,
- 0., 1., 0., 0.,
- 0., 0., 1., 0.,
- 0., 0., 0., 1.};
-
- memcpy(U, Id, 16 * sizeof(double));
-
- B[0] = A[0]; B[1] = A[5]; B[2] = A[10]; B[3] = A[15];
- memcpy(D, B, 4 * sizeof(double));
- memset(Z, 0, 4 * sizeof(double));
-
- for(int iter = 0; iter < 50; iter++) {
- double sum = fabs(A[1]) + fabs(A[2]) + fabs(A[3]) + fabs(A[6]) + fabs(A[7]) + fabs(A[11]);
-
- if (sum == 0.0)
- return true;
-
- double tresh = (iter < 3) ? 0.2 * sum / 16. : 0.0;
- for(int i = 0; i < 3; i++) {
- double * pAij = A + 5 * i + 1;
- for(int j = i + 1 ; j < 4; j++) {
- double Aij = *pAij;
- double eps_machine = 100.0 * fabs(Aij);
-
- if ( iter > 3 && fabs(D[i]) + eps_machine == fabs(D[i]) && fabs(D[j]) + eps_machine == fabs(D[j]) )
- *pAij = 0.0;
- else if (fabs(Aij) > tresh) {
- double h = D[j] - D[i], t;
- if (fabs(h) + eps_machine == fabs(h))
- t = Aij / h;
- else {
- double theta = 0.5 * h / Aij;
- t = 1.0 / (fabs(theta) + sqrt(1.0 + theta * theta));
- if (theta < 0.0) t = -t;
- }
-
- h = t * Aij;
- Z[i] -= h;
- Z[j] += h;
- D[i] -= h;
- D[j] += h;
- *pAij = 0.0;
-
- double c = 1.0 / sqrt(1 + t * t);
- double s = t * c;
- double tau = s / (1.0 + c);
- for(int k = 0; k <= i - 1; k++) {
- double g = A[k * 4 + i], h = A[k * 4 + j];
- A[k * 4 + i] = g - s * (h + g * tau);
- A[k * 4 + j] = h + s * (g - h * tau);
- }
- for(int k = i + 1; k <= j - 1; k++) {
- double g = A[i * 4 + k], h = A[k * 4 + j];
- A[i * 4 + k] = g - s * (h + g * tau);
- A[k * 4 + j] = h + s * (g - h * tau);
- }
- for(int k = j + 1; k < 4; k++) {
- double g = A[i * 4 + k], h = A[j * 4 + k];
- A[i * 4 + k] = g - s * (h + g * tau);
- A[j * 4 + k] = h + s * (g - h * tau);
- }
- for(int k = 0; k < 4; k++) {
- double g = U[k * 4 + i], h = U[k * 4 + j];
- U[k * 4 + i] = g - s * (h + g * tau);
- U[k * 4 + j] = h + s * (g - h * tau);
- }
- }
- pAij++;
- }
- }
-
- for(int i = 0; i < 4; i++) B[i] += Z[i];
- memcpy(D, B, 4 * sizeof(double));
- memset(Z, 0, 4 * sizeof(double));
- }
-
- return false;
+ double B[4], Z[4];
+ double Id[16] = {1., 0., 0., 0.,
+ 0., 1., 0., 0.,
+ 0., 0., 1., 0.,
+ 0., 0., 0., 1.};
+
+ memcpy(U, Id, 16 * sizeof(double));
+
+ B[0] = A[0]; B[1] = A[5]; B[2] = A[10]; B[3] = A[15];
+ memcpy(D, B, 4 * sizeof(double));
+ memset(Z, 0, 4 * sizeof(double));
+
+ for(int iter = 0; iter < 50; iter++) {
+ double sum = fabs(A[1]) + fabs(A[2]) + fabs(A[3]) + fabs(A[6]) + fabs(A[7]) + fabs(A[11]);
+
+ if (sum == 0.0)
+ return true;
+
+ double tresh = (iter < 3) ? 0.2 * sum / 16. : 0.0;
+ for(int i = 0; i < 3; i++) {
+ double * pAij = A + 5 * i + 1;
+ for(int j = i + 1 ; j < 4; j++) {
+ double Aij = *pAij;
+ double eps_machine = 100.0 * fabs(Aij);
+
+ if ( iter > 3 && fabs(D[i]) + eps_machine == fabs(D[i]) && fabs(D[j]) + eps_machine == fabs(D[j]) )
+ *pAij = 0.0;
+ else if (fabs(Aij) > tresh) {
+ double hh = D[j] - D[i], t;
+ if (fabs(hh) + eps_machine == fabs(hh))
+ t = Aij / hh;
+ else {
+ double theta = 0.5 * hh / Aij;
+ t = 1.0 / (fabs(theta) + sqrt(1.0 + theta * theta));
+ if (theta < 0.0) t = -t;
+ }
+
+ hh = t * Aij;
+ Z[i] -= hh;
+ Z[j] += hh;
+ D[i] -= hh;
+ D[j] += hh;
+ *pAij = 0.0;
+
+ double c = 1.0 / sqrt(1 + t * t);
+ double s = t * c;
+ double tau = s / (1.0 + c);
+ for(int k = 0; k <= i - 1; k++) {
+ double g = A[k * 4 + i], h = A[k * 4 + j];
+ A[k * 4 + i] = g - s * (h + g * tau);
+ A[k * 4 + j] = h + s * (g - h * tau);
+ }
+ for(int k = i + 1; k <= j - 1; k++) {
+ double g = A[i * 4 + k], h = A[k * 4 + j];
+ A[i * 4 + k] = g - s * (h + g * tau);
+ A[k * 4 + j] = h + s * (g - h * tau);
+ }
+ for(int k = j + 1; k < 4; k++) {
+ double g = A[i * 4 + k], h = A[j * 4 + k];
+ A[i * 4 + k] = g - s * (h + g * tau);
+ A[j * 4 + k] = h + s * (g - h * tau);
+ }
+ for(int k = 0; k < 4; k++) {
+ double g = U[k * 4 + i], h = U[k * 4 + j];
+ U[k * 4 + i] = g - s * (h + g * tau);
+ U[k * 4 + j] = h + s * (g - h * tau);
+ }
+ }
+ pAij++;
+ }
+ }
+
+ for(int i = 0; i < 4; i++) B[i] += Z[i];
+ memcpy(D, B, 4 * sizeof(double));
+ memset(Z, 0, 4 * sizeof(double));
+ }
+
+ return false;
}
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4710 4711 4514 4996 )
-#endif
-
-#ifdef HAVE_CVCONFIG_H
+#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
#undef max
namespace cv {
-
-
-void drawCircles(Mat& img, const vector<Point2f>& corners, const vector<float>& radius)
-{
- for(size_t i = 0; i < corners.size(); i++)
- {
- circle(img, corners[i], cvRound(radius[i]), CV_RGB(255, 0, 0));
- }
-}
-
-int histQuantile(const Mat& hist, float quantile)
-{
- if(hist.dims > 1) return -1; // works for 1D histograms only
-
- float cur_sum = 0;
- float total_sum = (float)sum(hist).val[0];
- float quantile_sum = total_sum*quantile;
- for(int j = 0; j < hist.size[0]; j++)
- {
- cur_sum += (float)hist.at<float>(j);
- if(cur_sum > quantile_sum)
- {
- return j;
- }
- }
-
- return hist.size[0] - 1;
-}
-
-bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2)
+
+
+// static void drawCircles(Mat& img, const vector<Point2f>& corners, const vector<float>& radius)
+// {
+// for(size_t i = 0; i < corners.size(); i++)
+// {
+// circle(img, corners[i], cvRound(radius[i]), CV_RGB(255, 0, 0));
+// }
+// }
+
+// static int histQuantile(const Mat& hist, float quantile)
+// {
+// if(hist.dims > 1) return -1; // works for 1D histograms only
+
+// float cur_sum = 0;
+// float total_sum = (float)sum(hist).val[0];
+// float quantile_sum = total_sum*quantile;
+// for(int j = 0; j < hist.size[0]; j++)
+// {
+// cur_sum += (float)hist.at<float>(j);
+// if(cur_sum > quantile_sum)
+// {
+// return j;
+// }
+// }
+
+// return hist.size[0] - 1;
+// }
+
+inline bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2)
{
return p1.second < p2.second;
}
-void orderContours(const vector<vector<Point> >& contours, Point2f point, vector<std::pair<int, float> >& order)
+static void orderContours(const vector<vector<Point> >& contours, Point2f point, vector<std::pair<int, float> >& order)
{
order.clear();
size_t i, j, n = contours.size();
}
order.push_back(std::pair<int, float>((int)i, (float)min_dist));
}
-
+
std::sort(order.begin(), order.end(), is_smaller);
}
// fit second order curve to a set of 2D points
-void fitCurve2Order(const vector<Point2f>& /*points*/, vector<float>& /*curve*/)
+inline void fitCurve2Order(const vector<Point2f>& /*points*/, vector<float>& /*curve*/)
{
// TBD
}
-
-void findCurvesCross(const vector<float>& /*curve1*/, const vector<float>& /*curve2*/, Point2f& /*cross_point*/)
+
+inline void findCurvesCross(const vector<float>& /*curve1*/, const vector<float>& /*curve2*/, Point2f& /*cross_point*/)
{
}
-
-void findLinesCrossPoint(Point2f origin1, Point2f dir1, Point2f origin2, Point2f dir2, Point2f& cross_point)
+
+static void findLinesCrossPoint(Point2f origin1, Point2f dir1, Point2f origin2, Point2f dir2, Point2f& cross_point)
{
float det = dir2.x*dir1.y - dir2.y*dir1.x;
Point2f offset = origin2 - origin1;
-
+
float alpha = (dir2.x*offset.y - dir2.y*offset.x)/det;
cross_point = origin1 + dir1*alpha;
}
-
-void findCorner(const vector<Point>& contour, Point2f point, Point2f& corner)
-{
- // find the nearest point
- double min_dist = std::numeric_limits<double>::max();
- int min_idx = -1;
-
- // find corner idx
- for(size_t i = 0; i < contour.size(); i++)
- {
- double dist = norm(Point2f((float)contour[i].x, (float)contour[i].y) - point);
- if(dist < min_dist)
- {
- min_dist = dist;
- min_idx = (int)i;
- }
- }
- assert(min_idx >= 0);
-
- // temporary solution, have to make something more precise
- corner = contour[min_idx];
- return;
-}
-void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
+// static void findCorner(const vector<Point>& contour, Point2f point, Point2f& corner)
+// {
+// // find the nearest point
+// double min_dist = std::numeric_limits<double>::max();
+// int min_idx = -1;
+
+// // find corner idx
+// for(size_t i = 0; i < contour.size(); i++)
+// {
+// double dist = norm(Point2f((float)contour[i].x, (float)contour[i].y) - point);
+// if(dist < min_dist)
+// {
+// min_dist = dist;
+// min_idx = (int)i;
+// }
+// }
+// assert(min_idx >= 0);
+
+// // temporary solution, have to make something more precise
+// corner = contour[min_idx];
+// return;
+// }
+
+static void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
{
// find the nearest point
double min_dist = std::numeric_limits<double>::max();
int min_idx = -1;
-
+
// find corner idx
for(size_t i = 0; i < contour.size(); i++)
{
}
}
assert(min_idx >= 0);
-
+
// temporary solution, have to make something more precise
corner = contour[min_idx];
return;
}
-
-int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
+
+static int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
{
Mat bw;
//const double max_bell_width = 20; // we expect two bells with width bounded above
//const double min_bell_width = 5; // and below
-
+
double total_sum = sum(hist).val[0];
//double thresh = total_sum/(2*max_bell_width)*0.25f; // quarter of a bar inside a bell
-
+
// threshold(hist, bw, thresh, 255.0, CV_THRESH_BINARY);
-
+
double quantile_sum = 0.0;
//double min_quantile = 0.2;
double low_sum = 0;
{
quantile_sum += hist.at<float>(x);
if(quantile_sum < 0.2*total_sum) continue;
-
+
if(quantile_sum - low_sum > out_of_bells_fraction*total_sum)
{
if(max_segment_length < x - start_x)
start_x = x;
}
}
-
+
if(start_x == -1)
{
return 0;
return 1;
}
}
-
+
}
-
+
bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size region_size)
{
Mat img = _img.getMat(), cornersM = _corners.getMat();
float ranges[] = {0, 256};
const float* _ranges = ranges;
Mat hist;
-
+
#if defined(_SUBPIX_VERBOSE)
vector<float> radius;
radius.assign(corners.size(), 0.0f);
#endif //_SUBPIX_VERBOSE
-
-
+
+
Mat black_comp, white_comp;
for(int i = 0; i < ncorners; i++)
- {
+ {
int channels = 0;
Rect roi(cvRound(corners[i].x - region_size.width), cvRound(corners[i].y - region_size.height),
region_size.width*2 + 1, region_size.height*2 + 1);
Mat img_roi = img(roi);
calcHist(&img_roi, 1, &channels, Mat(), hist, 1, &nbins, &_ranges);
-
+
#if 0
int black_thresh = histQuantile(hist, 0.45f);
int white_thresh = histQuantile(hist, 0.55f);
#else
- int black_thresh, white_thresh;
+ int black_thresh = 0, white_thresh = 0;
segment_hist_max(hist, black_thresh, white_thresh);
#endif
-
+
threshold(img, black_comp, black_thresh, 255.0, CV_THRESH_BINARY_INV);
threshold(img, white_comp, white_thresh, 255.0, CV_THRESH_BINARY);
-
+
const int erode_count = 1;
erode(black_comp, black_comp, Mat(), Point(-1, -1), erode_count);
erode(white_comp, white_comp, Mat(), Point(-1, -1), erode_count);
imwrite("black.jpg", black_comp);
imwrite("white.jpg", white_comp);
#endif
-
-
+
+
vector<vector<Point> > white_contours, black_contours;
vector<Vec4i> white_hierarchy, black_hierarchy;
findContours(black_comp, black_contours, black_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
findContours(white_comp, white_contours, white_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
-
+
if(black_contours.size() < 5 || white_contours.size() < 5) continue;
-
+
// find two white and black blobs that are close to the input point
vector<std::pair<int, float> > white_order, black_order;
orderContours(black_contours, corners[i], black_order);
orderContours(white_contours, corners[i], white_order);
const float max_dist = 10.0f;
- if(black_order[0].second > max_dist || black_order[1].second > max_dist ||
+ if(black_order[0].second > max_dist || black_order[1].second > max_dist ||
white_order[0].second > max_dist || white_order[1].second > max_dist)
{
continue; // there will be no improvement in this corner position
}
-
- const vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first],
+
+ const vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first],
&white_contours[white_order[0].first], &white_contours[white_order[1].first]};
vector<Point2f> quads_approx[4];
Point2f quad_corners[4];
vector<Point2f> temp;
for(size_t j = 0; j < quads[k]->size(); j++) temp.push_back((*quads[k])[j]);
approxPolyDP(Mat(temp), quads_approx[k], 0.5, true);
-
+
findCorner(quads_approx[k], corners[i], quad_corners[k]);
#else
findCorner(*quads[k], corners[i], quad_corners[k]);
#endif
quad_corners[k] += Point2f(0.5f, 0.5f);
}
-
+
// cross two lines
Point2f origin1 = quad_corners[0];
Point2f dir1 = quad_corners[1] - quad_corners[0];
Point2f dir2 = quad_corners[3] - quad_corners[2];
double angle = acos(dir1.dot(dir2)/(norm(dir1)*norm(dir2)));
if(cvIsNaN(angle) || cvIsInf(angle) || angle < 0.5 || angle > CV_PI - 0.5) continue;
-
+
findLinesCrossPoint(origin1, dir1, origin2, dir2, corners[i]);
-
+
#if defined(_SUBPIX_VERBOSE)
radius[i] = norm(corners[i] - ground_truth_corners[ground_truth_idx])*6;
-
+
#if 1
Mat test(img.size(), CV_32FC3);
cvtColor(img, test, CV_GRAY2RGB);
waitKey(0);
#endif
#endif //_SUBPIX_VERBOSE
-
+
}
-
+
#if defined(_SUBPIX_VERBOSE)
Mat test(img.size(), CV_32FC3);
cvtColor(img, test, CV_GRAY2RGB);
imshow("corners", test);
waitKey();
#endif //_SUBPIX_VERBOSE
-
+
return true;
}
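
// A minimal sketch of refining chessboard corners with the routine above; the image
// path and the 9x6 pattern are assumptions, and region_size is the half-extent of the
// per-corner search window (see the Rect construction above).
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("board.png", 0);
    std::vector<cv::Point2f> corners;
    cv::Size pattern_size(9, 6);
    if (gray.empty() || !cv::findChessboardCorners(gray, pattern_size, corners))
        return 1;
    cv::find4QuadCornerSubpix(gray, corners, cv::Size(5, 5));   // 11x11 window per corner
    return 0;
}
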
{
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
- CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
+ CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
_rvec.create(3, 1, CV_64F);
_tvec.create(3, 1, CV_64F);
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
if (flags == CV_EPNP)
{
- cv::Mat undistortedPoints;
- cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
- epnp PnP(cameraMatrix, opoints, undistortedPoints);
-
+ cv::Mat undistortedPoints;
+ cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
+ epnp PnP(cameraMatrix, opoints, undistortedPoints);
+
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
PnP.compute_pose(R, tvec);
cv::Rodrigues(R, rvec);
- return true;
- }
- else if (flags == CV_P3P)
- {
- CV_Assert( npoints == 4);
- cv::Mat undistortedPoints;
- cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
- p3p P3Psolver(cameraMatrix);
+ return true;
+ }
+ else if (flags == CV_P3P)
+ {
+ CV_Assert( npoints == 4);
+ cv::Mat undistortedPoints;
+ cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
+ p3p P3Psolver(cameraMatrix);
cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
bool result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
if (result)
- cv::Rodrigues(R, rvec);
- return result;
- }
- else if (flags == CV_ITERATIVE)
- {
- CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
- CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
- CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
- cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
- c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
- &c_rvec, &c_tvec, useExtrinsicGuess );
- return true;
- }
- else
+ cv::Rodrigues(R, rvec);
+ return result;
+ }
+ else if (flags == CV_ITERATIVE)
+ {
+ CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
+ CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
+ CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
+ cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
+ c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
+ &c_rvec, &c_tvec, useExtrinsicGuess );
+ return true;
+ }
+ else
CV_Error(CV_StsBadArg, "The flags argument must be one of CV_ITERATIVE or CV_EPNP");
- return false;
+ return false;
}
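
// A minimal sketch of the flag dispatch implemented above; the camera matrix, the
// ground-truth pose and the four object points are made-up values used only to
// synthesize a consistent input.
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

int main()
{
    std::vector<cv::Point3f> objectPoints;
    objectPoints.push_back(cv::Point3f(0, 0, 0));
    objectPoints.push_back(cv::Point3f(1, 0, 0));
    objectPoints.push_back(cv::Point3f(0, 1, 0));
    objectPoints.push_back(cv::Point3f(1, 1, 1));
    cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    cv::Mat rvec_gt = (cv::Mat_<double>(3, 1) << 0.1, -0.2, 0.05);
    cv::Mat tvec_gt = (cv::Mat_<double>(3, 1) << 0.0,  0.0, 5.0);
    std::vector<cv::Point2f> imagePoints;
    cv::projectPoints(objectPoints, rvec_gt, tvec_gt, K, cv::Mat(), imagePoints);

    // recover the pose; CV_ITERATIVE works the same way, and CV_P3P needs exactly 4 points
    cv::Mat rvec, tvec;
    cv::solvePnP(objectPoints, imagePoints, K, cv::Mat(), rvec, tvec, false, CV_EPNP);
    return 0;
}
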
namespace cv
namespace pnpransac
{
const int MIN_POINTS_COUNT = 4;
-
- void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
+
+ static void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
{
modif_points.create(1, points.cols, CV_32FC3);
Mat R(3, 3, CV_64FC1);
tvec.copyTo(t);
transform(points, modif_points, transformation);
}
-
+
class Mutex
{
public:
Mutex() {
- }
+ }
void lock()
{
#ifdef HAVE_TBB
- resultsMutex.lock();
+ resultsMutex.lock();
#endif
}
-
+
void unlock()
{
#ifdef HAVE_TBB
resultsMutex.unlock();
#endif
}
-
+
private:
#ifdef HAVE_TBB
tbb::mutex resultsMutex;
#endif
};
-
+
struct CameraParameters
{
void init(Mat _intrinsics, Mat _distCoeffs)
_intrinsics.copyTo(intrinsics);
_distCoeffs.copyTo(distortion);
}
-
+
Mat intrinsics;
Mat distortion;
};
-
+
struct Parameters
{
int iterationsCount;
float reprojectionError;
int minInliersCount;
bool useExtrinsicGuess;
- int flags;
+ int flags;
CameraParameters camera;
};
-
- void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
+
+ static void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
const Parameters& params, vector<int>& inliers, Mat& rvec, Mat& tvec,
const Mat& rvecInit, const Mat& tvecInit, Mutex& resultsMutex)
{
colIndex = colIndex+1;
}
}
-
+
//filter same 3d points, hang in solvePnP
double eps = 1e-10;
int num_same_points = 0;
}
if (num_same_points > 0)
return;
-
+
Mat localRvec, localTvec;
rvecInit.copyTo(localRvec);
tvecInit.copyTo(localTvec);
-
- solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec,
- params.useExtrinsicGuess, params.flags);
-
-
+
+ solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec,
+ params.useExtrinsicGuess, params.flags);
+
+
vector<Point2f> projected_points;
projected_points.resize(objectPoints.cols);
projectPoints(objectPoints, localRvec, localTvec, params.camera.intrinsics, params.camera.distortion, projected_points);
-
+
Mat rotatedPoints;
project3dPoints(objectPoints, localRvec, localTvec, rotatedPoints);
-
+
vector<int> localInliers;
for (int i = 0; i < objectPoints.cols; i++)
{
localInliers.push_back(i);
}
}
-
+
if (localInliers.size() > inliers.size())
{
resultsMutex.lock();
-
+
inliers.clear();
inliers.resize(localInliers.size());
memcpy(&inliers[0], &localInliers[0], sizeof(int) * localInliers.size());
localRvec.copyTo(rvec);
localTvec.copyTo(tvec);
-
+
resultsMutex.unlock();
}
}
-
+
class PnPSolver
{
public:
}
}
}
- PnPSolver(const Mat& objectPoints, const Mat& imagePoints, const Parameters& parameters,
- Mat& rvec, Mat& tvec, vector<int>& inliers):
- objectPoints(objectPoints), imagePoints(imagePoints), parameters(parameters),
- rvec(rvec), tvec(tvec), inliers(inliers)
+ PnPSolver(const Mat& _objectPoints, const Mat& _imagePoints, const Parameters& _parameters,
+ Mat& _rvec, Mat& _tvec, vector<int>& _inliers):
+ objectPoints(_objectPoints), imagePoints(_imagePoints), parameters(_parameters),
+ rvec(_rvec), tvec(_tvec), inliers(_inliers)
{
rvec.copyTo(initRvec);
tvec.copyTo(initTvec);
}
private:
- PnPSolver& operator=(const PnPSolver&);
-
+ PnPSolver& operator=(const PnPSolver&);
+
const Mat& objectPoints;
const Mat& imagePoints;
const Parameters& parameters;
Mat &rvec, &tvec;
vector<int>& inliers;
Mat initRvec, initTvec;
-
+
static RNG generator;
static Mutex syncMutex;
-
+
void generateVar(vector<char>& mask) const
{
int size = (int)mask.size();
}
}
};
-
+
Mutex PnPSolver::syncMutex;
RNG PnPSolver::generator;
-
+
}
}
{
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
-
+
CV_Assert(opoints.isContinuous());
CV_Assert(opoints.depth() == CV_32F);
CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
CV_Assert(ipoints.isContinuous());
CV_Assert(ipoints.depth() == CV_32F);
CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);
-
+
_rvec.create(3, 1, CV_64FC1);
_tvec.create(3, 1, CV_64FC1);
Mat rvec = _rvec.getMat();
Mat tvec = _tvec.getMat();
-
+
Mat objectPoints = opoints.reshape(3, 1), imagePoints = ipoints.reshape(2, 1);
-
+
if (minInliersCount <= 0)
minInliersCount = objectPoints.cols;
cv::pnpransac::Parameters params;
params.reprojectionError = reprojectionError;
params.useExtrinsicGuess = useExtrinsicGuess;
params.camera.init(cameraMatrix, distCoeffs);
- params.flags = flags;
-
+ params.flags = flags;
+
vector<int> localInliers;
Mat localRvec, localTvec;
rvec.copyTo(localRvec);
tvec.copyTo(localTvec);
-
+
if (objectPoints.cols >= pnpransac::MIN_POINTS_COUNT)
{
parallel_for(BlockedRange(0,iterationsCount), cv::pnpransac::PnPSolver(objectPoints, imagePoints, params,
localRvec, localTvec, localInliers));
}
-
+
if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
{
- if (flags != CV_P3P)
- {
- int i, pointsCount = (int)localInliers.size();
- Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
- for (i = 0; i < pointsCount; i++)
- {
- int index = localInliers[i];
- Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
- imagePoints.col(index).copyTo(colInlierImagePoints);
- Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
- objectPoints.col(index).copyTo(colInlierObjectPoints);
- }
- solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true, flags);
- }
- localRvec.copyTo(rvec);
+ if (flags != CV_P3P)
+ {
+ int i, pointsCount = (int)localInliers.size();
+ Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
+ for (i = 0; i < pointsCount; i++)
+ {
+ int index = localInliers[i];
+ Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
+ imagePoints.col(index).copyTo(colInlierImagePoints);
+ Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
+ objectPoints.col(index).copyTo(colInlierObjectPoints);
+ }
+ solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true, flags);
+ }
+ localRvec.copyTo(rvec);
localTvec.copyTo(tvec);
if (_inliers.needed())
Mat(localInliers).copyTo(_inliers);
val = ((curr[x]*4 + curr[x-1] + curr[x+1] + prev[x] + next[x])*scale_g - sum*scale_s) >> 10;
dptr[x] = tab[val + OFS];
}
-
+
sum += vsum[x+wsz2] - vsum[x-wsz2-1];
val = ((curr[x]*5 + curr[x-1] + prev[x] + next[x])*scale_g - sum*scale_s) >> 10;
dptr[x] = tab[val + OFS];
const int OFS = 256*4, TABSZ = OFS*2 + 256;
uchar tab[TABSZ];
Size size = src.size();
-
+
for( x = 0; x < TABSZ; x++ )
tab[x] = (uchar)(x - OFS < -ftzero ? 0 : x - OFS > ftzero ? ftzero*2 : x - OFS + ftzero);
uchar val0 = tab[0 + OFS];
-
+
#if CV_SSE2
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
#endif
-
+
for( y = 0; y < size.height-1; y += 2 )
{
const uchar* srow1 = src.ptr<uchar>(y);
const uchar* srow3 = y < size.height-2 ? srow1 + src.step*2 : srow1;
uchar* dptr0 = dst.ptr<uchar>(y);
uchar* dptr1 = dptr0 + dst.step;
-
+
dptr0[0] = dptr0[size.width-1] = dptr1[0] = dptr1[size.width-1] = val0;
x = 1;
-
+
#if CV_SSE2
if( useSIMD )
{
d0 = _mm_sub_epi16(d0, c0);
d1 = _mm_sub_epi16(d1, c1);
-
+
__m128i c2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow2 + x - 1)), z);
__m128i c3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow3 + x - 1)), z);
__m128i d2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow2 + x + 1)), z);
__m128i d3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow3 + x + 1)), z);
-
+
d2 = _mm_sub_epi16(d2, c2);
d3 = _mm_sub_epi16(d3, c3);
-
+
__m128i v0 = _mm_add_epi16(d0, _mm_add_epi16(d2, _mm_add_epi16(d1, d1)));
__m128i v1 = _mm_add_epi16(d1, _mm_add_epi16(d3, _mm_add_epi16(d2, d2)));
v0 = _mm_packus_epi16(_mm_add_epi16(v0, ftz), _mm_add_epi16(v1, ftz));
v0 = _mm_min_epu8(v0, ftz2);
-
+
_mm_storel_epi64((__m128i*)(dptr0 + x), v0);
_mm_storel_epi64((__m128i*)(dptr1 + x), _mm_unpackhi_epi64(v0, v0));
}
}
#endif
-
+
for( ; x < size.width-1; x++ )
{
int d0 = srow0[x+1] - srow0[x-1], d1 = srow1[x+1] - srow1[x-1],
dptr1[x] = (uchar)v1;
}
}
-
+
for( ; y < size.height; y++ )
{
uchar* dptr = dst.ptr<uchar>(y);
short* costptr = cost.data ? (short*)cost.data + lofs + x : &costbuf;
int x0 = x - wsz2 - 1, x1 = x + wsz2;
const uchar* cbuf_sub = cbuf0 + ((x0 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
- uchar* cbuf = cbuf0 + ((x1 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
+ cbuf = cbuf0 + ((x1 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
hsad = hsad0 - dy0*ndisp;
lptr_sub = lptr0 + MIN(MAX(x0, -lofs), width-1-lofs) - dy0*sstep;
lptr = lptr0 + MIN(MAX(x1, -lofs), width-1-lofs) - dy0*sstep;
// initialize sums
for( d = 0; d < ndisp; d++ )
sad[d] = (ushort)(hsad0[d-ndisp*dy0]*(wsz2 + 2 - dy0));
-
+
hsad = hsad0 + (1 - dy0)*ndisp;
for( y = 1 - dy0; y < wsz2; y++, hsad += ndisp )
for( d = 0; d < ndisp; d += 16 )
{
__m128i u0 = _mm_load_si128((__m128i*)(hsad_sub + d));
__m128i u1 = _mm_load_si128((__m128i*)(hsad + d));
-
+
__m128i v0 = _mm_load_si128((__m128i*)(hsad_sub + d + 8));
__m128i v1 = _mm_load_si128((__m128i*)(hsad + d + 8));
-
+
__m128i usad8 = _mm_load_si128((__m128i*)(sad + d));
__m128i vsad8 = _mm_load_si128((__m128i*)(sad + d + 8));
-
+
u1 = _mm_sub_epi16(u1, u0);
v1 = _mm_sub_epi16(v1, v0);
usad8 = _mm_add_epi16(usad8, u1);
vsad8 = _mm_add_epi16(vsad8, v1);
-
+
mask = _mm_cmpgt_epi16(minsad8, usad8);
minsad8 = _mm_min_epi16(minsad8, usad8);
mind8 = _mm_max_epi16(mind8, _mm_and_si128(mask, d8));
-
+
_mm_store_si128((__m128i*)(sad + d), usad8);
_mm_store_si128((__m128i*)(sad + d + 8), vsad8);
-
+
mask = _mm_cmpgt_epi16(minsad8, vsad8);
minsad8 = _mm_min_epi16(minsad8, vsad8);
-
+
d8 = _mm_add_epi16(d8, dd_8);
mind8 = _mm_max_epi16(mind8, _mm_and_si128(mask, d8));
d8 = _mm_add_epi16(d8, dd_8);
dptr[y*dstep] = FILTERED;
continue;
}
-
+
__m128i minsad82 = _mm_unpackhi_epi64(minsad8, minsad8);
__m128i mind82 = _mm_unpackhi_epi64(mind8, mind8);
mask = _mm_cmpgt_epi16(minsad8, minsad82);
mind8 = _mm_xor_si128(mind8,_mm_and_si128(_mm_xor_si128(mind82,mind8),mask));
minsad8 = _mm_min_epi16(minsad8, minsad82);
-
+
minsad82 = _mm_shufflelo_epi16(minsad8, _MM_SHUFFLE(3,2,3,2));
mind82 = _mm_shufflelo_epi16(mind8, _MM_SHUFFLE(3,2,3,2));
mask = _mm_cmpgt_epi16(minsad8, minsad82);
mind8 = _mm_xor_si128(mind8,_mm_and_si128(_mm_xor_si128(mind82,mind8),mask));
minsad8 = _mm_min_epi16(minsad8, minsad82);
-
+
minsad82 = _mm_shufflelo_epi16(minsad8, 1);
mind82 = _mm_shufflelo_epi16(mind8, 1);
mask = _mm_cmpgt_epi16(minsad8, minsad82);
mind8 = _mm_xor_si128(mind8,_mm_and_si128(_mm_xor_si128(mind82,mind8),mask));
mind = (short)_mm_cvtsi128_si32(mind8);
minsad = sad[mind];
-
+
if( uniquenessRatio > 0 )
{
int thresh = minsad + ((minsad * uniquenessRatio) >> 8);
__m128i thresh8 = _mm_set1_epi16((short)(thresh + 1));
__m128i d1 = _mm_set1_epi16((short)(mind-1)), d2 = _mm_set1_epi16((short)(mind+1));
- __m128i dd_16 = _mm_add_epi16(dd_8, dd_8), d8 = _mm_sub_epi16(d0_8, dd_16);
+ __m128i dd_16 = _mm_add_epi16(dd_8, dd_8);
+ d8 = _mm_sub_epi16(d0_8, dd_16);
for( d = 0; d < ndisp; d += 16 )
{
if( 0 < mind && mind < ndisp - 1 )
{
- int p = sad[mind+1], n = sad[mind-1], d = p + n - 2*sad[mind] + std::abs(p - n);
+ int p = sad[mind+1], n = sad[mind-1];
+ d = p + n - 2*sad[mind] + std::abs(p - n);
dptr[y*dstep] = (short)(((ndisp - mind - 1 + mindisp)*256 + (d != 0 ? (p-n)*256/d : 0) + 15) >> 4);
}
else
htext[y] += tab[lval];
}
}
-
+
// initialize the left and right borders of the disparity map
for( y = 0; y < height; y++ )
{
int* costptr = cost.data ? (int*)cost.data + lofs + x : &costbuf;
int x0 = x - wsz2 - 1, x1 = x + wsz2;
const uchar* cbuf_sub = cbuf0 + ((x0 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
- uchar* cbuf = cbuf0 + ((x1 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
+ cbuf = cbuf0 + ((x1 + wsz2 + 1) % (wsz + 1))*cstep - dy0*ndisp;
hsad = hsad0 - dy0*ndisp;
lptr_sub = lptr0 + MIN(MAX(x0, -lofs), width-1-lofs) - dy0*sstep;
lptr = lptr0 + MIN(MAX(x1, -lofs), width-1-lofs) - dy0*sstep;
// initialize sums
for( d = 0; d < ndisp; d++ )
sad[d] = (int)(hsad0[d-ndisp*dy0]*(wsz2 + 2 - dy0));
-
+
hsad = hsad0 + (1 - dy0)*ndisp;
for( y = 1 - dy0; y < wsz2; y++, hsad += ndisp )
for( d = 0; d < ndisp; d++ )
{
sad[-1] = sad[1];
sad[ndisp] = sad[ndisp-2];
- int p = sad[mind+1], n = sad[mind-1], d = p + n - 2*sad[mind] + std::abs(p - n);
+ int p = sad[mind+1], n = sad[mind-1];
+ d = p + n - 2*sad[mind] + std::abs(p - n);
dptr[y*dstep] = (short)(((ndisp - mind - 1 + mindisp)*256 + (d != 0 ? (p-n)*256/d : 0) + 15) >> 4);
costptr[y*coststep] = sad[mind];
}
state = _state;
}
- void operator()( int ind ) const
+ void operator()( int ind ) const
{
if( state->preFilterType == CV_STEREO_BM_NORMALIZED_RESPONSE )
prefilterNorm( *imgs0[ind], *imgs[ind], state->preFilterSize, state->preFilterCap, buf[ind] );
else
- prefilterXSobel( *imgs0[ind], *imgs[ind], state->preFilterCap );
+ prefilterXSobel( *imgs0[ind], *imgs[ind], state->preFilterCap );
}
-
+
const Mat* imgs0[2];
- Mat* imgs[2];
+ Mat* imgs[2];
uchar* buf[2];
CvStereoBMState *state;
};
useShorts = _useShorts;
validDisparityRect = _validDisparityRect;
}
-
- void operator()( const BlockedRange& range ) const
+
+ void operator()( const BlockedRange& range ) const
{
int cols = left->cols, rows = left->rows;
int _row0 = min(cvRound(range.begin() * rows / nstripes), rows);
int _row1 = min(cvRound(range.end() * rows / nstripes), rows);
uchar *ptr = state->slidingSumBuf->data.ptr + range.begin() * stripeBufSize;
int FILTERED = (state->minDisparity - 1)*16;
-
+
Rect roi = validDisparityRect & Rect(0, _row0, cols, _row1 - _row0);
if( roi.height == 0 )
return;
int row0 = roi.y;
int row1 = roi.y + roi.height;
-
+
Mat part;
if( row0 > _row0 )
{
part = disp->rowRange(row1, _row1);
part = Scalar::all(FILTERED);
}
-
+
Mat left_i = left->rowRange(row0, row1);
Mat right_i = right->rowRange(row0, row1);
Mat disp_i = disp->rowRange(row0, row1);
Mat cost_i = state->disp12MaxDiff >= 0 ? Mat(state->cost).rowRange(row0, row1) : Mat();
-
-#if CV_SSE2
+
+#if CV_SSE2
if( useShorts )
findStereoCorrespondenceBM_SSE2( left_i, right_i, disp_i, cost_i, *state, ptr, row0, rows - row1 );
else
-#endif
+#endif
findStereoCorrespondenceBM( left_i, right_i, disp_i, cost_i, *state, ptr, row0, rows - row1 );
-
+
if( state->disp12MaxDiff >= 0 )
validateDisparity( disp_i, cost_i, state->minDisparity, state->numberOfDisparities, state->disp12MaxDiff );
-
+
if( roi.x > 0 )
{
part = disp_i.colRange(0, roi.x);
const Mat *left, *right;
Mat* disp;
CvStereoBMState *state;
-
+
int nstripes;
int stripeBufSize;
bool useShorts;
};
static void findStereoCorrespondenceBM( const Mat& left0, const Mat& right0, Mat& disp0, CvStereoBMState* state)
-{
+{
if (left0.size() != right0.size() || disp0.size() != left0.size())
CV_Error( CV_StsUnmatchedSizes, "All the images must have the same size" );
CV_Error( CV_StsUnsupportedFormat, "Both input images must have CV_8UC1" );
if (disp0.type() != CV_16SC1 && disp0.type() != CV_32FC1)
- CV_Error( CV_StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format" );
+ CV_Error( CV_StsUnsupportedFormat, "Disparity image must have CV_16SC1 or CV_32FC1 format" );
if( !state )
CV_Error( CV_StsNullPtr, "Stereo BM state is NULL." );
if( state->uniquenessRatio < 0 )
CV_Error( CV_StsOutOfRange, "uniqueness ratio must be non-negative" );
-
+
if( !state->preFilteredImg0 || state->preFilteredImg0->cols * state->preFilteredImg0->rows < left0.cols * left0.rows )
{
cvReleaseMat( &state->preFilteredImg0 );
}
Mat left(left0.size(), CV_8U, state->preFilteredImg0->data.ptr);
Mat right(right0.size(), CV_8U, state->preFilteredImg1->data.ptr);
-
+
int mindisp = state->minDisparity;
int ndisp = state->numberOfDisparities;
int rofs = -min(ndisp - 1 + mindisp, 0);
int width1 = width - rofs - ndisp + 1;
int FILTERED = (state->minDisparity - 1) << DISPARITY_SHIFT;
-
+
if( lofs >= width || rofs >= width || width1 < 1 )
{
- disp0 = Scalar::all( FILTERED * ( disp0.type() < CV_32F ? 1 : 1./(1 << DISPARITY_SHIFT) ) );
+ disp0 = Scalar::all( FILTERED * ( disp0.type() < CV_32F ? 1 : 1./(1 << DISPARITY_SHIFT) ) );
return;
}
Mat disp = disp0;
-
+
if( disp0.type() == CV_32F)
{
if( !state->disp || state->disp->rows != disp0.rows || state->disp->cols != disp0.cols )
}
disp = cv::cvarrToMat(state->disp);
}
-
- int wsz = state->SADWindowSize;
+
+ int wsz = state->SADWindowSize;
int bufSize0 = (int)((ndisp + 2)*sizeof(int));
bufSize0 += (int)((height+wsz+2)*ndisp*sizeof(int));
bufSize0 += (int)((height + wsz + 2)*sizeof(int));
int bufSize2 = 0;
if( state->speckleRange >= 0 && state->speckleWindowSize > 0 )
bufSize2 = width*height*(sizeof(cv::Point_<short>) + sizeof(int) + sizeof(uchar));
-
+
#if CV_SSE2
bool useShorts = state->preFilterCap <= 31 && state->SADWindowSize <= 21 && checkHardwareSupport(CV_CPU_SSE2);
#else
const bool useShorts = false;
#endif
-
-#ifdef HAVE_TBB
+
+#ifdef HAVE_TBB
const double SAD_overhead_coeff = 10.0;
- double N0 = 8000000 / (useShorts ? 1 : 4); // approx tbb's min number instructions reasonable for one thread
+ double N0 = 8000000 / (useShorts ? 1 : 4); // approx. minimum number of instructions per TBB thread for threading to pay off
double maxStripeSize = min(max(N0 / (width * ndisp), (wsz-1) * SAD_overhead_coeff), (double)height);
int nstripes = cvCeil(height / maxStripeSize);
#else
#endif
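The stripe-size heuristic above trades TBB scheduling overhead against per-stripe SAD work. As a purely illustrative check of the arithmetic (the image size, disparity count and window size below are assumed values, not taken from this patch):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        // Assumed inputs for illustration only.
        const double SAD_overhead_coeff = 10.0;
        int width = 640, height = 480, ndisp = 64, wsz = 21;
        bool useShorts = true;

        double N0 = 8000000 / (useShorts ? 1 : 4);                // ~8e6 "instructions" per stripe
        double maxStripeSize = std::min(
            std::max(N0 / (width * ndisp),                        // 8e6 / 40960 ~= 195.3
                     (wsz - 1) * SAD_overhead_coeff),             // 20 * 10      = 200
            (double)height);                                      // capped at 480 -> 200
        int nstripes = (int)std::ceil(height / maxStripeSize);    // ceil(2.4)    = 3
        std::printf("nstripes = %d\n", nstripes);
        return 0;
    }

So a VGA pair with 64 disparities and a 21x21 window would be split into roughly three stripes under this heuristic.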
int bufSize = max(bufSize0 * nstripes, max(bufSize1 * 2, bufSize2));
-
+
if( !state->slidingSumBuf || state->slidingSumBuf->cols < bufSize )
{
cvReleaseMat( &state->slidingSumBuf );
state->slidingSumBuf = cvCreateMat( 1, bufSize, CV_8U );
}
-
+
uchar *_buf = state->slidingSumBuf->data.ptr;
int idx[] = {0,1};
parallel_do(idx, idx+2, PrefilterInvoker(left0, right0, left, right, _buf, _buf + bufSize1, state));
-
+
Rect validDisparityRect(0, 0, width, height), R1 = state->roi1, R2 = state->roi2;
validDisparityRect = getValidDisparityROI(R1.area() > 0 ? Rect(0, 0, width, height) : validDisparityRect,
R2.area() > 0 ? Rect(0, 0, width, height) : validDisparityRect,
state->minDisparity, state->numberOfDisparities,
- state->SADWindowSize);
-
+ state->SADWindowSize);
+
parallel_for(BlockedRange(0, nstripes),
FindStereoCorrespInvoker(left, right, disp, state, nstripes,
bufSize0, useShorts, validDisparityRect));
-
+
if( state->speckleRange >= 0 && state->speckleWindowSize > 0 )
{
Mat buf(state->slidingSumBuf);
}
if (disp0.data != disp.data)
- disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
+ disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
}
StereoBM::StereoBM()
CV_Assert( disptype == CV_16S || disptype == CV_32F );
_disparity.create(left.size(), disptype);
Mat disparity = _disparity.getMat();
-
+
findStereoCorrespondenceBM(left, right, disparity, state);
}
template<> void Ptr<CvStereoBMState>::delete_obj()
{ cvReleaseStereoBMState(&obj); }
-
+
}
CV_IMPL void cvFindStereoCorrespondenceBM( const CvArr* leftarr, const CvArr* rightarr,
{
cv::Mat left = cv::cvarrToMat(leftarr),
right = cv::cvarrToMat(rightarr),
- disp = cv::cvarrToMat(disparr);
+ disp = cv::cvarrToMat(disparr);
cv::findStereoCorrespondenceBM(left, right, disp, state);
}
This is a variation of
"Stereo Processing by Semiglobal Matching and Mutual Information"
by Heiko Hirschmuller.
-
+
We match blocks rather than individual pixels, thus the algorithm is called
SGBM (Semi-global block matching)
- */
+ */
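For reference only (not part of the patch), the global energy that semiglobal matching approximately minimizes, as given in the Hirschmuller paper named in the comment above, can be written in the same ASCII style as the comments in this file:

    E(D) = sum_p ( C(p, D_p)
         + sum_{q in N_p} P1 * [ |D_p - D_q| == 1 ]
         + sum_{q in N_p} P2 * [ |D_p - D_q| >  1 ] )

where [.] is 1 when its argument holds and 0 otherwise, and P1 <= P2 are the small- and large-jump smoothness penalties that appear as P1 and P2 in the aggregation code below.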
#include "precomp.hpp"
#include <limits.h>
namespace cv
{
-
+
typedef uchar PixType;
typedef short CostType;
typedef short DispType;
row1[x] and row2[x-d]. The subpixel algorithm from
"Depth Discontinuities by Pixel-to-Pixel Stereo" by Stan Birchfield and C. Tomasi
is used, hence the suffix BT.
-
+
The temporary buffer should contain width2*2 elements.
*/
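Before the implementation, a scalar illustration of the per-pixel cost it accumulates may help; the helper name and signature below are mine, not OpenCV's, and it assumes the caller keeps both indices away from the row borders.

    #include <algorithm>

    // Birchfield-Tomasi dissimilarity between left-row pixel x and right-row pixel xr,
    // using the half-pixel interpolated neighbours of each sample, which is what the
    // scalar and SSE loops below accumulate per channel.
    static int btDissimilarity(const unsigned char* left, const unsigned char* right,
                               int x, int xr)
    {
        int u  = left[x];
        int ul = (u + left[x - 1]) / 2, ur = (u + left[x + 1]) / 2;   // half-pixel samples
        int u0 = std::min(std::min(ul, ur), u);
        int u1 = std::max(std::max(ul, ur), u);

        int v  = right[xr];
        int vl = (v + right[xr - 1]) / 2, vr = (v + right[xr + 1]) / 2;
        int v0 = std::min(std::min(vl, vr), v);
        int v1 = std::max(std::max(vl, vr), v);

        // dissimilarity measured against each image's interpolated range; keep the smaller
        int c0 = std::max(0, std::max(u - v1, v0 - u));
        int c1 = std::max(0, std::max(v - u1, u0 - v));
        return std::min(c0, c1);
    }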
static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int D = maxD - minD, width1 = maxX1 - minX1, width2 = maxX2 - minX2;
const PixType *row1 = img1.ptr<PixType>(y), *row2 = img2.ptr<PixType>(y);
PixType *prow1 = buffer + width2*2, *prow2 = prow1 + width*cn*2;
-
+
tab += tabOfs;
-
+
for( c = 0; c < cn*2; c++ )
{
- prow1[width*c] = prow1[width*c + width-1] =
+ prow1[width*c] = prow1[width*c + width-1] =
prow2[width*c] = prow2[width*c + width-1] = tab[0];
}
-
+
int n1 = y > 0 ? -(int)img1.step : 0, s1 = y < img1.rows-1 ? (int)img1.step : 0;
int n2 = y > 0 ? -(int)img2.step : 0, s2 = y < img2.rows-1 ? (int)img2.step : 0;
-
+
if( cn == 1 )
{
for( x = 1; x < width-1; x++ )
{
prow1[x] = tab[(row1[x+1] - row1[x-1])*2 + row1[x+n1+1] - row1[x+n1-1] + row1[x+s1+1] - row1[x+s1-1]];
prow2[width-1-x] = tab[(row2[x+1] - row2[x-1])*2 + row2[x+n2+1] - row2[x+n2-1] + row2[x+s2+1] - row2[x+s2-1]];
-
+
prow1[x+width] = row1[x];
prow2[width-1-x+width] = row2[x];
}
prow1[x] = tab[(row1[x*3+3] - row1[x*3-3])*2 + row1[x*3+n1+3] - row1[x*3+n1-3] + row1[x*3+s1+3] - row1[x*3+s1-3]];
prow1[x+width] = tab[(row1[x*3+4] - row1[x*3-2])*2 + row1[x*3+n1+4] - row1[x*3+n1-2] + row1[x*3+s1+4] - row1[x*3+s1-2]];
prow1[x+width*2] = tab[(row1[x*3+5] - row1[x*3-1])*2 + row1[x*3+n1+5] - row1[x*3+n1-1] + row1[x*3+s1+5] - row1[x*3+s1-1]];
-
+
prow2[width-1-x] = tab[(row2[x*3+3] - row2[x*3-3])*2 + row2[x*3+n2+3] - row2[x*3+n2-3] + row2[x*3+s2+3] - row2[x*3+s2-3]];
prow2[width-1-x+width] = tab[(row2[x*3+4] - row2[x*3-2])*2 + row2[x*3+n2+4] - row2[x*3+n2-2] + row2[x*3+s2+4] - row2[x*3+s2-2]];
prow2[width-1-x+width*2] = tab[(row2[x*3+5] - row2[x*3-1])*2 + row2[x*3+n2+5] - row2[x*3+n2-1] + row2[x*3+s2+5] - row2[x*3+s2-1]];
-
+
prow1[x+width*3] = row1[x*3];
prow1[x+width*4] = row1[x*3+1];
prow1[x+width*5] = row1[x*3+2];
-
+
prow2[width-1-x+width*3] = row2[x*3];
prow2[width-1-x+width*4] = row2[x*3+1];
prow2[width-1-x+width*5] = row2[x*3+2];
}
}
-
+
memset( cost, 0, width1*D*sizeof(cost[0]) );
-
+
buffer -= minX2;
cost -= minX1*D + minD; // simplify the cost indices inside the loop
-
-#if CV_SSE2
+
+#if CV_SSE2
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
#endif
-
-#if 1
+
+#if 1
for( c = 0; c < cn*2; c++, prow1 += width, prow2 += width )
{
int diff_scale = c < cn ? 0 : 2;
-
+
// precompute
// v0 = min(row2[x-1/2], row2[x], row2[x+1/2]) and
// v1 = max(row2[x-1/2], row2[x], row2[x+1/2]) and
buffer[x] = (PixType)v0;
buffer[x + width2] = (PixType)v1;
}
-
+
for( x = minX1; x < maxX1; x++ )
{
int u = prow1[x];
int ur = x < width-1 ? (u + prow1[x+1])/2 : u;
int u0 = min(ul, ur); u0 = min(u0, u);
int u1 = max(ul, ur); u1 = max(u1, u);
-
+
#if CV_SSE2
if( useSIMD )
{
__m128i _u = _mm_set1_epi8((char)u), _u0 = _mm_set1_epi8((char)u0);
__m128i _u1 = _mm_set1_epi8((char)u1), z = _mm_setzero_si128();
__m128i ds = _mm_cvtsi32_si128(diff_scale);
-
+
for( int d = minD; d < maxD; d += 16 )
{
__m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-x-1 + d));
__m128i c0 = _mm_max_epu8(_mm_subs_epu8(_u, _v1), _mm_subs_epu8(_v0, _u));
__m128i c1 = _mm_max_epu8(_mm_subs_epu8(_v, _u1), _mm_subs_epu8(_u0, _v));
__m128i diff = _mm_min_epu8(c0, c1);
-
+
c0 = _mm_load_si128((__m128i*)(cost + x*D + d));
c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8));
-
+
_mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_srl_epi16(_mm_unpacklo_epi8(diff,z), ds)));
_mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_srl_epi16(_mm_unpackhi_epi8(diff,z), ds)));
}
int v1 = buffer[width-x-1 + d + width2];
int c0 = max(0, u - v1); c0 = max(c0, v0 - u);
int c1 = max(0, v - u1); c1 = max(c1, u0 - v);
-
+
cost[x*D + d] = (CostType)(cost[x*D+d] + (min(c0, c1) >> diff_scale));
}
}
if( useSIMD )
{
__m128i _u = _mm_set1_epi8(u), z = _mm_setzero_si128();
-
+
for( int d = minD; d < maxD; d += 16 )
{
__m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-1-x + d));
__m128i diff = _mm_adds_epu8(_mm_subs_epu8(_u,_v), _mm_subs_epu8(_v,_u));
__m128i c0 = _mm_load_si128((__m128i*)(cost + x*D + d));
__m128i c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8));
-
+
_mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_unpacklo_epi8(diff,z)));
_mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_unpackhi_epi8(diff,z)));
}
minD <= d < maxD.
disp2full is the reverse disparity map, that is:
disp2full(x+roi.x,y+roi.y)=d means that img2(x+roi.x, y+roi.y) ~ img1(x+roi.x+d, y+roi.y)
-
+
note that disp1buf will have the same size as the roi and
disp2full will have the same size as img1 (or img2).
On exit, disp2buf is not the final disparity; it is an intermediate result that becomes
final after all the tiles are processed.
-
+
the disparity in disp1buf is written with sub-pixel accuracy
(4 fractional bits, see CvStereoSGBM::DISP_SCALE),
using quadratic interpolation, while the disparity in disp2buf
is written as is, without interpolation.
-
+
disp2cost also has the same size as img1 (or img2).
It contains the current minimum cost and is used to pick the best (minimum-cost) disparity.
- */
+ */
static void computeDisparitySGBM( const Mat& img1, const Mat& img2,
- Mat& disp1, const StereoSGBM& params,
+ Mat& disp1, const StereoSGBM& params,
Mat& buffer )
{
#if CV_SSE2
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
-
+
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
-#endif
-
+#endif
+
const int ALIGN = 16;
const int DISP_SHIFT = StereoSGBM::DISP_SHIFT;
const int DISP_SCALE = StereoSGBM::DISP_SCALE;
const CostType MAX_COST = SHRT_MAX;
-
+
int minD = params.minDisparity, maxD = minD + params.numberOfDisparities;
Size SADWindowSize;
SADWindowSize.width = SADWindowSize.height = params.SADWindowSize > 0 ? params.SADWindowSize : 5;
int npasses = params.fullDP ? 2 : 1;
const int TAB_OFS = 256*4, TAB_SIZE = 256 + TAB_OFS*2;
PixType clipTab[TAB_SIZE];
-
+
for( k = 0; k < TAB_SIZE; k++ )
clipTab[k] = (PixType)(min(max(k - TAB_OFS, -ftzero), ftzero) + ftzero);
-
+
if( minX1 >= maxX1 )
{
disp1 = Scalar::all(INVALID_DISP_SCALED);
return;
}
-
+
CV_Assert( D % 16 == 0 );
-
+
// NR is the number of aggregation directions; the loop over x below that computes Lr assumes NR == 8.
// If you change NR, modify that loop as well.
int D2 = D+16, NRD2 = NR2*D2;
-
+
// the number of L_r(.,.) and min_k L_r(.,.) lines in the buffer:
// for 8-way dynamic programming we need the current row and
// the previous row, i.e. 2 rows in total
const int NLR = 2;
const int LrBorder = NLR - 1;
-
+
// for each possible stereo match (img1(x,y) <=> img2(x-d,y))
// we keep pixel difference cost (C) and the summary cost over NR directions (S).
// we also keep all the partial costs for the previous line L_r(x,d) and also min_k L_r(x, k)
CSBufSize*2*sizeof(CostType) + // C, S
width*16*img1.channels()*sizeof(PixType) + // temp buffer for computing per-pixel cost
width*(sizeof(CostType) + sizeof(DispType)) + 1024; // disp2cost + disp2
-
+
if( !buffer.data || !buffer.isContinuous() ||
buffer.cols*buffer.rows*buffer.elemSize() < totalBufSize )
buffer.create(1, (int)totalBufSize, CV_8U);
-
+
// summary cost over different (nDirs) directions
CostType* Cbuf = (CostType*)alignPtr(buffer.data, ALIGN);
CostType* Sbuf = Cbuf + CSBufSize;
CostType* hsumBuf = Sbuf + CSBufSize;
CostType* pixDiff = hsumBuf + costBufSize*hsumBufNRows;
-
+
CostType* disp2cost = pixDiff + costBufSize + (LrSize + minLrSize)*NLR;
DispType* disp2ptr = (DispType*)(disp2cost + width);
PixType* tempBuf = (PixType*)(disp2ptr + width);
-
+
// add P2 to every C(x,y); this saves a few operations in the inner loops
for( k = 0; k < width1*D; k++ )
Cbuf[k] = (CostType)P2;
-
+
for( int pass = 1; pass <= npasses; pass++ )
{
int x1, y1, x2, y2, dx, dy;
-
+
if( pass == 1 )
{
y1 = 0; y2 = height; dy = 1;
y1 = height-1; y2 = -1; dy = -1;
x1 = width1-1; x2 = -1; dx = -1;
}
-
+
CostType *Lr[NLR]={0}, *minLr[NLR]={0};
-
+
for( k = 0; k < NLR; k++ )
{
// shift Lr[k] and minLr[k] pointers, because we allocated them with the borders,
minLr[k] = pixDiff + costBufSize + LrSize*NLR + minLrSize*k + NR2*2;
memset( minLr[k] - LrBorder*NR2, 0, minLrSize*sizeof(CostType) );
}
-
+
for( int y = y1; y != y2; y += dy )
{
int x, d;
DispType* disp1ptr = disp1.ptr<DispType>(y);
CostType* C = Cbuf + (!params.fullDP ? 0 : y*costBufSize);
CostType* S = Sbuf + (!params.fullDP ? 0 : y*costBufSize);
-
+
if( pass == 1 ) // compute C on the first pass, and reuse it on the second pass, if any.
{
int dy1 = y == 0 ? 0 : y + SH2, dy2 = y == 0 ? SH2 : dy1;
-
+
for( k = dy1; k <= dy2; k++ )
{
CostType* hsumAdd = hsumBuf + (min(k, height-1) % hsumBufNRows)*costBufSize;
-
+
if( k < height )
{
calcPixelCostBT( img1, img2, k, minD, maxD, pixDiff, tempBuf, clipTab, TAB_OFS, ftzero );
-
+
memset(hsumAdd, 0, D*sizeof(CostType));
for( x = 0; x <= SW2*D; x += D )
{
for( d = 0; d < D; d++ )
hsumAdd[d] = (CostType)(hsumAdd[d] + pixDiff[x + d]*scale);
}
-
+
if( y > 0 )
{
const CostType* hsumSub = hsumBuf + (max(y - SH2 - 1, 0) % hsumBufNRows)*costBufSize;
const CostType* Cprev = !params.fullDP || y == 0 ? C : C - costBufSize;
-
+
for( x = D; x < width1*D; x += D )
{
const CostType* pixAdd = pixDiff + min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + max(x - (SW2+1)*D, 0);
-
+
#if CV_SSE2
if( useSIMD )
{
{
const CostType* pixAdd = pixDiff + min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + max(x - (SW2+1)*D, 0);
-
+
for( d = 0; d < D; d++ )
hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]);
}
}
}
-
+
if( y == 0 )
{
int scale = k == 0 ? SH2 + 1 : 1;
C[x] = (CostType)(C[x] + hsumAdd[x]*scale);
}
}
-
+
// also, clear the S buffer
for( k = 0; k < width1*D; k++ )
S[k] = 0;
}
-
+
// clear the left and the right borders
memset( Lr[0] - NRD2*LrBorder - 8, 0, NRD2*LrBorder*sizeof(CostType) );
memset( Lr[0] + width1*NRD2 - 8, 0, NRD2*LrBorder*sizeof(CostType) );
memset( minLr[0] - NR2*LrBorder, 0, NR2*LrBorder*sizeof(CostType) );
memset( minLr[0] + width1*NR2, 0, NR2*LrBorder*sizeof(CostType) );
-
+
/*
[formula 13 in the paper]
compute L_r(p, d) = C(p, d) +
    min(L_r(p-r, d),
        L_r(p-r, d-1) + P1,
        L_r(p-r, d+1) + P1,
        min_k L_r(p-r, k) + P2) - min_k L_r(p-r, k)
where p = (x,y) and r is one of the aggregation directions.
*/
for( x = x1; x != x2; x += dx )
{
int xm = x*NR2, xd = xm*D2;
-
+
int delta0 = minLr[0][xm - dx*NR2] + P2, delta1 = minLr[1][xm - NR2 + 1] + P2;
int delta2 = minLr[1][xm + 2] + P2, delta3 = minLr[1][xm + NR2 + 3] + P2;
-
+
CostType* Lr_p0 = Lr[0] + xd - dx*NRD2;
CostType* Lr_p1 = Lr[1] + xd - NRD2 + D2;
CostType* Lr_p2 = Lr[1] + xd + D2*2;
CostType* Lr_p3 = Lr[1] + xd + NRD2 + D2*3;
-
+
Lr_p0[-1] = Lr_p0[D] = Lr_p1[-1] = Lr_p1[D] =
Lr_p2[-1] = Lr_p2[D] = Lr_p3[-1] = Lr_p3[D] = MAX_COST;
-
+
CostType* Lr_p = Lr[0] + xd;
const CostType* Cp = C + x*D;
CostType* Sp = S + x*D;
-
+
#if CV_SSE2
if( useSIMD )
{
__m128i _P1 = _mm_set1_epi16((short)P1);
-
+
__m128i _delta0 = _mm_set1_epi16((short)delta0);
__m128i _delta1 = _mm_set1_epi16((short)delta1);
__m128i _delta2 = _mm_set1_epi16((short)delta2);
__m128i _delta3 = _mm_set1_epi16((short)delta3);
__m128i _minL0 = _mm_set1_epi16((short)MAX_COST);
-
+
for( d = 0; d < D; d += 8 )
{
__m128i Cpd = _mm_load_si128((const __m128i*)(Cp + d));
__m128i L0, L1, L2, L3;
-
+
L0 = _mm_load_si128((const __m128i*)(Lr_p0 + d));
L1 = _mm_load_si128((const __m128i*)(Lr_p1 + d));
L2 = _mm_load_si128((const __m128i*)(Lr_p2 + d));
L3 = _mm_load_si128((const __m128i*)(Lr_p3 + d));
-
+
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d - 1)), _P1));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d + 1)), _P1));
-
+
L1 = _mm_min_epi16(L1, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p1 + d - 1)), _P1));
L1 = _mm_min_epi16(L1, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p1 + d + 1)), _P1));
-
+
L2 = _mm_min_epi16(L2, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p2 + d - 1)), _P1));
L2 = _mm_min_epi16(L2, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p2 + d + 1)), _P1));
-
+
L3 = _mm_min_epi16(L3, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p3 + d - 1)), _P1));
L3 = _mm_min_epi16(L3, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p3 + d + 1)), _P1));
-
+
L0 = _mm_min_epi16(L0, _delta0);
L0 = _mm_adds_epi16(_mm_subs_epi16(L0, _delta0), Cpd);
-
+
L1 = _mm_min_epi16(L1, _delta1);
L1 = _mm_adds_epi16(_mm_subs_epi16(L1, _delta1), Cpd);
-
+
L2 = _mm_min_epi16(L2, _delta2);
L2 = _mm_adds_epi16(_mm_subs_epi16(L2, _delta2), Cpd);
-
+
L3 = _mm_min_epi16(L3, _delta3);
L3 = _mm_adds_epi16(_mm_subs_epi16(L3, _delta3), Cpd);
-
+
_mm_store_si128( (__m128i*)(Lr_p + d), L0);
_mm_store_si128( (__m128i*)(Lr_p + d + D2), L1);
_mm_store_si128( (__m128i*)(Lr_p + d + D2*2), L2);
_mm_store_si128( (__m128i*)(Lr_p + d + D2*3), L3);
-
+
__m128i t0 = _mm_min_epi16(_mm_unpacklo_epi16(L0, L2), _mm_unpackhi_epi16(L0, L2));
__m128i t1 = _mm_min_epi16(_mm_unpacklo_epi16(L1, L3), _mm_unpackhi_epi16(L1, L3));
t0 = _mm_min_epi16(_mm_unpacklo_epi16(t0, t1), _mm_unpackhi_epi16(t0, t1));
_minL0 = _mm_min_epi16(_minL0, t0);
-
+
__m128i Sval = _mm_load_si128((const __m128i*)(Sp + d));
-
+
L0 = _mm_adds_epi16(L0, L1);
L2 = _mm_adds_epi16(L2, L3);
Sval = _mm_adds_epi16(Sval, L0);
Sval = _mm_adds_epi16(Sval, L2);
-
+
_mm_store_si128((__m128i*)(Sp + d), Sval);
}
-
+
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 8));
_mm_storel_epi64((__m128i*)&minLr[0][xm], _minL0);
}
#endif
{
int minL0 = MAX_COST, minL1 = MAX_COST, minL2 = MAX_COST, minL3 = MAX_COST;
-
+
for( d = 0; d < D; d++ )
{
int Cpd = Cp[d], L0, L1, L2, L3;
-
+
L0 = Cpd + min((int)Lr_p0[d], min(Lr_p0[d-1] + P1, min(Lr_p0[d+1] + P1, delta0))) - delta0;
- L1 = Cpd + min((int)Lr_p1[d], min(Lr_p1[d-1] + P1, min(Lr_p1[d+1] + P1, delta1))) - delta1;
+ L1 = Cpd + min((int)Lr_p1[d], min(Lr_p1[d-1] + P1, min(Lr_p1[d+1] + P1, delta1))) - delta1;
L2 = Cpd + min((int)Lr_p2[d], min(Lr_p2[d-1] + P1, min(Lr_p2[d+1] + P1, delta2))) - delta2;
L3 = Cpd + min((int)Lr_p3[d], min(Lr_p3[d-1] + P1, min(Lr_p3[d+1] + P1, delta3))) - delta3;
-
+
Lr_p[d] = (CostType)L0;
minL0 = min(minL0, L0);
-
+
Lr_p[d + D2] = (CostType)L1;
minL1 = min(minL1, L1);
-
+
Lr_p[d + D2*2] = (CostType)L2;
minL2 = min(minL2, L2);
-
+
Lr_p[d + D2*3] = (CostType)L3;
minL3 = min(minL3, L3);
-
+
Sp[d] = saturate_cast<CostType>(Sp[d] + L0 + L1 + L2 + L3);
}
minLr[0][xm] = (CostType)minL0;
minLr[0][xm+3] = (CostType)minL3;
}
}
-
+
if( pass == npasses )
{
for( x = 0; x < width; x++ )
disp1ptr[x] = disp2ptr[x] = (DispType)INVALID_DISP_SCALED;
disp2cost[x] = MAX_COST;
}
-
+
for( x = width1 - 1; x >= 0; x-- )
{
CostType* Sp = S + x*D;
int minS = MAX_COST, bestDisp = -1;
-
+
if( npasses == 1 )
{
int xm = x*NR2, xd = xm*D2;
-
+
int minL0 = MAX_COST;
int delta0 = minLr[0][xm + NR2] + P2;
CostType* Lr_p0 = Lr[0] + xd + NRD2;
Lr_p0[-1] = Lr_p0[D] = MAX_COST;
CostType* Lr_p = Lr[0] + xd;
-
+
const CostType* Cp = C + x*D;
-
+
#if CV_SSE2
if( useSIMD )
{
__m128i _P1 = _mm_set1_epi16((short)P1);
__m128i _delta0 = _mm_set1_epi16((short)delta0);
-
+
__m128i _minL0 = _mm_set1_epi16((short)minL0);
__m128i _minS = _mm_set1_epi16(MAX_COST), _bestDisp = _mm_set1_epi16(-1);
__m128i _d8 = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7), _8 = _mm_set1_epi16(8);
-
+
for( d = 0; d < D; d += 8 )
{
__m128i Cpd = _mm_load_si128((const __m128i*)(Cp + d)), L0;
-
+
L0 = _mm_load_si128((const __m128i*)(Lr_p0 + d));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d - 1)), _P1));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d + 1)), _P1));
L0 = _mm_min_epi16(L0, _delta0);
L0 = _mm_adds_epi16(_mm_subs_epi16(L0, _delta0), Cpd);
-
+
_mm_store_si128((__m128i*)(Lr_p + d), L0);
_minL0 = _mm_min_epi16(_minL0, L0);
L0 = _mm_adds_epi16(L0, *(__m128i*)(Sp + d));
_mm_store_si128((__m128i*)(Sp + d), L0);
-
+
__m128i mask = _mm_cmpgt_epi16(_minS, L0);
_minS = _mm_min_epi16(_minS, L0);
_bestDisp = _mm_xor_si128(_bestDisp, _mm_and_si128(_mm_xor_si128(_bestDisp,_d8), mask));
_d8 = _mm_adds_epi16(_d8, _8);
}
-
+
short CV_DECL_ALIGNED(16) bestDispBuf[8];
_mm_store_si128((__m128i*)bestDispBuf, _bestDisp);
-
+
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 8));
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 4));
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 2));
-
+
__m128i qS = _mm_min_epi16(_minS, _mm_srli_si128(_minS, 8));
qS = _mm_min_epi16(qS, _mm_srli_si128(qS, 4));
qS = _mm_min_epi16(qS, _mm_srli_si128(qS, 2));
-
+
minLr[0][xm] = (CostType)_mm_cvtsi128_si32(_minL0);
minS = (CostType)_mm_cvtsi128_si32(qS);
-
+
qS = _mm_shuffle_epi32(_mm_unpacklo_epi16(qS, qS), 0);
qS = _mm_cmpeq_epi16(_minS, qS);
int idx = _mm_movemask_epi8(_mm_packs_epi16(qS, qS)) & 255;
-
+
bestDisp = bestDispBuf[LSBTab[idx]];
}
else
for( d = 0; d < D; d++ )
{
int L0 = Cp[d] + min((int)Lr_p0[d], min(Lr_p0[d-1] + P1, min(Lr_p0[d+1] + P1, delta0))) - delta0;
-
+
Lr_p[d] = (CostType)L0;
minL0 = min(minL0, L0);
-
+
int Sval = Sp[d] = saturate_cast<CostType>(Sp[d] + L0);
if( Sval < minS )
{
}
}
}
-
+
for( d = 0; d < D; d++ )
{
if( Sp[d]*(100 - uniquenessRatio) < minS*100 && std::abs(bestDisp - d) > 1 )
if( d < D )
continue;
d = bestDisp;
- int x2 = x + minX1 - d - minD;
- if( disp2cost[x2] > minS )
+ int _x2 = x + minX1 - d - minD;
+ if( disp2cost[_x2] > minS )
{
- disp2cost[x2] = (CostType)minS;
- disp2ptr[x2] = (DispType)(d + minD);
+ disp2cost[_x2] = (CostType)minS;
+ disp2ptr[_x2] = (DispType)(d + minD);
}
-
+
if( 0 < d && d < D-1 )
{
// do subpixel quadratic interpolation:
d *= DISP_SCALE;
disp1ptr[x + minX1] = (DispType)(d + minD*DISP_SCALE);
}
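The sub-pixel step above refines the integer winner by fitting a parabola through the aggregated costs at d-1, d and d+1. A minimal sketch of that refinement, assuming DISP_SCALE = 1 << DISP_SHIFT with DISP_SHIFT = 4 as used elsewhere in this file (the helper name is mine, not OpenCV's):

    #include <algorithm>

    // Parabola through (d-1, S[d-1]), (d, S[d]), (d+1, S[d+1]); its minimum lies at
    // d + (S[d-1] - S[d+1]) / (2*(S[d-1] + S[d+1] - 2*S[d])), returned here in
    // fixed-point 1/DISP_SCALE units with rounding.
    static int refineDisparity(const short* S, int d, int DISP_SCALE)
    {
        int denom = std::max(S[d - 1] + S[d + 1] - 2 * S[d], 1);   // curvature, kept >= 1
        int offset = ((S[d - 1] - S[d + 1]) * DISP_SCALE + denom) / (denom * 2);
        return d * DISP_SCALE + offset;
    }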
-
+
for( x = minX1; x < maxX1; x++ )
{
// we round the computed disparity both towards -inf and +inf and check
// if either of the corresponding disparities in disp2 is consistent.
// This is to give the computed disparity a chance to look valid if it is.
- int d = disp1ptr[x];
- if( d == INVALID_DISP_SCALED )
+ int d1 = disp1ptr[x];
+ if( d1 == INVALID_DISP_SCALED )
continue;
- int _d = d >> DISP_SHIFT;
- int d_ = (d + DISP_SCALE-1) >> DISP_SHIFT;
+ int _d = d1 >> DISP_SHIFT;
+ int d_ = (d1 + DISP_SCALE-1) >> DISP_SHIFT;
int _x = x - _d, x_ = x - d_;
if( 0 <= _x && _x < width && disp2ptr[_x] >= minD && std::abs(disp2ptr[_x] - _d) > disp12MaxDiff &&
0 <= x_ && x_ < width && disp2ptr[x_] >= minD && std::abs(disp2ptr[x_] - d_) > disp12MaxDiff )
disp1ptr[x] = (DispType)INVALID_DISP_SCALED;
}
}
-
+
// now shift the cyclic buffers
std::swap( Lr[0], Lr[1] );
std::swap( minLr[0], minLr[1] );
Mat left = _left.getMat(), right = _right.getMat();
CV_Assert( left.size() == right.size() && left.type() == right.type() &&
left.depth() == DataType<PixType>::depth );
-
+
_disp.create( left.size(), CV_16S );
Mat disp = _disp.getMat();
-
+
computeDisparitySGBM( left, right, disp, *this, buffer );
medianBlur(disp, disp, 3);
-
+
if( speckleWindowSize > 0 )
filterSpeckles(disp, (minDisparity - 1)*DISP_SCALE, speckleWindowSize, DISP_SCALE*speckleRange, buffer);
}
{
int SW2 = SADWindowSize/2;
int minD = minDisparity, maxD = minDisparity + numberOfDisparities - 1;
-
+
int xmin = max(roi1.x, roi2.x + maxD) + SW2;
int xmax = min(roi1.x + roi1.width, roi2.x + roi2.width - minD) - SW2;
int ymin = max(roi1.y, roi2.y) + SW2;
int ymax = min(roi1.y + roi1.height, roi2.y + roi2.height) - SW2;
-
+
Rect r(xmin, ymin, xmax - xmin, ymax - ymin);
-
+
return r.width > 0 && r.height > 0 ? r : Rect();
-}
-
}
-
+
+}
+
void cv::filterSpeckles( InputOutputArray _img, double _newval, int maxSpeckleSize,
double _maxDiff, InputOutputArray __buf )
{
Mat img = _img.getMat();
Mat temp, &_buf = __buf.needed() ? __buf.getMatRef() : temp;
CV_Assert( img.type() == CV_16SC1 );
-
+
int newVal = cvRound(_newval);
int maxDiff = cvRound(_maxDiff);
int width = img.cols, height = img.rows, npixels = width*height;
size_t bufSize = npixels*(int)(sizeof(Point2s) + sizeof(int) + sizeof(uchar));
if( !_buf.isContinuous() || !_buf.data || _buf.cols*_buf.rows*_buf.elemSize() < bufSize )
_buf.create(1, (int)bufSize, CV_8U);
-
+
uchar* buf = _buf.data;
int i, j, dstep = (int)(img.step/sizeof(short));
int* labels = (int*)buf;
buf += npixels*sizeof(wbuf[0]);
uchar* rtype = (uchar*)buf;
int curlabel = 0;
-
+
// clear out label assignments
memset(labels, 0, npixels*sizeof(labels[0]));
-
+
for( i = 0; i < height; i++ )
{
short* ds = img.ptr<short>(i);
int* ls = labels + width*i;
-
+
for( j = 0; j < width; j++ )
{
- if( ds[j] != newVal ) // not a bad disparity
+ if( ds[j] != newVal ) // not a bad disparity
{
- if( ls[j] ) // has a label, check for bad label
- {
+ if( ls[j] ) // has a label, check for bad label
+ {
if( rtype[ls[j]] ) // small region, zero out disparity
ds[j] = (short)newVal;
}
// no label, assign and propagate
else
{
- Point2s* ws = wbuf; // initialize wavefront
- Point2s p((short)j, (short)i); // current pixel
- curlabel++; // next label
- int count = 0; // current region size
+ Point2s* ws = wbuf; // initialize wavefront
+ Point2s p((short)j, (short)i); // current pixel
+ curlabel++; // next label
+ int count = 0; // current region size
ls[j] = curlabel;
-
+
// wavefront propagation
while( ws >= wbuf ) // wavefront not empty
{
short* dpp = &img.at<short>(p.y, p.x);
short dp = *dpp;
int* lpp = labels + width*p.y + p.x;
-
+
if( p.x < width-1 && !lpp[+1] && dpp[+1] != newVal && std::abs(dp - dpp[+1]) <= maxDiff )
{
lpp[+1] = curlabel;
*ws++ = Point2s(p.x+1, p.y);
}
-
+
if( p.x > 0 && !lpp[-1] && dpp[-1] != newVal && std::abs(dp - dpp[-1]) <= maxDiff )
{
lpp[-1] = curlabel;
*ws++ = Point2s(p.x-1, p.y);
}
-
+
if( p.y < height-1 && !lpp[+width] && dpp[+dstep] != newVal && std::abs(dp - dpp[+dstep]) <= maxDiff )
{
lpp[+width] = curlabel;
*ws++ = Point2s(p.x, p.y+1);
}
-
+
if( p.y > 0 && !lpp[-width] && dpp[-dstep] != newVal && std::abs(dp - dpp[-dstep]) <= maxDiff )
{
lpp[-width] = curlabel;
*ws++ = Point2s(p.x, p.y-1);
}
-
+
// pop most recent and propagate
// NB: could try least recent, maybe better convergence
p = *--ws;
}
-
+
// assign label type
- if( count <= maxSpeckleSize ) // speckle region
+ if( count <= maxSpeckleSize ) // speckle region
{
- rtype[ls[j]] = 1; // small region label
+ rtype[ls[j]] = 1; // small region label
ds[j] = (short)newVal;
}
else
- rtype[ls[j]] = 0; // large region label
+ rtype[ls[j]] = 0; // large region label
}
}
}
}
-}
-
+}
+
void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDisparity,
int numberOfDisparities, int disp12MaxDiff )
{
const int DISP_SHIFT = 4, DISP_SCALE = 1 << DISP_SHIFT;
int INVALID_DISP = minD - 1, INVALID_DISP_SCALED = INVALID_DISP*DISP_SCALE;
int costType = cost.type();
-
+
disp12MaxDiff *= DISP_SCALE;
-
+
CV_Assert( numberOfDisparities > 0 && disp.type() == CV_16S &&
(costType == CV_16S || costType == CV_32S) &&
disp.size() == cost.size() );
-
+
for( int y = 0; y < rows; y++ )
{
short* dptr = disp.ptr<short>(y);
-
+
for( x = 0; x < cols; x++ )
{
disp2buf[x] = INVALID_DISP_SCALED;
disp2cost[x] = INT_MAX;
}
-
+
if( costType == CV_16S )
{
const short* cptr = cost.ptr<short>(y);
-
+
for( x = minX1; x < maxX1; x++ )
{
int d = dptr[x], c = cptr[x];
int x2 = x - ((d + DISP_SCALE/2) >> DISP_SHIFT);
-
+
if( disp2cost[x2] > c )
{
disp2cost[x2] = c;
else
{
const int* cptr = cost.ptr<int>(y);
-
+
for( x = minX1; x < maxX1; x++ )
{
int d = dptr[x], c = cptr[x];
int x2 = x - ((d + DISP_SCALE/2) >> DISP_SHIFT);
-
+
if( disp2cost[x2] > c )  // keep the smaller cost; disp2cost is initialized to INT_MAX
{
disp2cost[x2] = c;
}
}
}
-
+
for( x = minX1; x < maxX1; x++ )
{
// we round the computed disparity both towards -inf and +inf and check
protected:
int compare(double* val, double* refVal, int len,
double eps, const char* paramName);
- virtual void calibrate( int imageCount, int* pointCounts,
- CvSize imageSize, CvPoint2D64f* imagePoints, CvPoint3D64f* objectPoints,
- double* distortionCoeffs, double* cameraMatrix, double* translationVectors,
- double* rotationMatrices, int flags ) = 0;
- virtual void project( int pointCount, CvPoint3D64f* objectPoints,
- double* rotationMatrix, double* translationVector,
- double* cameraMatrix, double* distortion, CvPoint2D64f* imagePoints ) = 0;
+ virtual void calibrate( int imageCount, int* pointCounts,
+ CvSize imageSize, CvPoint2D64f* imagePoints, CvPoint3D64f* objectPoints,
+ double* distortionCoeffs, double* cameraMatrix, double* translationVectors,
+ double* rotationMatrices, int flags ) = 0;
+ virtual void project( int pointCount, CvPoint3D64f* objectPoints,
+ double* rotationMatrix, double* translationVector,
+ double* cameraMatrix, double* distortion, CvPoint2D64f* imagePoints ) = 0;
void run(int);
};
void CV_CameraCalibrationTest::clear()
{
- cvtest::BaseTest::clear();
+ cvtest::BaseTest::clear();
}
int CV_CameraCalibrationTest::compare(double* val, double* ref_val, int len,
/* ---- Reproject points to the image ---- */
for( currImage = 0; currImage < numImages; currImage++ )
{
- int numPoints = etalonSize.width * etalonSize.height;
- project( numPoints,
- objectPoints + currImage * numPoints,
+ int nPoints = etalonSize.width * etalonSize.height;
+ project( nPoints,
+ objectPoints + currImage * nPoints,
rotMatrs + currImage * 9,
transVects + currImage * 3,
cameraMatrix,
distortion,
- reprojectPoints + currImage * numPoints);
+ reprojectPoints + currImage * nPoints);
}
/* ----- Compute reprojection error ----- */
class CV_CameraCalibrationTest_C : public CV_CameraCalibrationTest
{
public:
- CV_CameraCalibrationTest_C(){}
+ CV_CameraCalibrationTest_C(){}
protected:
- virtual void calibrate( int imageCount, int* pointCounts,
- CvSize imageSize, CvPoint2D64f* imagePoints, CvPoint3D64f* objectPoints,
- double* distortionCoeffs, double* cameraMatrix, double* translationVectors,
- double* rotationMatrices, int flags );
- virtual void project( int pointCount, CvPoint3D64f* objectPoints,
- double* rotationMatrix, double* translationVector,
- double* cameraMatrix, double* distortion, CvPoint2D64f* imagePoints );
+ virtual void calibrate( int imageCount, int* pointCounts,
+ CvSize imageSize, CvPoint2D64f* imagePoints, CvPoint3D64f* objectPoints,
+ double* distortionCoeffs, double* cameraMatrix, double* translationVectors,
+ double* rotationMatrices, int flags );
+ virtual void project( int pointCount, CvPoint3D64f* objectPoints,
+ double* rotationMatrix, double* translationVector,
+ double* cameraMatrix, double* distortion, CvPoint2D64f* imagePoints );
};
void CV_CameraCalibrationTest_C::calibrate( int imageCount, int* pointCounts,
- CvSize imageSize, CvPoint2D64f* imagePoints, CvPoint3D64f* objectPoints,
- double* distortionCoeffs, double* cameraMatrix, double* translationVectors,
- double* rotationMatrices, int flags )
+ CvSize imageSize, CvPoint2D64f* imagePoints, CvPoint3D64f* objectPoints,
+ double* distortionCoeffs, double* cameraMatrix, double* translationVectors,
+ double* rotationMatrices, int flags )
{
int i, total = 0;
for( i = 0; i < imageCount; i++ )
total += pointCounts[i];
-
+
CvMat _objectPoints = cvMat(1, total, CV_64FC3, objectPoints);
CvMat _imagePoints = cvMat(1, total, CV_64FC2, imagePoints);
CvMat _pointCounts = cvMat(1, imageCount, CV_32S, pointCounts);
CvMat _distCoeffs = cvMat(4, 1, CV_64F, distortionCoeffs);
CvMat _rotationMatrices = cvMat(imageCount, 9, CV_64F, rotationMatrices);
CvMat _translationVectors = cvMat(imageCount, 3, CV_64F, translationVectors);
-
+
cvCalibrateCamera2(&_objectPoints, &_imagePoints, &_pointCounts, imageSize,
&_cameraMatrix, &_distCoeffs, &_rotationMatrices, &_translationVectors,
flags);
}
void CV_CameraCalibrationTest_C::project( int pointCount, CvPoint3D64f* objectPoints,
- double* rotationMatrix, double* translationVector,
- double* cameraMatrix, double* distortion, CvPoint2D64f* imagePoints )
+ double* rotationMatrix, double* translationVector,
+ double* cameraMatrix, double* distortion, CvPoint2D64f* imagePoints )
{
- CvMat _objectPoints = cvMat(1, pointCount, CV_64FC3, objectPoints);
+ CvMat _objectPoints = cvMat(1, pointCount, CV_64FC3, objectPoints);
CvMat _imagePoints = cvMat(1, pointCount, CV_64FC2, imagePoints);
CvMat _cameraMatrix = cvMat(3, 3, CV_64F, cameraMatrix);
CvMat _distCoeffs = cvMat(4, 1, CV_64F, distortion);
CvMat _rotationMatrix = cvMat(3, 3, CV_64F, rotationMatrix);
CvMat _translationVector = cvMat(1, 3, CV_64F, translationVector);
-
+
cvProjectPoints2(&_objectPoints, &_rotationMatrix, &_translationVector, &_cameraMatrix, &_distCoeffs, &_imagePoints);
}
class CV_CameraCalibrationTest_CPP : public CV_CameraCalibrationTest
{
public:
- CV_CameraCalibrationTest_CPP(){}
+ CV_CameraCalibrationTest_CPP(){}
protected:
- virtual void calibrate( int imageCount, int* pointCounts,
- CvSize imageSize, CvPoint2D64f* imagePoints, CvPoint3D64f* objectPoints,
- double* distortionCoeffs, double* cameraMatrix, double* translationVectors,
- double* rotationMatrices, int flags );
- virtual void project( int pointCount, CvPoint3D64f* objectPoints,
- double* rotationMatrix, double* translationVector,
- double* cameraMatrix, double* distortion, CvPoint2D64f* imagePoints );
+ virtual void calibrate( int imageCount, int* pointCounts,
+ CvSize imageSize, CvPoint2D64f* imagePoints, CvPoint3D64f* objectPoints,
+ double* distortionCoeffs, double* cameraMatrix, double* translationVectors,
+ double* rotationMatrices, int flags );
+ virtual void project( int pointCount, CvPoint3D64f* objectPoints,
+ double* rotationMatrix, double* translationVector,
+ double* cameraMatrix, double* distortion, CvPoint2D64f* imagePoints );
};
void CV_CameraCalibrationTest_CPP::calibrate( int imageCount, int* pointCounts,
- CvSize _imageSize, CvPoint2D64f* _imagePoints, CvPoint3D64f* _objectPoints,
- double* _distortionCoeffs, double* _cameraMatrix, double* translationVectors,
- double* rotationMatrices, int flags )
+ CvSize _imageSize, CvPoint2D64f* _imagePoints, CvPoint3D64f* _objectPoints,
+ double* _distortionCoeffs, double* _cameraMatrix, double* translationVectors,
+ double* rotationMatrices, int flags )
{
- vector<vector<Point3f> > objectPoints( imageCount );
- vector<vector<Point2f> > imagePoints( imageCount );
- Size imageSize = _imageSize;
- Mat cameraMatrix, distCoeffs(1,4,CV_64F,Scalar::all(0));
- vector<Mat> rvecs, tvecs;
-
- CvPoint3D64f* op = _objectPoints;
- CvPoint2D64f* ip = _imagePoints;
- vector<vector<Point3f> >::iterator objectPointsIt = objectPoints.begin();
- vector<vector<Point2f> >::iterator imagePointsIt = imagePoints.begin();
- for( int i = 0; i < imageCount; ++objectPointsIt, ++imagePointsIt, i++ )
- {
- int num = pointCounts[i];
- objectPointsIt->resize( num );
- imagePointsIt->resize( num );
- vector<Point3f>::iterator oIt = objectPointsIt->begin();
- vector<Point2f>::iterator iIt = imagePointsIt->begin();
- for( int j = 0; j < num; ++oIt, ++iIt, j++, op++, ip++)
- {
- oIt->x = (float)op->x, oIt->y = (float)op->y, oIt->z = (float)op->z;
- iIt->x = (float)ip->x, iIt->y = (float)ip->y;
- }
- }
-
- calibrateCamera( objectPoints,
- imagePoints,
- imageSize,
- cameraMatrix,
- distCoeffs,
- rvecs,
- tvecs,
- flags );
-
- assert( cameraMatrix.type() == CV_64FC1 );
- memcpy( _cameraMatrix, cameraMatrix.data, 9*sizeof(double) );
-
- assert( cameraMatrix.type() == CV_64FC1 );
- memcpy( _distortionCoeffs, distCoeffs.data, 4*sizeof(double) );
-
- vector<Mat>::iterator rvecsIt = rvecs.begin();
- vector<Mat>::iterator tvecsIt = tvecs.begin();
- double *rm = rotationMatrices,
- *tm = translationVectors;
- assert( rvecsIt->type() == CV_64FC1 );
- assert( tvecsIt->type() == CV_64FC1 );
- for( int i = 0; i < imageCount; ++rvecsIt, ++tvecsIt, i++, rm+=9, tm+=3 )
- {
- Mat r9( 3, 3, CV_64FC1 );
- Rodrigues( *rvecsIt, r9 );
- memcpy( rm, r9.data, 9*sizeof(double) );
- memcpy( tm, tvecsIt->data, 3*sizeof(double) );
- }
+ vector<vector<Point3f> > objectPoints( imageCount );
+ vector<vector<Point2f> > imagePoints( imageCount );
+ Size imageSize = _imageSize;
+ Mat cameraMatrix, distCoeffs(1,4,CV_64F,Scalar::all(0));
+ vector<Mat> rvecs, tvecs;
+
+ CvPoint3D64f* op = _objectPoints;
+ CvPoint2D64f* ip = _imagePoints;
+ vector<vector<Point3f> >::iterator objectPointsIt = objectPoints.begin();
+ vector<vector<Point2f> >::iterator imagePointsIt = imagePoints.begin();
+ for( int i = 0; i < imageCount; ++objectPointsIt, ++imagePointsIt, i++ )
+ {
+ int num = pointCounts[i];
+ objectPointsIt->resize( num );
+ imagePointsIt->resize( num );
+ vector<Point3f>::iterator oIt = objectPointsIt->begin();
+ vector<Point2f>::iterator iIt = imagePointsIt->begin();
+ for( int j = 0; j < num; ++oIt, ++iIt, j++, op++, ip++)
+ {
+ oIt->x = (float)op->x, oIt->y = (float)op->y, oIt->z = (float)op->z;
+ iIt->x = (float)ip->x, iIt->y = (float)ip->y;
+ }
+ }
+
+ calibrateCamera( objectPoints,
+ imagePoints,
+ imageSize,
+ cameraMatrix,
+ distCoeffs,
+ rvecs,
+ tvecs,
+ flags );
+
+ assert( cameraMatrix.type() == CV_64FC1 );
+ memcpy( _cameraMatrix, cameraMatrix.data, 9*sizeof(double) );
+
+ assert( distCoeffs.type() == CV_64FC1 );
+ memcpy( _distortionCoeffs, distCoeffs.data, 4*sizeof(double) );
+
+ vector<Mat>::iterator rvecsIt = rvecs.begin();
+ vector<Mat>::iterator tvecsIt = tvecs.begin();
+ double *rm = rotationMatrices,
+ *tm = translationVectors;
+ assert( rvecsIt->type() == CV_64FC1 );
+ assert( tvecsIt->type() == CV_64FC1 );
+ for( int i = 0; i < imageCount; ++rvecsIt, ++tvecsIt, i++, rm+=9, tm+=3 )
+ {
+ Mat r9( 3, 3, CV_64FC1 );
+ Rodrigues( *rvecsIt, r9 );
+ memcpy( rm, r9.data, 9*sizeof(double) );
+ memcpy( tm, tvecsIt->data, 3*sizeof(double) );
+ }
}
void CV_CameraCalibrationTest_CPP::project( int pointCount, CvPoint3D64f* _objectPoints,
- double* rotationMatrix, double* translationVector,
- double* _cameraMatrix, double* distortion, CvPoint2D64f* _imagePoints )
+ double* rotationMatrix, double* translationVector,
+ double* _cameraMatrix, double* distortion, CvPoint2D64f* _imagePoints )
{
- Mat objectPoints( pointCount, 3, CV_64FC1, _objectPoints );
- Mat rmat( 3, 3, CV_64FC1, rotationMatrix ),
- rvec( 1, 3, CV_64FC1 ),
- tvec( 1, 3, CV_64FC1, translationVector );
- Mat cameraMatrix( 3, 3, CV_64FC1, _cameraMatrix );
- Mat distCoeffs( 1, 4, CV_64FC1, distortion );
- vector<Point2f> imagePoints;
- Rodrigues( rmat, rvec );
-
- objectPoints.convertTo( objectPoints, CV_32FC1 );
- projectPoints( objectPoints, rvec, tvec,
- cameraMatrix, distCoeffs, imagePoints );
- vector<Point2f>::const_iterator it = imagePoints.begin();
- for( int i = 0; it != imagePoints.end(); ++it, i++ )
- {
- _imagePoints[i] = cvPoint2D64f( it->x, it->y );
- }
+ Mat objectPoints( pointCount, 3, CV_64FC1, _objectPoints );
+ Mat rmat( 3, 3, CV_64FC1, rotationMatrix ),
+ rvec( 1, 3, CV_64FC1 ),
+ tvec( 1, 3, CV_64FC1, translationVector );
+ Mat cameraMatrix( 3, 3, CV_64FC1, _cameraMatrix );
+ Mat distCoeffs( 1, 4, CV_64FC1, distortion );
+ vector<Point2f> imagePoints;
+ Rodrigues( rmat, rvec );
+
+ objectPoints.convertTo( objectPoints, CV_32FC1 );
+ projectPoints( objectPoints, rvec, tvec,
+ cameraMatrix, distCoeffs, imagePoints );
+ vector<Point2f>::const_iterator it = imagePoints.begin();
+ for( int i = 0; it != imagePoints.end(); ++it, i++ )
+ {
+ _imagePoints[i] = cvPoint2D64f( it->x, it->y );
+ }
}
class CV_CalibrationMatrixValuesTest : public cvtest::BaseTest
{
public:
- CV_CalibrationMatrixValuesTest() {}
+ CV_CalibrationMatrixValuesTest() {}
protected:
- void run(int);
- virtual void calibMatrixValues( const Mat& cameraMatrix, Size imageSize,
- double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength,
- Point2d& principalPoint, double& aspectRatio ) = 0;
+ void run(int);
+ virtual void calibMatrixValues( const Mat& cameraMatrix, Size imageSize,
+ double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength,
+ Point2d& principalPoint, double& aspectRatio ) = 0;
};
void CV_CalibrationMatrixValuesTest::run(int)
{
- int code = cvtest::TS::OK;
- const double fcMinVal = 1e-5;
- const double fcMaxVal = 1000;
- const double apertureMaxVal = 0.01;
-
- RNG rng = ts->get_rng();
-
- double fx, fy, cx, cy, nx, ny;
- Mat cameraMatrix( 3, 3, CV_64FC1 );
- cameraMatrix.setTo( Scalar(0) );
- fx = cameraMatrix.at<double>(0,0) = rng.uniform( fcMinVal, fcMaxVal );
- fy = cameraMatrix.at<double>(1,1) = rng.uniform( fcMinVal, fcMaxVal );
- cx = cameraMatrix.at<double>(0,2) = rng.uniform( fcMinVal, fcMaxVal );
- cy = cameraMatrix.at<double>(1,2) = rng.uniform( fcMinVal, fcMaxVal );
- cameraMatrix.at<double>(2,2) = 1;
-
- Size imageSize( 600, 400 );
-
- double apertureWidth = (double)rng * apertureMaxVal,
- apertureHeight = (double)rng * apertureMaxVal;
-
- double fovx, fovy, focalLength, aspectRatio,
- goodFovx, goodFovy, goodFocalLength, goodAspectRatio;
- Point2d principalPoint, goodPrincipalPoint;
-
-
- calibMatrixValues( cameraMatrix, imageSize, apertureWidth, apertureHeight,
- fovx, fovy, focalLength, principalPoint, aspectRatio );
-
- // calculate calibration matrix values
- goodAspectRatio = fy / fx;
-
- if( apertureWidth != 0.0 && apertureHeight != 0.0 )
- {
- nx = imageSize.width / apertureWidth;
- ny = imageSize.height / apertureHeight;
- }
- else
- {
- nx = 1.0;
- ny = goodAspectRatio;
- }
-
- goodFovx = 2 * atan( imageSize.width / (2 * fx)) * 180.0 / CV_PI;
- goodFovy = 2 * atan( imageSize.height / (2 * fy)) * 180.0 / CV_PI;
-
- goodFocalLength = fx / nx;
-
- goodPrincipalPoint.x = cx / nx;
- goodPrincipalPoint.y = cy / ny;
-
- // check results
- if( fabs(fovx - goodFovx) > FLT_EPSILON )
- {
- ts->printf( cvtest::TS::LOG, "bad fovx (real=%f, good = %f\n", fovx, goodFovx );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- goto _exit_;
- }
- if( fabs(fovy - goodFovy) > FLT_EPSILON )
- {
- ts->printf( cvtest::TS::LOG, "bad fovy (real=%f, good = %f\n", fovy, goodFovy );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- goto _exit_;
- }
- if( fabs(focalLength - goodFocalLength) > FLT_EPSILON )
- {
- ts->printf( cvtest::TS::LOG, "bad focalLength (real=%f, good = %f\n", focalLength, goodFocalLength );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- goto _exit_;
- }
- if( fabs(aspectRatio - goodAspectRatio) > FLT_EPSILON )
- {
- ts->printf( cvtest::TS::LOG, "bad aspectRatio (real=%f, good = %f\n", aspectRatio, goodAspectRatio );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- goto _exit_;
- }
- if( norm( principalPoint - goodPrincipalPoint ) > FLT_EPSILON )
- {
- ts->printf( cvtest::TS::LOG, "bad principalPoint\n" );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- goto _exit_;
- }
+ int code = cvtest::TS::OK;
+ const double fcMinVal = 1e-5;
+ const double fcMaxVal = 1000;
+ const double apertureMaxVal = 0.01;
+
+ RNG rng = ts->get_rng();
+
+ double fx, fy, cx, cy, nx, ny;
+ Mat cameraMatrix( 3, 3, CV_64FC1 );
+ cameraMatrix.setTo( Scalar(0) );
+ fx = cameraMatrix.at<double>(0,0) = rng.uniform( fcMinVal, fcMaxVal );
+ fy = cameraMatrix.at<double>(1,1) = rng.uniform( fcMinVal, fcMaxVal );
+ cx = cameraMatrix.at<double>(0,2) = rng.uniform( fcMinVal, fcMaxVal );
+ cy = cameraMatrix.at<double>(1,2) = rng.uniform( fcMinVal, fcMaxVal );
+ cameraMatrix.at<double>(2,2) = 1;
+
+ Size imageSize( 600, 400 );
+
+ double apertureWidth = (double)rng * apertureMaxVal,
+ apertureHeight = (double)rng * apertureMaxVal;
+
+ double fovx, fovy, focalLength, aspectRatio,
+ goodFovx, goodFovy, goodFocalLength, goodAspectRatio;
+ Point2d principalPoint, goodPrincipalPoint;
+
+
+ calibMatrixValues( cameraMatrix, imageSize, apertureWidth, apertureHeight,
+ fovx, fovy, focalLength, principalPoint, aspectRatio );
+
+ // calculate calibration matrix values
+ goodAspectRatio = fy / fx;
+
+ if( apertureWidth != 0.0 && apertureHeight != 0.0 )
+ {
+ nx = imageSize.width / apertureWidth;
+ ny = imageSize.height / apertureHeight;
+ }
+ else
+ {
+ nx = 1.0;
+ ny = goodAspectRatio;
+ }
+
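+    // Reference values follow the same model as calibrationMatrixValues(): fov = 2*atan(size/(2*f))
+    // in degrees, focal length in the aperture's physical units = fx/nx, principal point = (cx/nx, cy/ny),
+    // with nx, ny the pixel densities computed above.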
+ goodFovx = 2 * atan( imageSize.width / (2 * fx)) * 180.0 / CV_PI;
+ goodFovy = 2 * atan( imageSize.height / (2 * fy)) * 180.0 / CV_PI;
+
+ goodFocalLength = fx / nx;
+
+ goodPrincipalPoint.x = cx / nx;
+ goodPrincipalPoint.y = cy / ny;
+
+ // check results
+ if( fabs(fovx - goodFovx) > FLT_EPSILON )
+ {
+        ts->printf( cvtest::TS::LOG, "bad fovx (real=%f, good = %f)\n", fovx, goodFovx );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ goto _exit_;
+ }
+ if( fabs(fovy - goodFovy) > FLT_EPSILON )
+ {
+        ts->printf( cvtest::TS::LOG, "bad fovy (real=%f, good = %f)\n", fovy, goodFovy );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ goto _exit_;
+ }
+ if( fabs(focalLength - goodFocalLength) > FLT_EPSILON )
+ {
+        ts->printf( cvtest::TS::LOG, "bad focalLength (real=%f, good = %f)\n", focalLength, goodFocalLength );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ goto _exit_;
+ }
+ if( fabs(aspectRatio - goodAspectRatio) > FLT_EPSILON )
+ {
+        ts->printf( cvtest::TS::LOG, "bad aspectRatio (real=%f, good = %f)\n", aspectRatio, goodAspectRatio );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ goto _exit_;
+ }
+ if( norm( principalPoint - goodPrincipalPoint ) > FLT_EPSILON )
+ {
+ ts->printf( cvtest::TS::LOG, "bad principalPoint\n" );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ goto _exit_;
+ }
_exit_:
- RNG& _rng = ts->get_rng();
- _rng = rng;
- ts->set_failed_test_info( code );
+ RNG& _rng = ts->get_rng();
+ _rng = rng;
+ ts->set_failed_test_info( code );
}
//----------------------------------------- CV_CalibrationMatrixValuesTest_C --------------------------------
class CV_CalibrationMatrixValuesTest_C : public CV_CalibrationMatrixValuesTest
{
public:
- CV_CalibrationMatrixValuesTest_C(){}
+ CV_CalibrationMatrixValuesTest_C(){}
protected:
- virtual void calibMatrixValues( const Mat& cameraMatrix, Size imageSize,
- double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength,
- Point2d& principalPoint, double& aspectRatio );
+ virtual void calibMatrixValues( const Mat& cameraMatrix, Size imageSize,
+ double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength,
+ Point2d& principalPoint, double& aspectRatio );
};
void CV_CalibrationMatrixValuesTest_C::calibMatrixValues( const Mat& _cameraMatrix, Size imageSize,
- double apertureWidth, double apertureHeight,
- double& fovx, double& fovy, double& focalLength,
- Point2d& principalPoint, double& aspectRatio )
+ double apertureWidth, double apertureHeight,
+ double& fovx, double& fovy, double& focalLength,
+ Point2d& principalPoint, double& aspectRatio )
{
- CvMat cameraMatrix = _cameraMatrix;
- CvPoint2D64f pp;
- cvCalibrationMatrixValues( &cameraMatrix, imageSize, apertureWidth, apertureHeight,
- &fovx, &fovy, &focalLength, &pp, &aspectRatio );
- principalPoint.x = pp.x;
- principalPoint.y = pp.y;
+ CvMat cameraMatrix = _cameraMatrix;
+ CvPoint2D64f pp;
+ cvCalibrationMatrixValues( &cameraMatrix, imageSize, apertureWidth, apertureHeight,
+ &fovx, &fovy, &focalLength, &pp, &aspectRatio );
+ principalPoint.x = pp.x;
+ principalPoint.y = pp.y;
}
class CV_CalibrationMatrixValuesTest_CPP : public CV_CalibrationMatrixValuesTest
{
public:
- CV_CalibrationMatrixValuesTest_CPP() {}
+ CV_CalibrationMatrixValuesTest_CPP() {}
protected:
- virtual void calibMatrixValues( const Mat& cameraMatrix, Size imageSize,
- double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength,
- Point2d& principalPoint, double& aspectRatio );
+ virtual void calibMatrixValues( const Mat& cameraMatrix, Size imageSize,
+ double apertureWidth, double apertureHeight, double& fovx, double& fovy, double& focalLength,
+ Point2d& principalPoint, double& aspectRatio );
};
void CV_CalibrationMatrixValuesTest_CPP::calibMatrixValues( const Mat& cameraMatrix, Size imageSize,
- double apertureWidth, double apertureHeight,
- double& fovx, double& fovy, double& focalLength,
- Point2d& principalPoint, double& aspectRatio )
+ double apertureWidth, double apertureHeight,
+ double& fovx, double& fovy, double& focalLength,
+ Point2d& principalPoint, double& aspectRatio )
{
- calibrationMatrixValues( cameraMatrix, imageSize, apertureWidth, apertureHeight,
- fovx, fovy, focalLength, principalPoint, aspectRatio );
+ calibrationMatrixValues( cameraMatrix, imageSize, apertureWidth, apertureHeight,
+ fovx, fovy, focalLength, principalPoint, aspectRatio );
}
void calcdfdx( const vector<vector<Point2f> >& leftF, const vector<vector<Point2f> >& rightF, double eps, Mat& dfdx )
{
const int fdim = 2;
- CV_Assert( !leftF.empty() && !rightF.empty() && !leftF[0].empty() && !rightF[0].empty() );
- CV_Assert( leftF[0].size() == rightF[0].size() );
- CV_Assert( fabs(eps) > std::numeric_limits<double>::epsilon() );
- int fcount = (int)leftF[0].size(), xdim = (int)leftF.size();
+ CV_Assert( !leftF.empty() && !rightF.empty() && !leftF[0].empty() && !rightF[0].empty() );
+ CV_Assert( leftF[0].size() == rightF[0].size() );
+ CV_Assert( fabs(eps) > std::numeric_limits<double>::epsilon() );
+ int fcount = (int)leftF[0].size(), xdim = (int)leftF.size();
- dfdx.create( fcount*fdim, xdim, CV_64FC1 );
+ dfdx.create( fcount*fdim, xdim, CV_64FC1 );
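+    // Central-difference Jacobian estimate: column xi holds (f(x+eps) - f(x-eps)) / (2*eps),
+    // with two rows (x and y) per image point.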
- vector<vector<Point2f> >::const_iterator arrLeftIt = leftF.begin();
- vector<vector<Point2f> >::const_iterator arrRightIt = rightF.begin();
- for( int xi = 0; xi < xdim; xi++, ++arrLeftIt, ++arrRightIt )
- {
+ vector<vector<Point2f> >::const_iterator arrLeftIt = leftF.begin();
+ vector<vector<Point2f> >::const_iterator arrRightIt = rightF.begin();
+ for( int xi = 0; xi < xdim; xi++, ++arrLeftIt, ++arrRightIt )
+ {
CV_Assert( (int)arrLeftIt->size() == fcount );
CV_Assert( (int)arrRightIt->size() == fcount );
        vector<Point2f>::const_iterator lIt = arrLeftIt->begin();
        vector<Point2f>::const_iterator rIt = arrRightIt->begin();
        for( int fi = 0; fi < dfdx.rows; fi+=fdim, ++lIt, ++rIt )
{
dfdx.at<double>(fi, xi ) = 0.5 * ((double)(rIt->x - lIt->x)) / eps;
- dfdx.at<double>(fi+1, xi ) = 0.5 * ((double)(rIt->y - lIt->y)) / eps;
- }
- }
+ dfdx.at<double>(fi+1, xi ) = 0.5 * ((double)(rIt->y - lIt->y)) / eps;
+ }
+ }
}
class CV_ProjectPointsTest : public cvtest::BaseTest
{
public:
- CV_ProjectPointsTest() {}
+ CV_ProjectPointsTest() {}
protected:
- void run(int);
- virtual void project( const Mat& objectPoints,
- const Mat& rvec, const Mat& tvec,
- const Mat& cameraMatrix,
- const Mat& distCoeffs,
- vector<Point2f>& imagePoints,
- Mat& dpdrot, Mat& dpdt, Mat& dpdf,
- Mat& dpdc, Mat& dpddist,
- double aspectRatio=0 ) = 0;
+ void run(int);
+ virtual void project( const Mat& objectPoints,
+ const Mat& rvec, const Mat& tvec,
+ const Mat& cameraMatrix,
+ const Mat& distCoeffs,
+ vector<Point2f>& imagePoints,
+ Mat& dpdrot, Mat& dpdt, Mat& dpdf,
+ Mat& dpdc, Mat& dpddist,
+ double aspectRatio=0 ) = 0;
};
void CV_ProjectPointsTest::run(int)
{
//typedef float matType;
- int code = cvtest::TS::OK;
- const int pointCount = 100;
+ int code = cvtest::TS::OK;
+ const int pointCount = 100;
- const float zMinVal = 10.0f, zMaxVal = 100.0f,
+ const float zMinVal = 10.0f, zMaxVal = 100.0f,
rMinVal = -0.3f, rMaxVal = 0.3f,
- tMinVal = -2.0f, tMaxVal = 2.0f;
+ tMinVal = -2.0f, tMaxVal = 2.0f;
const float imgPointErr = 1e-3f,
dEps = 1e-3f;
-
+
double err;
Size imgSize( 600, 800 );
Mat_<float> objPoints( pointCount, 3), rvec( 1, 3), rmat, tvec( 1, 3 ), cameraMatrix( 3, 3 ), distCoeffs( 1, 4 ),
leftRvec, rightRvec, leftTvec, rightTvec, leftCameraMatrix, rightCameraMatrix, leftDistCoeffs, rightDistCoeffs;
- RNG rng = ts->get_rng();
+ RNG rng = ts->get_rng();
- // generate data
- cameraMatrix << 300.f, 0.f, imgSize.width/2.f,
+ // generate data
+ cameraMatrix << 300.f, 0.f, imgSize.width/2.f,
0.f, 300.f, imgSize.height/2.f,
0.f, 0.f, 1.f;
- distCoeffs << 0.1, 0.01, 0.001, 0.001;
+ distCoeffs << 0.1, 0.01, 0.001, 0.001;
- rvec(0,0) = rng.uniform( rMinVal, rMaxVal );
- rvec(0,1) = rng.uniform( rMinVal, rMaxVal );
- rvec(0,2) = rng.uniform( rMinVal, rMaxVal );
- Rodrigues( rvec, rmat );
+ rvec(0,0) = rng.uniform( rMinVal, rMaxVal );
+ rvec(0,1) = rng.uniform( rMinVal, rMaxVal );
+ rvec(0,2) = rng.uniform( rMinVal, rMaxVal );
+ Rodrigues( rvec, rmat );
- tvec(0,0) = rng.uniform( tMinVal, tMaxVal );
- tvec(0,1) = rng.uniform( tMinVal, tMaxVal );
- tvec(0,2) = rng.uniform( tMinVal, tMaxVal );
+ tvec(0,0) = rng.uniform( tMinVal, tMaxVal );
+ tvec(0,1) = rng.uniform( tMinVal, tMaxVal );
+ tvec(0,2) = rng.uniform( tMinVal, tMaxVal );
for( int y = 0; y < objPoints.rows; y++ )
- {
- Mat point(1, 3, CV_32FC1, objPoints.ptr(y) );
- float z = rng.uniform( zMinVal, zMaxVal );
- point.at<float>(0,2) = z;
+ {
+ Mat point(1, 3, CV_32FC1, objPoints.ptr(y) );
+ float z = rng.uniform( zMinVal, zMaxVal );
+ point.at<float>(0,2) = z;
point.at<float>(0,0) = (rng.uniform(2.f,(float)(imgSize.width-2)) - cameraMatrix(0,2)) / cameraMatrix(0,0) * z;
point.at<float>(0,1) = (rng.uniform(2.f,(float)(imgSize.height-2)) - cameraMatrix(1,2)) / cameraMatrix(1,1) * z;
point = (point - tvec) * rmat;
- }
+ }
- vector<Point2f> imgPoints;
- vector<vector<Point2f> > leftImgPoints;
- vector<vector<Point2f> > rightImgPoints;
- Mat dpdrot, dpdt, dpdf, dpdc, dpddist,
- valDpdrot, valDpdt, valDpdf, valDpdc, valDpddist;
+ vector<Point2f> imgPoints;
+ vector<vector<Point2f> > leftImgPoints;
+ vector<vector<Point2f> > rightImgPoints;
+ Mat dpdrot, dpdt, dpdf, dpdc, dpddist,
+ valDpdrot, valDpdt, valDpdf, valDpdc, valDpddist;
- project( objPoints, rvec, tvec, cameraMatrix, distCoeffs,
- imgPoints, dpdrot, dpdt, dpdf, dpdc, dpddist, 0 );
+ project( objPoints, rvec, tvec, cameraMatrix, distCoeffs,
+ imgPoints, dpdrot, dpdt, dpdf, dpdc, dpddist, 0 );
// calculate and check image points
assert( (int)imgPoints.size() == pointCount );
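    // Re-project each point by hand using the pinhole model with radial (k1, k2) and
    // tangential (p1, p2) distortion and compare against the output of project().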
- vector<Point2f>::const_iterator it = imgPoints.begin();
- for( int i = 0; i < pointCount; i++, ++it )
- {
- Point3d p( objPoints(i,0), objPoints(i,1), objPoints(i,2) );
- double z = p.x*rmat(2,0) + p.y*rmat(2,1) + p.z*rmat(2,2) + tvec(0,2),
+ vector<Point2f>::const_iterator it = imgPoints.begin();
+ for( int i = 0; i < pointCount; i++, ++it )
+ {
+ Point3d p( objPoints(i,0), objPoints(i,1), objPoints(i,2) );
+ double z = p.x*rmat(2,0) + p.y*rmat(2,1) + p.z*rmat(2,2) + tvec(0,2),
x = (p.x*rmat(0,0) + p.y*rmat(0,1) + p.z*rmat(0,2) + tvec(0,0)) / z,
y = (p.x*rmat(1,0) + p.y*rmat(1,1) + p.z*rmat(1,2) + tvec(0,1)) / z,
r2 = x*x + y*y,
- r4 = r2*r2;
- Point2f validImgPoint;
- double a1 = 2*x*y,
+ r4 = r2*r2;
+ Point2f validImgPoint;
+ double a1 = 2*x*y,
a2 = r2 + 2*x*x,
a3 = r2 + 2*y*y,
cdist = 1+distCoeffs(0,0)*r2+distCoeffs(0,1)*r4;
- validImgPoint.x = static_cast<float>((double)cameraMatrix(0,0)*(x*cdist + (double)distCoeffs(0,2)*a1 + (double)distCoeffs(0,3)*a2)
+ validImgPoint.x = static_cast<float>((double)cameraMatrix(0,0)*(x*cdist + (double)distCoeffs(0,2)*a1 + (double)distCoeffs(0,3)*a2)
+ (double)cameraMatrix(0,2));
- validImgPoint.y = static_cast<float>((double)cameraMatrix(1,1)*(y*cdist + (double)distCoeffs(0,2)*a3 + distCoeffs(0,3)*a1)
+ validImgPoint.y = static_cast<float>((double)cameraMatrix(1,1)*(y*cdist + (double)distCoeffs(0,2)*a3 + distCoeffs(0,3)*a1)
+ (double)cameraMatrix(1,2));
if( fabs(it->x - validImgPoint.x) > imgPointErr ||
fabs(it->y - validImgPoint.y) > imgPointErr )
- {
- ts->printf( cvtest::TS::LOG, "bad image point\n" );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- goto _exit_;
- }
- }
-
- // check derivatives
- // 1. rotation
- leftImgPoints.resize(3);
+ {
+ ts->printf( cvtest::TS::LOG, "bad image point\n" );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ goto _exit_;
+ }
+ }
+
+ // check derivatives
+ // 1. rotation
+ leftImgPoints.resize(3);
rightImgPoints.resize(3);
- for( int i = 0; i < 3; i++ )
- {
+ for( int i = 0; i < 3; i++ )
+ {
rvec.copyTo( leftRvec ); leftRvec(0,i) -= dEps;
project( objPoints, leftRvec, tvec, cameraMatrix, distCoeffs,
leftImgPoints[i], valDpdrot, valDpdt, valDpdf, valDpdc, valDpddist, 0 );
rvec.copyTo( rightRvec ); rightRvec(0,i) += dEps;
project( objPoints, rightRvec, tvec, cameraMatrix, distCoeffs,
rightImgPoints[i], valDpdrot, valDpdt, valDpdf, valDpdc, valDpddist, 0 );
- }
+ }
calcdfdx( leftImgPoints, rightImgPoints, dEps, valDpdrot );
err = norm( dpdrot, valDpdrot, NORM_INF );
if( err > 3 )
- {
- ts->printf( cvtest::TS::LOG, "bad dpdrot: too big difference = %g\n", err );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- }
+ {
+ ts->printf( cvtest::TS::LOG, "bad dpdrot: too big difference = %g\n", err );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ }
// 2. translation
for( int i = 0; i < 3; i++ )
- {
+ {
tvec.copyTo( leftTvec ); leftTvec(0,i) -= dEps;
project( objPoints, rvec, leftTvec, cameraMatrix, distCoeffs,
leftImgPoints[i], valDpdrot, valDpdt, valDpdf, valDpdc, valDpddist, 0 );
tvec.copyTo( rightTvec ); rightTvec(0,i) += dEps;
project( objPoints, rvec, rightTvec, cameraMatrix, distCoeffs,
rightImgPoints[i], valDpdrot, valDpdt, valDpdf, valDpdc, valDpddist, 0 );
- }
+ }
calcdfdx( leftImgPoints, rightImgPoints, dEps, valDpdt );
if( norm( dpdt, valDpdt, NORM_INF ) > 0.2 )
- {
- ts->printf( cvtest::TS::LOG, "bad dpdtvec\n" );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- }
+ {
+ ts->printf( cvtest::TS::LOG, "bad dpdtvec\n" );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ }
// 3. camera matrix
// 3.1. focus
// 4. distortion
leftImgPoints.resize(distCoeffs.cols);
rightImgPoints.resize(distCoeffs.cols);
- for( int i = 0; i < distCoeffs.cols; i++ )
- {
+ for( int i = 0; i < distCoeffs.cols; i++ )
+ {
distCoeffs.copyTo( leftDistCoeffs ); leftDistCoeffs(0,i) -= dEps;
project( objPoints, rvec, tvec, cameraMatrix, leftDistCoeffs,
leftImgPoints[i], valDpdrot, valDpdt, valDpdf, valDpdc, valDpddist, 0 );
distCoeffs.copyTo( rightDistCoeffs ); rightDistCoeffs(0,i) += dEps;
project( objPoints, rvec, tvec, cameraMatrix, rightDistCoeffs,
rightImgPoints[i], valDpdrot, valDpdt, valDpdf, valDpdc, valDpddist, 0 );
- }
+ }
calcdfdx( leftImgPoints, rightImgPoints, dEps, valDpddist );
if( norm( dpddist, valDpddist ) > 0.3 )
- {
- ts->printf( cvtest::TS::LOG, "bad dpddist\n" );
- code = cvtest::TS::FAIL_BAD_ACCURACY;
- }
+ {
+ ts->printf( cvtest::TS::LOG, "bad dpddist\n" );
+ code = cvtest::TS::FAIL_BAD_ACCURACY;
+ }
_exit_:
- RNG& _rng = ts->get_rng();
- _rng = rng;
- ts->set_failed_test_info( code );
+ RNG& _rng = ts->get_rng();
+ _rng = rng;
+ ts->set_failed_test_info( code );
}
//----------------------------------------- CV_ProjectPointsTest_C --------------------------------
class CV_ProjectPointsTest_C : public CV_ProjectPointsTest
{
public:
- CV_ProjectPointsTest_C() {}
+ CV_ProjectPointsTest_C() {}
protected:
- virtual void project( const Mat& objectPoints,
- const Mat& rvec, const Mat& tvec,
- const Mat& cameraMatrix,
- const Mat& distCoeffs,
- vector<Point2f>& imagePoints,
- Mat& dpdrot, Mat& dpdt, Mat& dpdf,
- Mat& dpdc, Mat& dpddist,
- double aspectRatio=0 );
+ virtual void project( const Mat& objectPoints,
+ const Mat& rvec, const Mat& tvec,
+ const Mat& cameraMatrix,
+ const Mat& distCoeffs,
+ vector<Point2f>& imagePoints,
+ Mat& dpdrot, Mat& dpdt, Mat& dpdf,
+ Mat& dpdc, Mat& dpddist,
+ double aspectRatio=0 );
};
void CV_ProjectPointsTest_C::project( const Mat& opoints, const Mat& rvec, const Mat& tvec,
- const Mat& cameraMatrix, const Mat& distCoeffs, vector<Point2f>& ipoints,
- Mat& dpdrot, Mat& dpdt, Mat& dpdf, Mat& dpdc, Mat& dpddist, double aspectRatio)
+ const Mat& cameraMatrix, const Mat& distCoeffs, vector<Point2f>& ipoints,
+ Mat& dpdrot, Mat& dpdt, Mat& dpdf, Mat& dpdc, Mat& dpddist, double aspectRatio)
{
int npoints = opoints.cols*opoints.rows*opoints.channels()/3;
ipoints.resize(npoints);
    CvMat _objectPoints = opoints, _imagePoints = Mat(ipoints);
    CvMat _rvec = rvec, _tvec = tvec, _cameraMatrix = cameraMatrix, _distCoeffs = distCoeffs;
CvMat _dpdrot = dpdrot, _dpdt = dpdt, _dpdf = dpdf, _dpdc = dpdc, _dpddist = dpddist;
- cvProjectPoints2( &_objectPoints, &_rvec, &_tvec, &_cameraMatrix, &_distCoeffs,
+ cvProjectPoints2( &_objectPoints, &_rvec, &_tvec, &_cameraMatrix, &_distCoeffs,
&_imagePoints, &_dpdrot, &_dpdt, &_dpdf, &_dpdc, &_dpddist, aspectRatio );
}
class CV_ProjectPointsTest_CPP : public CV_ProjectPointsTest
{
public:
- CV_ProjectPointsTest_CPP() {}
+ CV_ProjectPointsTest_CPP() {}
protected:
- virtual void project( const Mat& objectPoints,
- const Mat& rvec, const Mat& tvec,
- const Mat& cameraMatrix,
- const Mat& distCoeffs,
- vector<Point2f>& imagePoints,
- Mat& dpdrot, Mat& dpdt, Mat& dpdf,
- Mat& dpdc, Mat& dpddist,
- double aspectRatio=0 );
+ virtual void project( const Mat& objectPoints,
+ const Mat& rvec, const Mat& tvec,
+ const Mat& cameraMatrix,
+ const Mat& distCoeffs,
+ vector<Point2f>& imagePoints,
+ Mat& dpdrot, Mat& dpdt, Mat& dpdf,
+ Mat& dpdc, Mat& dpddist,
+ double aspectRatio=0 );
};
void CV_ProjectPointsTest_CPP::project( const Mat& objectPoints, const Mat& rvec, const Mat& tvec,
- const Mat& cameraMatrix, const Mat& distCoeffs, vector<Point2f>& imagePoints,
- Mat& dpdrot, Mat& dpdt, Mat& dpdf, Mat& dpdc, Mat& dpddist, double aspectRatio)
+ const Mat& cameraMatrix, const Mat& distCoeffs, vector<Point2f>& imagePoints,
+ Mat& dpdrot, Mat& dpdt, Mat& dpdf, Mat& dpdc, Mat& dpddist, double aspectRatio)
{
Mat J;
- projectPoints( objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints, J, aspectRatio);
+ projectPoints( objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints, J, aspectRatio);
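    // projectPoints() returns one combined Jacobian J; split its column blocks back into the
    // separate derivative matrices used by the test (rotation, translation, focal length, ...).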
J.colRange(0, 3).copyTo(dpdrot);
J.colRange(3, 6).copyTo(dpdt);
J.colRange(6, 8).copyTo(dpdf);
class CV_StereoCalibrationTest : public cvtest::BaseTest
{
public:
- CV_StereoCalibrationTest();
- ~CV_StereoCalibrationTest();
- void clear();
+ CV_StereoCalibrationTest();
+ ~CV_StereoCalibrationTest();
+ void clear();
protected:
- bool checkPandROI( int test_case_idx,
- const Mat& M, const Mat& D, const Mat& R,
- const Mat& P, Size imgsize, Rect roi );
-
- // covers of tested functions
- virtual double calibrateStereoCamera( const vector<vector<Point3f> >& objectPoints,
- const vector<vector<Point2f> >& imagePoints1,
- const vector<vector<Point2f> >& imagePoints2,
- Mat& cameraMatrix1, Mat& distCoeffs1,
- Mat& cameraMatrix2, Mat& distCoeffs2,
- Size imageSize, Mat& R, Mat& T,
- Mat& E, Mat& F, TermCriteria criteria, int flags ) = 0;
- virtual void rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
- const Mat& cameraMatrix2, const Mat& distCoeffs2,
- Size imageSize, const Mat& R, const Mat& T,
- Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
- double alpha, Size newImageSize,
- Rect* validPixROI1, Rect* validPixROI2, int flags ) = 0;
- virtual bool rectifyUncalibrated( const Mat& points1,
- const Mat& points2, const Mat& F, Size imgSize,
- Mat& H1, Mat& H2, double threshold=5 ) = 0;
- virtual void triangulate( const Mat& P1, const Mat& P2,
+ bool checkPandROI( int test_case_idx,
+ const Mat& M, const Mat& D, const Mat& R,
+ const Mat& P, Size imgsize, Rect roi );
+
+ // covers of tested functions
+ virtual double calibrateStereoCamera( const vector<vector<Point3f> >& objectPoints,
+ const vector<vector<Point2f> >& imagePoints1,
+ const vector<vector<Point2f> >& imagePoints2,
+ Mat& cameraMatrix1, Mat& distCoeffs1,
+ Mat& cameraMatrix2, Mat& distCoeffs2,
+ Size imageSize, Mat& R, Mat& T,
+ Mat& E, Mat& F, TermCriteria criteria, int flags ) = 0;
+ virtual void rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
+ const Mat& cameraMatrix2, const Mat& distCoeffs2,
+ Size imageSize, const Mat& R, const Mat& T,
+ Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
+ double alpha, Size newImageSize,
+ Rect* validPixROI1, Rect* validPixROI2, int flags ) = 0;
+ virtual bool rectifyUncalibrated( const Mat& points1,
+ const Mat& points2, const Mat& F, Size imgSize,
+ Mat& H1, Mat& H2, double threshold=5 ) = 0;
+ virtual void triangulate( const Mat& P1, const Mat& P2,
const Mat &points1, const Mat &points2,
Mat &points4D ) = 0;
- virtual void correct( const Mat& F,
+ virtual void correct( const Mat& F,
const Mat &points1, const Mat &points2,
Mat &newPoints1, Mat &newPoints2 ) = 0;
- void run(int);
+ void run(int);
};
CV_StereoCalibrationTest::~CV_StereoCalibrationTest()
{
- clear();
+ clear();
}
void CV_StereoCalibrationTest::clear()
{
- cvtest::BaseTest::clear();
+ cvtest::BaseTest::clear();
}
bool CV_StereoCalibrationTest::checkPandROI( int test_case_idx, const Mat& M, const Mat& D, const Mat& R,
- const Mat& P, Size imgsize, Rect roi )
+ const Mat& P, Size imgsize, Rect roi )
{
- const double eps = 0.05;
- const int N = 21;
- int x, y, k;
- vector<Point2f> pts, upts;
-
- // step 1. check that all the original points belong to the destination image
- for( y = 0; y < N; y++ )
- for( x = 0; x < N; x++ )
- pts.push_back(Point2f((float)x*imgsize.width/(N-1), (float)y*imgsize.height/(N-1)));
-
- undistortPoints(Mat(pts), upts, M, D, R, P );
- for( k = 0; k < N*N; k++ )
- if( upts[k].x < -imgsize.width*eps || upts[k].x > imgsize.width*(1+eps) ||
- upts[k].y < -imgsize.height*eps || upts[k].y > imgsize.height*(1+eps) )
- {
- ts->printf(cvtest::TS::LOG, "Test #%d. The point (%g, %g) was mapped to (%g, %g) which is out of image\n",
- test_case_idx, pts[k].x, pts[k].y, upts[k].x, upts[k].y);
- return false;
- }
-
- // step 2. check that all the points inside ROI belong to the original source image
- Mat temp(imgsize, CV_8U), utemp, map1, map2;
- temp = Scalar::all(1);
- initUndistortRectifyMap(M, D, R, P, imgsize, CV_16SC2, map1, map2);
- remap(temp, utemp, map1, map2, INTER_LINEAR);
-
- if(roi.x < 0 || roi.y < 0 || roi.x + roi.width > imgsize.width || roi.y + roi.height > imgsize.height)
- {
- ts->printf(cvtest::TS::LOG, "Test #%d. The ROI=(%d, %d, %d, %d) is outside of the imge rectangle\n",
- test_case_idx, roi.x, roi.y, roi.width, roi.height);
- return false;
- }
- double s = sum(utemp(roi))[0];
- if( s > roi.area() || roi.area() - s > roi.area()*(1-eps) )
- {
- ts->printf(cvtest::TS::LOG, "Test #%d. The ratio of black pixels inside the valid ROI (~%g%%) is too large\n",
- test_case_idx, s*100./roi.area());
- return false;
- }
-
- return true;
+ const double eps = 0.05;
+ const int N = 21;
+ int x, y, k;
+ vector<Point2f> pts, upts;
+
+ // step 1. check that all the original points belong to the destination image
+ for( y = 0; y < N; y++ )
+ for( x = 0; x < N; x++ )
+ pts.push_back(Point2f((float)x*imgsize.width/(N-1), (float)y*imgsize.height/(N-1)));
+
+ undistortPoints(Mat(pts), upts, M, D, R, P );
+ for( k = 0; k < N*N; k++ )
+ if( upts[k].x < -imgsize.width*eps || upts[k].x > imgsize.width*(1+eps) ||
+ upts[k].y < -imgsize.height*eps || upts[k].y > imgsize.height*(1+eps) )
+ {
+ ts->printf(cvtest::TS::LOG, "Test #%d. The point (%g, %g) was mapped to (%g, %g) which is out of image\n",
+ test_case_idx, pts[k].x, pts[k].y, upts[k].x, upts[k].y);
+ return false;
+ }
+
+ // step 2. check that all the points inside ROI belong to the original source image
+ Mat temp(imgsize, CV_8U), utemp, map1, map2;
+ temp = Scalar::all(1);
+ initUndistortRectifyMap(M, D, R, P, imgsize, CV_16SC2, map1, map2);
+ remap(temp, utemp, map1, map2, INTER_LINEAR);
+
+ if(roi.x < 0 || roi.y < 0 || roi.x + roi.width > imgsize.width || roi.y + roi.height > imgsize.height)
+ {
+        ts->printf(cvtest::TS::LOG, "Test #%d. The ROI=(%d, %d, %d, %d) is outside of the image rectangle\n",
+ test_case_idx, roi.x, roi.y, roi.width, roi.height);
+ return false;
+ }
+ double s = sum(utemp(roi))[0];
+ if( s > roi.area() || roi.area() - s > roi.area()*(1-eps) )
+ {
+ ts->printf(cvtest::TS::LOG, "Test #%d. The ratio of black pixels inside the valid ROI (~%g%%) is too large\n",
+ test_case_idx, s*100./roi.area());
+ return false;
+ }
+
+ return true;
}
void CV_StereoCalibrationTest::run( int )
{
- const int ntests = 1;
- const double maxReprojErr = 2;
- const double maxScanlineDistErr_c = 3;
- const double maxScanlineDistErr_uc = 4;
- FILE* f = 0;
-
- for(int testcase = 1; testcase <= ntests; testcase++)
- {
- char filepath[1000];
- char buf[1000];
- sprintf( filepath, "%sstereo/case%d/stereo_calib.txt", ts->get_data_path().c_str(), testcase );
- f = fopen(filepath, "rt");
- Size patternSize;
- vector<string> imglist;
-
- if( !f || !fgets(buf, sizeof(buf)-3, f) || sscanf(buf, "%d%d", &patternSize.width, &patternSize.height) != 2 )
- {
- ts->printf( cvtest::TS::LOG, "The file %s can not be opened or has invalid content\n", filepath );
- ts->set_failed_test_info( f ? cvtest::TS::FAIL_INVALID_TEST_DATA : cvtest::TS::FAIL_MISSING_TEST_DATA );
- return;
- }
-
- for(;;)
- {
- if( !fgets( buf, sizeof(buf)-3, f ))
- break;
- size_t len = strlen(buf);
- while( len > 0 && isspace(buf[len-1]))
- buf[--len] = '\0';
- if( buf[0] == '#')
- continue;
- sprintf(filepath, "%sstereo/case%d/%s", ts->get_data_path().c_str(), testcase, buf );
- imglist.push_back(string(filepath));
- }
- fclose(f);
-
- if( imglist.size() == 0 || imglist.size() % 2 != 0 )
- {
- ts->printf( cvtest::TS::LOG, "The number of images is 0 or an odd number in the case #%d\n", testcase );
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
- return;
- }
-
- int nframes = (int)(imglist.size()/2);
- int npoints = patternSize.width*patternSize.height;
- vector<vector<Point3f> > objpt(nframes);
- vector<vector<Point2f> > imgpt1(nframes);
- vector<vector<Point2f> > imgpt2(nframes);
- Size imgsize;
- int total = 0;
-
- for( int i = 0; i < nframes; i++ )
- {
- Mat left = imread(imglist[i*2]);
- Mat right = imread(imglist[i*2+1]);
- if(!left.data || !right.data)
- {
- ts->printf( cvtest::TS::LOG, "Can not load images %s and %s, testcase %d\n",
- imglist[i*2].c_str(), imglist[i*2+1].c_str(), testcase );
- ts->set_failed_test_info( cvtest::TS::FAIL_MISSING_TEST_DATA );
- return;
- }
- imgsize = left.size();
- bool found1 = findChessboardCorners(left, patternSize, imgpt1[i]);
- bool found2 = findChessboardCorners(right, patternSize, imgpt2[i]);
- if(!found1 || !found2)
- {
- ts->printf( cvtest::TS::LOG, "The function could not detect boards on the images %s and %s, testcase %d\n",
- imglist[i*2].c_str(), imglist[i*2+1].c_str(), testcase );
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
- return;
- }
- total += (int)imgpt1[i].size();
- for( int j = 0; j < npoints; j++ )
- objpt[i].push_back(Point3f((float)(j%patternSize.width), (float)(j/patternSize.width), 0.f));
- }
-
- // rectify (calibrated)
- Mat M1 = Mat::eye(3,3,CV_64F), M2 = Mat::eye(3,3,CV_64F), D1(5,1,CV_64F), D2(5,1,CV_64F), R, T, E, F;
- M1.at<double>(0,2) = M2.at<double>(0,2)=(imgsize.width-1)*0.5;
- M1.at<double>(1,2) = M2.at<double>(1,2)=(imgsize.height-1)*0.5;
- D1 = Scalar::all(0);
- D2 = Scalar::all(0);
- double err = calibrateStereoCamera(objpt, imgpt1, imgpt2, M1, D1, M2, D2, imgsize, R, T, E, F,
- TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 30, 1e-6),
- CV_CALIB_SAME_FOCAL_LENGTH
- //+ CV_CALIB_FIX_ASPECT_RATIO
- + CV_CALIB_FIX_PRINCIPAL_POINT
- + CV_CALIB_ZERO_TANGENT_DIST
+ const int ntests = 1;
+ const double maxReprojErr = 2;
+ const double maxScanlineDistErr_c = 3;
+ const double maxScanlineDistErr_uc = 4;
+ FILE* f = 0;
+
+ for(int testcase = 1; testcase <= ntests; testcase++)
+ {
+ char filepath[1000];
+ char buf[1000];
+ sprintf( filepath, "%sstereo/case%d/stereo_calib.txt", ts->get_data_path().c_str(), testcase );
+ f = fopen(filepath, "rt");
+ Size patternSize;
+ vector<string> imglist;
+
+ if( !f || !fgets(buf, sizeof(buf)-3, f) || sscanf(buf, "%d%d", &patternSize.width, &patternSize.height) != 2 )
+ {
+ ts->printf( cvtest::TS::LOG, "The file %s can not be opened or has invalid content\n", filepath );
+ ts->set_failed_test_info( f ? cvtest::TS::FAIL_INVALID_TEST_DATA : cvtest::TS::FAIL_MISSING_TEST_DATA );
+ return;
+ }
+
+ for(;;)
+ {
+ if( !fgets( buf, sizeof(buf)-3, f ))
+ break;
+ size_t len = strlen(buf);
+ while( len > 0 && isspace(buf[len-1]))
+ buf[--len] = '\0';
+ if( buf[0] == '#')
+ continue;
+ sprintf(filepath, "%sstereo/case%d/%s", ts->get_data_path().c_str(), testcase, buf );
+ imglist.push_back(string(filepath));
+ }
+ fclose(f);
+
+ if( imglist.size() == 0 || imglist.size() % 2 != 0 )
+ {
+ ts->printf( cvtest::TS::LOG, "The number of images is 0 or an odd number in the case #%d\n", testcase );
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
+ return;
+ }
+
+ int nframes = (int)(imglist.size()/2);
+ int npoints = patternSize.width*patternSize.height;
+ vector<vector<Point3f> > objpt(nframes);
+ vector<vector<Point2f> > imgpt1(nframes);
+ vector<vector<Point2f> > imgpt2(nframes);
+ Size imgsize;
+ int total = 0;
+
+ for( int i = 0; i < nframes; i++ )
+ {
+ Mat left = imread(imglist[i*2]);
+ Mat right = imread(imglist[i*2+1]);
+ if(!left.data || !right.data)
+ {
+ ts->printf( cvtest::TS::LOG, "Can not load images %s and %s, testcase %d\n",
+ imglist[i*2].c_str(), imglist[i*2+1].c_str(), testcase );
+ ts->set_failed_test_info( cvtest::TS::FAIL_MISSING_TEST_DATA );
+ return;
+ }
+ imgsize = left.size();
+ bool found1 = findChessboardCorners(left, patternSize, imgpt1[i]);
+ bool found2 = findChessboardCorners(right, patternSize, imgpt2[i]);
+ if(!found1 || !found2)
+ {
+ ts->printf( cvtest::TS::LOG, "The function could not detect boards on the images %s and %s, testcase %d\n",
+ imglist[i*2].c_str(), imglist[i*2+1].c_str(), testcase );
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
+ return;
+ }
+ total += (int)imgpt1[i].size();
+ for( int j = 0; j < npoints; j++ )
+ objpt[i].push_back(Point3f((float)(j%patternSize.width), (float)(j/patternSize.width), 0.f));
+ }
+
+ // rectify (calibrated)
+ Mat M1 = Mat::eye(3,3,CV_64F), M2 = Mat::eye(3,3,CV_64F), D1(5,1,CV_64F), D2(5,1,CV_64F), R, T, E, F;
+ M1.at<double>(0,2) = M2.at<double>(0,2)=(imgsize.width-1)*0.5;
+ M1.at<double>(1,2) = M2.at<double>(1,2)=(imgsize.height-1)*0.5;
+ D1 = Scalar::all(0);
+ D2 = Scalar::all(0);
+ double err = calibrateStereoCamera(objpt, imgpt1, imgpt2, M1, D1, M2, D2, imgsize, R, T, E, F,
+ TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 30, 1e-6),
+ CV_CALIB_SAME_FOCAL_LENGTH
+ //+ CV_CALIB_FIX_ASPECT_RATIO
+ + CV_CALIB_FIX_PRINCIPAL_POINT
+ + CV_CALIB_ZERO_TANGENT_DIST
+ CV_CALIB_FIX_K3
+ CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5 //+ CV_CALIB_FIX_K6
- );
- err /= nframes*npoints;
- if( err > maxReprojErr )
- {
- ts->printf( cvtest::TS::LOG, "The average reprojection error is too big (=%g), testcase %d\n", err, testcase);
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
- return;
- }
-
- Mat R1, R2, P1, P2, Q;
- Rect roi1, roi2;
- rectify(M1, D1, M2, D2, imgsize, R, T, R1, R2, P1, P2, Q, 1, imgsize, &roi1, &roi2, 0);
- Mat eye33 = Mat::eye(3,3,CV_64F);
- Mat R1t = R1.t(), R2t = R2.t();
-
- if( norm(R1t*R1 - eye33) > 0.01 ||
- norm(R2t*R2 - eye33) > 0.01 ||
- abs(determinant(F)) > 0.01)
- {
- ts->printf( cvtest::TS::LOG, "The computed (by rectify) R1 and R2 are not orthogonal,"
- "or the computed (by calibrate) F is not singular, testcase %d\n", testcase);
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
- return;
- }
-
- if(!checkPandROI(testcase, M1, D1, R1, P1, imgsize, roi1))
- {
- ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
- return;
- }
-
- if(!checkPandROI(testcase, M2, D2, R2, P2, imgsize, roi2))
- {
- ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
- return;
- }
+ );
+ err /= nframes*npoints;
+ if( err > maxReprojErr )
+ {
+ ts->printf( cvtest::TS::LOG, "The average reprojection error is too big (=%g), testcase %d\n", err, testcase);
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
+ return;
+ }
+
+ Mat R1, R2, P1, P2, Q;
+ Rect roi1, roi2;
+ rectify(M1, D1, M2, D2, imgsize, R, T, R1, R2, P1, P2, Q, 1, imgsize, &roi1, &roi2, 0);
+ Mat eye33 = Mat::eye(3,3,CV_64F);
+ Mat R1t = R1.t(), R2t = R2.t();
+
+ if( norm(R1t*R1 - eye33) > 0.01 ||
+ norm(R2t*R2 - eye33) > 0.01 ||
+ abs(determinant(F)) > 0.01)
+ {
+            ts->printf( cvtest::TS::LOG, "The computed (by rectify) R1 and R2 are not orthogonal, "
+ "or the computed (by calibrate) F is not singular, testcase %d\n", testcase);
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
+ return;
+ }
+
+ if(!checkPandROI(testcase, M1, D1, R1, P1, imgsize, roi1))
+ {
+ ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
+ return;
+ }
+
+ if(!checkPandROI(testcase, M2, D2, R2, P2, imgsize, roi2))
+ {
+ ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
+ return;
+ }
//check that Tx after rectification is equal to distance between cameras
double tx = fabs(P2.at<double>(0, 3) / P2.at<double>(0, 0));
if (fabs(tx - norm(T)) > 1e-5)
{
- ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
- return;
+ ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
+ return;
}
//check that Q reprojects points before the camera
CV_Assert(reprojectedTestPoint.type() == CV_64FC1);
if( reprojectedTestPoint.at<double>(2) / reprojectedTestPoint.at<double>(3) < 0 )
{
- ts->printf( cvtest::TS::LOG, "A point after rectification is reprojected behind the camera, testcase %d\n", testcase);
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
+ ts->printf( cvtest::TS::LOG, "A point after rectification is reprojected behind the camera, testcase %d\n", testcase);
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
}
//check that Q reprojects the same points as reconstructed by triangulation
if (norm(triangulatedPoints - reprojectedPoints) / sqrt((double)pointsCount) > requiredAccuracy)
{
- ts->printf( cvtest::TS::LOG, "Points reprojected with a matrix Q and points reconstructed by triangulation are different, testcase %d\n", testcase);
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
+ ts->printf( cvtest::TS::LOG, "Points reprojected with a matrix Q and points reconstructed by triangulation are different, testcase %d\n", testcase);
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
}
//check correctMatches
}
}
- // rectifyUncalibrated
- CV_Assert( imgpt1.size() == imgpt2.size() );
- Mat _imgpt1( total, 1, CV_32FC2 ), _imgpt2( total, 1, CV_32FC2 );
- vector<vector<Point2f> >::const_iterator iit1 = imgpt1.begin();
- vector<vector<Point2f> >::const_iterator iit2 = imgpt2.begin();
- for( int pi = 0; iit1 != imgpt1.end(); ++iit1, ++iit2 )
- {
- vector<Point2f>::const_iterator pit1 = iit1->begin();
- vector<Point2f>::const_iterator pit2 = iit2->begin();
- CV_Assert( iit1->size() == iit2->size() );
- for( ; pit1 != iit1->end(); ++pit1, ++pit2, pi++ )
- {
- _imgpt1.at<Point2f>(pi,0) = Point2f( pit1->x, pit1->y );
- _imgpt2.at<Point2f>(pi,0) = Point2f( pit2->x, pit2->y );
- }
- }
-
- Mat _M1, _M2, _D1, _D2;
- vector<Mat> _R1, _R2, _T1, _T2;
- calibrateCamera( objpt, imgpt1, imgsize, _M1, _D1, _R1, _T1, 0 );
- calibrateCamera( objpt, imgpt2, imgsize, _M2, _D2, _R2, _T1, 0 );
- undistortPoints( _imgpt1, _imgpt1, _M1, _D1, Mat(), _M1 );
- undistortPoints( _imgpt2, _imgpt2, _M2, _D2, Mat(), _M2 );
-
- Mat matF, _H1, _H2;
- matF = findFundamentalMat( _imgpt1, _imgpt2 );
- rectifyUncalibrated( _imgpt1, _imgpt2, matF, imgsize, _H1, _H2 );
-
- Mat rectifPoints1, rectifPoints2;
- perspectiveTransform( _imgpt1, rectifPoints1, _H1 );
- perspectiveTransform( _imgpt2, rectifPoints2, _H2 );
-
- bool verticalStereo = abs(P2.at<double>(0,3)) < abs(P2.at<double>(1,3));
- double maxDiff_c = 0, maxDiff_uc = 0;
- for( int i = 0, k = 0; i < nframes; i++ )
- {
- vector<Point2f> temp[2];
- undistortPoints(Mat(imgpt1[i]), temp[0], M1, D1, R1, P1);
- undistortPoints(Mat(imgpt2[i]), temp[1], M2, D2, R2, P2);
-
- for( int j = 0; j < npoints; j++, k++ )
- {
- double diff_c = verticalStereo ? abs(temp[0][j].x - temp[1][j].x) : abs(temp[0][j].y - temp[1][j].y);
- Point2f d = rectifPoints1.at<Point2f>(k,0) - rectifPoints2.at<Point2f>(k,0);
- double diff_uc = verticalStereo ? abs(d.x) : abs(d.y);
- maxDiff_c = max(maxDiff_c, diff_c);
- maxDiff_uc = max(maxDiff_uc, diff_uc);
- if( maxDiff_c > maxScanlineDistErr_c )
- {
- ts->printf( cvtest::TS::LOG, "The distance between %s coordinates is too big(=%g) (used calibrated stereo), testcase %d\n",
- verticalStereo ? "x" : "y", diff_c, testcase);
- ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
- return;
- }
- if( maxDiff_uc > maxScanlineDistErr_uc )
- {
- ts->printf( cvtest::TS::LOG, "The distance between %s coordinates is too big(=%g) (used uncalibrated stereo), testcase %d\n",
- verticalStereo ? "x" : "y", diff_uc, testcase);
- ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
- return;
- }
- }
- }
-
- ts->printf( cvtest::TS::LOG, "Testcase %d. Max distance (calibrated) =%g\n"
- "Max distance (uncalibrated) =%g\n", testcase, maxDiff_c, maxDiff_uc );
- }
+ // rectifyUncalibrated
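+        // Cross-check with the uncalibrated pipeline: flatten all detected corners, undistort them
+        // with per-camera intrinsics from calibrateCamera(), estimate the fundamental matrix and
+        // compute the rectifying homographies H1, H2.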
+ CV_Assert( imgpt1.size() == imgpt2.size() );
+ Mat _imgpt1( total, 1, CV_32FC2 ), _imgpt2( total, 1, CV_32FC2 );
+ vector<vector<Point2f> >::const_iterator iit1 = imgpt1.begin();
+ vector<vector<Point2f> >::const_iterator iit2 = imgpt2.begin();
+ for( int pi = 0; iit1 != imgpt1.end(); ++iit1, ++iit2 )
+ {
+ vector<Point2f>::const_iterator pit1 = iit1->begin();
+ vector<Point2f>::const_iterator pit2 = iit2->begin();
+ CV_Assert( iit1->size() == iit2->size() );
+ for( ; pit1 != iit1->end(); ++pit1, ++pit2, pi++ )
+ {
+ _imgpt1.at<Point2f>(pi,0) = Point2f( pit1->x, pit1->y );
+ _imgpt2.at<Point2f>(pi,0) = Point2f( pit2->x, pit2->y );
+ }
+ }
+
+ Mat _M1, _M2, _D1, _D2;
+ vector<Mat> _R1, _R2, _T1, _T2;
+ calibrateCamera( objpt, imgpt1, imgsize, _M1, _D1, _R1, _T1, 0 );
+ calibrateCamera( objpt, imgpt2, imgsize, _M2, _D2, _R2, _T1, 0 );
+ undistortPoints( _imgpt1, _imgpt1, _M1, _D1, Mat(), _M1 );
+ undistortPoints( _imgpt2, _imgpt2, _M2, _D2, Mat(), _M2 );
+
+ Mat matF, _H1, _H2;
+ matF = findFundamentalMat( _imgpt1, _imgpt2 );
+ rectifyUncalibrated( _imgpt1, _imgpt2, matF, imgsize, _H1, _H2 );
+
+ Mat rectifPoints1, rectifPoints2;
+ perspectiveTransform( _imgpt1, rectifPoints1, _H1 );
+ perspectiveTransform( _imgpt2, rectifPoints2, _H2 );
+
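+        // After rectification corresponding points must share a scanline (equal y for a horizontal
+        // rig, equal x for a vertical one); measure that residual for both pipelines.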
+ bool verticalStereo = abs(P2.at<double>(0,3)) < abs(P2.at<double>(1,3));
+ double maxDiff_c = 0, maxDiff_uc = 0;
+ for( int i = 0, k = 0; i < nframes; i++ )
+ {
+ vector<Point2f> temp[2];
+ undistortPoints(Mat(imgpt1[i]), temp[0], M1, D1, R1, P1);
+ undistortPoints(Mat(imgpt2[i]), temp[1], M2, D2, R2, P2);
+
+ for( int j = 0; j < npoints; j++, k++ )
+ {
+ double diff_c = verticalStereo ? abs(temp[0][j].x - temp[1][j].x) : abs(temp[0][j].y - temp[1][j].y);
+ Point2f d = rectifPoints1.at<Point2f>(k,0) - rectifPoints2.at<Point2f>(k,0);
+ double diff_uc = verticalStereo ? abs(d.x) : abs(d.y);
+ maxDiff_c = max(maxDiff_c, diff_c);
+ maxDiff_uc = max(maxDiff_uc, diff_uc);
+ if( maxDiff_c > maxScanlineDistErr_c )
+ {
+ ts->printf( cvtest::TS::LOG, "The distance between %s coordinates is too big(=%g) (used calibrated stereo), testcase %d\n",
+ verticalStereo ? "x" : "y", diff_c, testcase);
+ ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
+ return;
+ }
+ if( maxDiff_uc > maxScanlineDistErr_uc )
+ {
+ ts->printf( cvtest::TS::LOG, "The distance between %s coordinates is too big(=%g) (used uncalibrated stereo), testcase %d\n",
+ verticalStereo ? "x" : "y", diff_uc, testcase);
+ ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
+ return;
+ }
+ }
+ }
+
+ ts->printf( cvtest::TS::LOG, "Testcase %d. Max distance (calibrated) =%g\n"
+ "Max distance (uncalibrated) =%g\n", testcase, maxDiff_c, maxDiff_uc );
+ }
}
//-------------------------------- CV_StereoCalibrationTest_C ------------------------------
class CV_StereoCalibrationTest_C : public CV_StereoCalibrationTest
{
public:
- CV_StereoCalibrationTest_C() {}
+ CV_StereoCalibrationTest_C() {}
protected:
- virtual double calibrateStereoCamera( const vector<vector<Point3f> >& objectPoints,
- const vector<vector<Point2f> >& imagePoints1,
- const vector<vector<Point2f> >& imagePoints2,
- Mat& cameraMatrix1, Mat& distCoeffs1,
- Mat& cameraMatrix2, Mat& distCoeffs2,
- Size imageSize, Mat& R, Mat& T,
- Mat& E, Mat& F, TermCriteria criteria, int flags );
- virtual void rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
- const Mat& cameraMatrix2, const Mat& distCoeffs2,
- Size imageSize, const Mat& R, const Mat& T,
- Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
- double alpha, Size newImageSize,
- Rect* validPixROI1, Rect* validPixROI2, int flags );
- virtual bool rectifyUncalibrated( const Mat& points1,
- const Mat& points2, const Mat& F, Size imgSize,
- Mat& H1, Mat& H2, double threshold=5 );
- virtual void triangulate( const Mat& P1, const Mat& P2,
+ virtual double calibrateStereoCamera( const vector<vector<Point3f> >& objectPoints,
+ const vector<vector<Point2f> >& imagePoints1,
+ const vector<vector<Point2f> >& imagePoints2,
+ Mat& cameraMatrix1, Mat& distCoeffs1,
+ Mat& cameraMatrix2, Mat& distCoeffs2,
+ Size imageSize, Mat& R, Mat& T,
+ Mat& E, Mat& F, TermCriteria criteria, int flags );
+ virtual void rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
+ const Mat& cameraMatrix2, const Mat& distCoeffs2,
+ Size imageSize, const Mat& R, const Mat& T,
+ Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
+ double alpha, Size newImageSize,
+ Rect* validPixROI1, Rect* validPixROI2, int flags );
+ virtual bool rectifyUncalibrated( const Mat& points1,
+ const Mat& points2, const Mat& F, Size imgSize,
+ Mat& H1, Mat& H2, double threshold=5 );
+ virtual void triangulate( const Mat& P1, const Mat& P2,
const Mat &points1, const Mat &points2,
Mat &points4D );
- virtual void correct( const Mat& F,
+ virtual void correct( const Mat& F,
const Mat &points1, const Mat &points2,
Mat &newPoints1, Mat &newPoints2 );
};
double CV_StereoCalibrationTest_C::calibrateStereoCamera( const vector<vector<Point3f> >& objectPoints,
- const vector<vector<Point2f> >& imagePoints1,
- const vector<vector<Point2f> >& imagePoints2,
- Mat& cameraMatrix1, Mat& distCoeffs1,
- Mat& cameraMatrix2, Mat& distCoeffs2,
- Size imageSize, Mat& R, Mat& T,
- Mat& E, Mat& F, TermCriteria criteria, int flags )
+ const vector<vector<Point2f> >& imagePoints1,
+ const vector<vector<Point2f> >& imagePoints2,
+ Mat& cameraMatrix1, Mat& distCoeffs1,
+ Mat& cameraMatrix2, Mat& distCoeffs2,
+ Size imageSize, Mat& R, Mat& T,
+ Mat& E, Mat& F, TermCriteria criteria, int flags )
{
- cameraMatrix1.create( 3, 3, CV_64F );
- cameraMatrix2.create( 3, 3, CV_64F);
- distCoeffs1.create( 1, 5, CV_64F);
- distCoeffs2.create( 1, 5, CV_64F);
- R.create(3, 3, CV_64F);
- T.create(3, 1, CV_64F);
- E.create(3, 3, CV_64F);
- F.create(3, 3, CV_64F);
-
- int nimages = (int)objectPoints.size(), total = 0;
- for( int i = 0; i < nimages; i++ )
- {
- total += (int)objectPoints[i].size();
- }
-
- Mat npoints( 1, nimages, CV_32S ),
- objPt( 1, total, DataType<Point3f>::type ),
- imgPt( 1, total, DataType<Point2f>::type ),
- imgPt2( 1, total, DataType<Point2f>::type );
-
- Point2f* imgPtData2 = imgPt2.ptr<Point2f>();
- Point3f* objPtData = objPt.ptr<Point3f>();
- Point2f* imgPtData = imgPt.ptr<Point2f>();
- for( int i = 0, ni = 0, j = 0; i < nimages; i++, j += ni )
- {
- ni = (int)objectPoints[i].size();
- ((int*)npoints.data)[i] = ni;
- std::copy(objectPoints[i].begin(), objectPoints[i].end(), objPtData + j);
- std::copy(imagePoints1[i].begin(), imagePoints1[i].end(), imgPtData + j);
- std::copy(imagePoints2[i].begin(), imagePoints2[i].end(), imgPtData2 + j);
- }
- CvMat _objPt = objPt, _imgPt = imgPt, _imgPt2 = imgPt2, _npoints = npoints;
- CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
- CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
- CvMat matR = R, matT = T, matE = E, matF = F;
-
- return cvStereoCalibrate(&_objPt, &_imgPt, &_imgPt2, &_npoints, &_cameraMatrix1,
- &_distCoeffs1, &_cameraMatrix2, &_distCoeffs2, imageSize,
- &matR, &matT, &matE, &matF, criteria, flags );
+ cameraMatrix1.create( 3, 3, CV_64F );
+ cameraMatrix2.create( 3, 3, CV_64F);
+ distCoeffs1.create( 1, 5, CV_64F);
+ distCoeffs2.create( 1, 5, CV_64F);
+ R.create(3, 3, CV_64F);
+ T.create(3, 1, CV_64F);
+ E.create(3, 3, CV_64F);
+ F.create(3, 3, CV_64F);
+
+ int nimages = (int)objectPoints.size(), total = 0;
+ for( int i = 0; i < nimages; i++ )
+ {
+ total += (int)objectPoints[i].size();
+ }
+
+ Mat npoints( 1, nimages, CV_32S ),
+ objPt( 1, total, DataType<Point3f>::type ),
+ imgPt( 1, total, DataType<Point2f>::type ),
+ imgPt2( 1, total, DataType<Point2f>::type );
+
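+    // Flatten the per-view point vectors into single contiguous arrays, with npoints recording
+    // how many points each view contributes, as expected by the C API cvStereoCalibrate().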
+ Point2f* imgPtData2 = imgPt2.ptr<Point2f>();
+ Point3f* objPtData = objPt.ptr<Point3f>();
+ Point2f* imgPtData = imgPt.ptr<Point2f>();
+ for( int i = 0, ni = 0, j = 0; i < nimages; i++, j += ni )
+ {
+ ni = (int)objectPoints[i].size();
+ ((int*)npoints.data)[i] = ni;
+ std::copy(objectPoints[i].begin(), objectPoints[i].end(), objPtData + j);
+ std::copy(imagePoints1[i].begin(), imagePoints1[i].end(), imgPtData + j);
+ std::copy(imagePoints2[i].begin(), imagePoints2[i].end(), imgPtData2 + j);
+ }
+ CvMat _objPt = objPt, _imgPt = imgPt, _imgPt2 = imgPt2, _npoints = npoints;
+ CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
+ CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
+ CvMat matR = R, matT = T, matE = E, matF = F;
+
+ return cvStereoCalibrate(&_objPt, &_imgPt, &_imgPt2, &_npoints, &_cameraMatrix1,
+ &_distCoeffs1, &_cameraMatrix2, &_distCoeffs2, imageSize,
+ &matR, &matT, &matE, &matF, criteria, flags );
}
void CV_StereoCalibrationTest_C::rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
- const Mat& cameraMatrix2, const Mat& distCoeffs2,
- Size imageSize, const Mat& R, const Mat& T,
- Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
- double alpha, Size newImageSize,
- Rect* validPixROI1, Rect* validPixROI2, int flags )
+ const Mat& cameraMatrix2, const Mat& distCoeffs2,
+ Size imageSize, const Mat& R, const Mat& T,
+ Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
+ double alpha, Size newImageSize,
+ Rect* validPixROI1, Rect* validPixROI2, int flags )
{
- int rtype = CV_64F;
- R1.create(3, 3, rtype);
- R2.create(3, 3, rtype);
- P1.create(3, 4, rtype);
- P2.create(3, 4, rtype);
- Q.create(4, 4, rtype);
- CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
- CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
- CvMat matR = R, matT = T, _R1 = R1, _R2 = R2, _P1 = P1, _P2 = P2, matQ = Q;
- cvStereoRectify( &_cameraMatrix1, &_cameraMatrix2, &_distCoeffs1, &_distCoeffs2,
- imageSize, &matR, &matT, &_R1, &_R2, &_P1, &_P2, &matQ, flags,
- alpha, newImageSize, (CvRect*)validPixROI1, (CvRect*)validPixROI2);
+ int rtype = CV_64F;
+ R1.create(3, 3, rtype);
+ R2.create(3, 3, rtype);
+ P1.create(3, 4, rtype);
+ P2.create(3, 4, rtype);
+ Q.create(4, 4, rtype);
+ CvMat _cameraMatrix1 = cameraMatrix1, _distCoeffs1 = distCoeffs1;
+ CvMat _cameraMatrix2 = cameraMatrix2, _distCoeffs2 = distCoeffs2;
+ CvMat matR = R, matT = T, _R1 = R1, _R2 = R2, _P1 = P1, _P2 = P2, matQ = Q;
+ cvStereoRectify( &_cameraMatrix1, &_cameraMatrix2, &_distCoeffs1, &_distCoeffs2,
+ imageSize, &matR, &matT, &_R1, &_R2, &_P1, &_P2, &matQ, flags,
+ alpha, newImageSize, (CvRect*)validPixROI1, (CvRect*)validPixROI2);
}
bool CV_StereoCalibrationTest_C::rectifyUncalibrated( const Mat& points1,
- const Mat& points2, const Mat& F, Size imgSize, Mat& H1, Mat& H2, double threshold )
+ const Mat& points2, const Mat& F, Size imgSize, Mat& H1, Mat& H2, double threshold )
{
- H1.create(3, 3, CV_64F);
- H2.create(3, 3, CV_64F);
- CvMat _pt1 = points1, _pt2 = points2, matF, *pF=0, _H1 = H1, _H2 = H2;
- if( F.size() == Size(3, 3) )
- pF = &(matF = F);
- return cvStereoRectifyUncalibrated(&_pt1, &_pt2, pF, imgSize, &_H1, &_H2, threshold) > 0;
+ H1.create(3, 3, CV_64F);
+ H2.create(3, 3, CV_64F);
+ CvMat _pt1 = points1, _pt2 = points2, matF, *pF=0, _H1 = H1, _H2 = H2;
+ if( F.size() == Size(3, 3) )
+ pF = &(matF = F);
+ return cvStereoRectifyUncalibrated(&_pt1, &_pt2, pF, imgSize, &_H1, &_H2, threshold) > 0;
}
void CV_StereoCalibrationTest_C::triangulate( const Mat& P1, const Mat& P2,
class CV_StereoCalibrationTest_CPP : public CV_StereoCalibrationTest
{
public:
- CV_StereoCalibrationTest_CPP() {}
+ CV_StereoCalibrationTest_CPP() {}
protected:
- virtual double calibrateStereoCamera( const vector<vector<Point3f> >& objectPoints,
- const vector<vector<Point2f> >& imagePoints1,
- const vector<vector<Point2f> >& imagePoints2,
- Mat& cameraMatrix1, Mat& distCoeffs1,
- Mat& cameraMatrix2, Mat& distCoeffs2,
- Size imageSize, Mat& R, Mat& T,
- Mat& E, Mat& F, TermCriteria criteria, int flags );
- virtual void rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
- const Mat& cameraMatrix2, const Mat& distCoeffs2,
- Size imageSize, const Mat& R, const Mat& T,
- Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
- double alpha, Size newImageSize,
- Rect* validPixROI1, Rect* validPixROI2, int flags );
- virtual bool rectifyUncalibrated( const Mat& points1,
- const Mat& points2, const Mat& F, Size imgSize,
- Mat& H1, Mat& H2, double threshold=5 );
- virtual void triangulate( const Mat& P1, const Mat& P2,
+ virtual double calibrateStereoCamera( const vector<vector<Point3f> >& objectPoints,
+ const vector<vector<Point2f> >& imagePoints1,
+ const vector<vector<Point2f> >& imagePoints2,
+ Mat& cameraMatrix1, Mat& distCoeffs1,
+ Mat& cameraMatrix2, Mat& distCoeffs2,
+ Size imageSize, Mat& R, Mat& T,
+ Mat& E, Mat& F, TermCriteria criteria, int flags );
+ virtual void rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
+ const Mat& cameraMatrix2, const Mat& distCoeffs2,
+ Size imageSize, const Mat& R, const Mat& T,
+ Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
+ double alpha, Size newImageSize,
+ Rect* validPixROI1, Rect* validPixROI2, int flags );
+ virtual bool rectifyUncalibrated( const Mat& points1,
+ const Mat& points2, const Mat& F, Size imgSize,
+ Mat& H1, Mat& H2, double threshold=5 );
+ virtual void triangulate( const Mat& P1, const Mat& P2,
const Mat &points1, const Mat &points2,
Mat &points4D );
virtual void correct( const Mat& F,
};
double CV_StereoCalibrationTest_CPP::calibrateStereoCamera( const vector<vector<Point3f> >& objectPoints,
- const vector<vector<Point2f> >& imagePoints1,
- const vector<vector<Point2f> >& imagePoints2,
- Mat& cameraMatrix1, Mat& distCoeffs1,
- Mat& cameraMatrix2, Mat& distCoeffs2,
- Size imageSize, Mat& R, Mat& T,
- Mat& E, Mat& F, TermCriteria criteria, int flags )
+ const vector<vector<Point2f> >& imagePoints1,
+ const vector<vector<Point2f> >& imagePoints2,
+ Mat& cameraMatrix1, Mat& distCoeffs1,
+ Mat& cameraMatrix2, Mat& distCoeffs2,
+ Size imageSize, Mat& R, Mat& T,
+ Mat& E, Mat& F, TermCriteria criteria, int flags )
{
- return stereoCalibrate( objectPoints, imagePoints1, imagePoints2,
- cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
- imageSize, R, T, E, F, criteria, flags );
+ return stereoCalibrate( objectPoints, imagePoints1, imagePoints2,
+ cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
+ imageSize, R, T, E, F, criteria, flags );
}
void CV_StereoCalibrationTest_CPP::rectify( const Mat& cameraMatrix1, const Mat& distCoeffs1,
- const Mat& cameraMatrix2, const Mat& distCoeffs2,
- Size imageSize, const Mat& R, const Mat& T,
- Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
- double alpha, Size newImageSize,
- Rect* validPixROI1, Rect* validPixROI2, int flags )
+ const Mat& cameraMatrix2, const Mat& distCoeffs2,
+ Size imageSize, const Mat& R, const Mat& T,
+ Mat& R1, Mat& R2, Mat& P1, Mat& P2, Mat& Q,
+ double alpha, Size newImageSize,
+ Rect* validPixROI1, Rect* validPixROI2, int flags )
{
- stereoRectify( cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
- imageSize, R, T, R1, R2, P1, P2, Q, flags, alpha, newImageSize,validPixROI1, validPixROI2 );
+ stereoRectify( cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
+ imageSize, R, T, R1, R2, P1, P2, Q, flags, alpha, newImageSize,validPixROI1, validPixROI2 );
}
bool CV_StereoCalibrationTest_CPP::rectifyUncalibrated( const Mat& points1,
- const Mat& points2, const Mat& F, Size imgSize, Mat& H1, Mat& H2, double threshold )
+ const Mat& points2, const Mat& F, Size imgSize, Mat& H1, Mat& H2, double threshold )
{
- return stereoRectifyUncalibrated( points1, points2, F, imgSize, H1, H2, threshold );
+ return stereoRectifyUncalibrated( points1, points2, F, imgSize, H1, H2, threshold );
}
void CV_StereoCalibrationTest_CPP::triangulate( const Mat& P1, const Mat& P2,
using namespace std;
//template<class T> ostream& operator<<(ostream& out, const Mat_<T>& mat)
-//{
+//{
// for(Mat_<T>::const_iterator pos = mat.begin(), end = mat.end(); pos != end; ++pos)
// out << *pos << " ";
// return out;
//}
-//ostream& operator<<(ostream& out, const Mat& mat) { return out << Mat_<double>(mat); }
+//ostream& operator<<(ostream& out, const Mat& mat) { return out << Mat_<double>(mat); }
Mat calcRvec(const vector<Point3f>& points, const Size& cornerSize)
-{
+{
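    // Derive the board orientation from its corner grid: ex runs along the first corner row,
    // ey along the first corner column, and ez = ex.cross(ey); rot is assembled from these
    // directions (its first row is ex).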
Point3f p00 = points[0];
Point3f p10 = points[1];
- Point3f p01 = points[cornerSize.width];
+ Point3f p01 = points[cornerSize.width];
Vec3d ex(p10.x - p00.x, p10.y - p00.y, p10.z - p00.z);
- Vec3d ey(p01.x - p00.x, p01.y - p00.y, p01.z - p00.z);
- Vec3d ez = ex.cross(ey);
+ Vec3d ey(p01.x - p00.x, p01.y - p00.y, p01.z - p00.z);
+ Vec3d ez = ex.cross(ey);
Mat rot(3, 3, CV_64F);
*rot.ptr<Vec3d>(0) = ex;
{
}
~CV_CalibrateCameraArtificialTest() {}
-protected:
+protected:
int r;
const static int JUST_FIND_CORNERS = 0;
{
ts->printf( cvtest::TS::LOG, "Bad shape of camera matrix returned \n");
ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH);
- }
+ }
double fx_e = camMat_est.at<double>(0, 0), fy_e = camMat_est.at<double>(1, 1);
double cx_e = camMat_est.at<double>(0, 2), cy_e = camMat_est.at<double>(1, 2);
const double eps = 1e-2;
const double dlt = 1e-5;
- bool fail = checkErr(fx_e, fx, eps, dlt) || checkErr(fy_e, fy, eps, dlt) ||
- checkErr(cx_e, cx, eps, dlt) || checkErr(cy_e, cy, eps, dlt);
+ bool fail = checkErr(fx_e, fx, eps, dlt) || checkErr(fy_e, fy, eps, dlt) ||
+ checkErr(cx_e, cx, eps, dlt) || checkErr(cy_e, cy, eps, dlt);
if (fail)
{
- ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
- }
+ ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+ }
ts->printf( cvtest::TS::LOG, "%d) Expected [Fx Fy Cx Cy] = [%.3f %.3f %.3f %.3f]\n", r, fx, fy, cx, cy);
- ts->printf( cvtest::TS::LOG, "%d) Estimated [Fx Fy Cx Cy] = [%.3f %.3f %.3f %.3f]\n", r, fx_e, fy_e, cx_e, cy_e);
+ ts->printf( cvtest::TS::LOG, "%d) Estimated [Fx Fy Cx Cy] = [%.3f %.3f %.3f %.3f]\n", r, fx_e, fy_e, cx_e, cy_e);
}
void compareDistCoeffs(const Mat_<double>& distCoeffs, const Mat& distCoeffs_est)
- {
+ {
const double *dt_e = distCoeffs_est.ptr<double>();
double k1_e = dt_e[0], k2_e = dt_e[1], k3_e = dt_e[4];
double p1 = distCoeffs(0, 2), p2 = distCoeffs(0, 3);
const double eps = 5e-2;
- const double dlt = 1e-3;
+ const double dlt = 1e-3;
const double eps_k3 = 5;
- const double dlt_k3 = 1e-3;
+ const double dlt_k3 = 1e-3;
- bool fail = checkErr(k1_e, k1, eps, dlt) || checkErr(k2_e, k2, eps, dlt) || checkErr(k3_e, k3, eps_k3, dlt_k3) ||
- checkErr(p1_e, p1, eps, dlt) || checkErr(p2_e, p2, eps, dlt);
+ bool fail = checkErr(k1_e, k1, eps, dlt) || checkErr(k2_e, k2, eps, dlt) || checkErr(k3_e, k3, eps_k3, dlt_k3) ||
+ checkErr(p1_e, p1, eps, dlt) || checkErr(p2_e, p2, eps, dlt);
if (fail)
{
            // commented according to vp123's recommendation. TODO - improve accuracy
- //ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); ss
- }
+            //ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+ }
ts->printf( cvtest::TS::LOG, "%d) DistCoeff exp=(%.2f, %.2f, %.4f, %.4f %.2f)\n", r, k1, k2, p1, p2, k3);
- ts->printf( cvtest::TS::LOG, "%d) DistCoeff est=(%.2f, %.2f, %.4f, %.4f %.2f)\n", r, k1_e, k2_e, p1_e, p2_e, k3_e);
+ ts->printf( cvtest::TS::LOG, "%d) DistCoeff est=(%.2f, %.2f, %.4f, %.4f %.2f)\n", r, k1_e, k2_e, p1_e, p2_e, k3_e);
ts->printf( cvtest::TS::LOG, "%d) AbsError = [%.5f %.5f %.5f %.5f %.5f]\n", r, fabs(k1-k1_e), fabs(k2-k2_e), fabs(p1-p1_e), fabs(p2-p2_e), fabs(k3-k3_e));
}
const Point3d& tvec = *tvecs[i].ptr<Point3d>();
const Point3d& tvec_est = *tvecs_est[i].ptr<Point3d>();
- if (norm(tvec_est - tvec) > eps* (norm(tvec) + dlt))
+ if (norm(tvec_est - tvec) > eps* (norm(tvec) + dlt))
{
if (err_count++ < errMsgNum)
{
- if (err_count == errMsgNum)
- ts->printf( cvtest::TS::LOG, "%d) ...\n", r);
- else
+ if (err_count == errMsgNum)
+ ts->printf( cvtest::TS::LOG, "%d) ...\n", r);
+ else
{
- ts->printf( cvtest::TS::LOG, "%d) Bad accuracy in returned tvecs. Index = %d\n", r, i);
+ ts->printf( cvtest::TS::LOG, "%d) Bad accuracy in returned tvecs. Index = %d\n", r, i);
ts->printf( cvtest::TS::LOG, "%d) norm(tvec_est - tvec) = %f, norm(tvec_exp) = %f \n", r, norm(tvec_est - tvec), norm(tvec));
}
}
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
- }
+ }
}
}
int err_count = 0;
const int errMsgNum = 4;
for(size_t i = 0; i < rvecs.size(); ++i)
- {
+ {
Rodrigues(rvecs[i], rmat);
- Rodrigues(rvecs_est[i], rmat_est);
+ Rodrigues(rvecs_est[i], rmat_est);
if (norm(rmat_est, rmat) > eps* (norm(rmat) + dlt))
{
if (err_count++ < errMsgNum)
{
if (err_count == errMsgNum)
- ts->printf( cvtest::TS::LOG, "%d) ...\n", r);
+ ts->printf( cvtest::TS::LOG, "%d) ...\n", r);
else
{
- ts->printf( cvtest::TS::LOG, "%d) Bad accuracy in returned rvecs (rotation matrs). Index = %d\n", r, i);
- ts->printf( cvtest::TS::LOG, "%d) norm(rot_mat_est - rot_mat_exp) = %f, norm(rot_mat_exp) = %f \n", r, norm(rmat_est, rmat), norm(rmat));
+ ts->printf( cvtest::TS::LOG, "%d) Bad accuracy in returned rvecs (rotation matrs). Index = %d\n", r, i);
+ ts->printf( cvtest::TS::LOG, "%d) norm(rot_mat_est - rot_mat_exp) = %f, norm(rot_mat_exp) = %f \n", r, norm(rmat_est, rmat), norm(rmat));
}
}
}
}
- double reprojectErrorWithoutIntrinsics(const vector<Point3f>& cb3d, const vector<Mat>& rvecs_exp, const vector<Mat>& tvecs_exp,
+ double reprojectErrorWithoutIntrinsics(const vector<Point3f>& cb3d, const vector<Mat>& _rvecs_exp, const vector<Mat>& _tvecs_exp,
const vector<Mat>& rvecs_est, const vector<Mat>& tvecs_est)
- {
+ {
const static Mat eye33 = Mat::eye(3, 3, CV_64F);
const static Mat zero15 = Mat::zeros(1, 5, CV_64F);
- Mat chessboard3D(cb3d);
+ Mat _chessboard3D(cb3d);
vector<Point2f> uv_exp, uv_est;
- double res = 0;
+ double res = 0;
- for(size_t i = 0; i < rvecs_exp.size(); ++i)
- {
- projectPoints(chessboard3D, rvecs_exp[i], tvecs_exp[i], eye33, zero15, uv_exp);
- projectPoints(chessboard3D, rvecs_est[i], tvecs_est[i], eye33, zero15, uv_est);
+ for(size_t i = 0; i < rvecs_exp.size(); ++i)
+ {
+ projectPoints(_chessboard3D, _rvecs_exp[i], _tvecs_exp[i], eye33, zero15, uv_exp);
+ projectPoints(_chessboard3D, rvecs_est[i], tvecs_est[i], eye33, zero15, uv_est);
for(size_t j = 0; j < cb3d.size(); ++j)
res += norm(uv_exp[i] - uv_est[i]);
}
Size2f sqSile;
vector<Point3f> chessboard3D;
- vector<Mat> boards, rvecs_exp, tvecs_exp, rvecs_spnp, tvecs_spnp;
+ vector<Mat> boards, rvecs_exp, tvecs_exp, rvecs_spnp, tvecs_spnp;
vector< vector<Point3f> > objectPoints;
vector< vector<Point2f> > imagePoints_art;
vector< vector<Point2f> > imagePoints_findCb;
imagePoints_findCb.clear();
vector<Point2f> corners_art, corners_fcb;
- for(size_t i = 0; i < brdsNum; ++i)
- {
+ for(size_t i = 0; i < brdsNum; ++i)
+ {
for(;;)
{
boards[i] = cbg(bg, camMat, distCoeffs, sqSile, corners_art);
- if(findChessboardCorners(boards[i], cornersSize, corners_fcb))
- break;
- }
+ if(findChessboardCorners(boards[i], cornersSize, corners_fcb))
+ break;
+ }
//cv::namedWindow("CB"); imshow("CB", boards[i]); cv::waitKey();
- imagePoints_art.push_back(corners_art);
+ imagePoints_art.push_back(corners_art);
imagePoints_findCb.push_back(corners_fcb);
tvecs_exp[i].create(1, 3, CV_64F);
*tvecs_exp[i].ptr<Point3d>() = cbg.corners3d[0];
- rvecs_exp[i] = calcRvec(cbg.corners3d, cbg.cornersSize());
+ rvecs_exp[i] = calcRvec(cbg.corners3d, cbg.cornersSize());
}
}
void runTest(const Size& imgSize, const Mat_<double>& camMat, const Mat_<double>& distCoeffs, size_t brdsNum, const Size& cornersSize, int flag = 0)
- {
+ {
const TermCriteria tc(TermCriteria::EPS|TermCriteria::MAX_ITER, 30, 0.1);
vector< vector<Point2f> > imagePoints;
case JUST_FIND_CORNERS: imagePoints = imagePoints_findCb; break;
case ARTIFICIAL_CORNERS: imagePoints = imagePoints_art; break;
- case USE_CORNERS_SUBPIX:
+ case USE_CORNERS_SUBPIX:
for(size_t i = 0; i < brdsNum; ++i)
- {
+ {
Mat gray;
cvtColor(boards[i], gray, CV_BGR2GRAY);
vector<Point2f> tmp = imagePoints_findCb[i];
break;
case USE_4QUAD_CORNERS:
for(size_t i = 0; i < brdsNum; ++i)
- {
+ {
Mat gray;
- cvtColor(boards[i], gray, CV_BGR2GRAY);
+ cvtColor(boards[i], gray, CV_BGR2GRAY);
vector<Point2f> tmp = imagePoints_findCb[i];
find4QuadCornerSubpix(gray, tmp, Size(5, 5));
imagePoints.push_back(tmp);
default:
throw std::exception();
}
-
+
Mat camMat_est = Mat::eye(3, 3, CV_64F), distCoeffs_est = Mat::zeros(1, 5, CV_64F);
vector<Mat> rvecs_est, tvecs_est;
compareCameraMatrs(camMat, camMat_est);
compareDistCoeffs(distCoeffs, distCoeffs_est);
compareShiftVecs(tvecs_exp, tvecs_est);
- compareRotationVecs(rvecs_exp, rvecs_est);
+ compareRotationVecs(rvecs_exp, rvecs_est);
- double rep_errorWOI = reprojectErrorWithoutIntrinsics(chessboard3D, rvecs_exp, tvecs_exp, rvecs_est, tvecs_est);
+ double rep_errorWOI = reprojectErrorWithoutIntrinsics(chessboard3D, rvecs_exp, tvecs_exp, rvecs_est, tvecs_est);
rep_errorWOI /= brdsNum * cornersSize.area();
const double thres2 = 0.01;
{
ts->printf( cvtest::TS::LOG, "%d) Too big reproject error without intrinsics = %f\n", r, rep_errorWOI);
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
- }
-
+ }
+
ts->printf( cvtest::TS::LOG, "%d) Testing solvePnP...\n", r);
rvecs_spnp.resize(brdsNum);
tvecs_spnp.resize(brdsNum);
solvePnP(Mat(objectPoints[i]), Mat(imagePoints[i]), camMat, distCoeffs, rvecs_spnp[i], tvecs_spnp[i]);
compareShiftVecs(tvecs_exp, tvecs_spnp);
- compareRotationVecs(rvecs_exp, rvecs_spnp);
+ compareRotationVecs(rvecs_exp, rvecs_spnp);
}
void run(int)
- {
+ {
ts->set_failed_test_info(cvtest::TS::OK);
RNG& rng = theRNG();
int progress = 0;
int repeat_num = 3;
for(r = 0; r < repeat_num; ++r)
- {
- const int brds_num = 20;
+ {
+ const int brds_num = 20;
- Mat bg(Size(640, 480), CV_8UC3);
- randu(bg, Scalar::all(32), Scalar::all(255));
+ Mat bg(Size(640, 480), CV_8UC3);
+ randu(bg, Scalar::all(32), Scalar::all(255));
GaussianBlur(bg, bg, Size(5, 5), 2);
double fx = 300 + (20 * (double)rng - 10);
Mat_<double> distCoeffs(1, 5, 0.0);
distCoeffs << k1, k2, p1, p2, k3;
- ChessBoardGenerator cbg(Size(9, 8));
+ ChessBoardGenerator cbg(Size(9, 8));
cbg.min_cos = 0.9;
cbg.cov = 0.8;
progress = update_progress(progress, r, repeat_num, 0);
- ts->printf( cvtest::TS::LOG, "\n");
+ ts->printf( cvtest::TS::LOG, "\n");
prepareForTest(bg, camMat, distCoeffs, brds_num, cbg);
- ts->printf( cvtest::TS::LOG, "artificial corners\n");
- runTest(bg.size(), camMat, distCoeffs, brds_num, cbg.cornersSize(), ARTIFICIAL_CORNERS);
+ ts->printf( cvtest::TS::LOG, "artificial corners\n");
+ runTest(bg.size(), camMat, distCoeffs, brds_num, cbg.cornersSize(), ARTIFICIAL_CORNERS);
progress = update_progress(progress, r, repeat_num, 0);
ts->printf( cvtest::TS::LOG, "findChessboard corners\n");
- runTest(bg.size(), camMat, distCoeffs, brds_num, cbg.cornersSize(), JUST_FIND_CORNERS);
+ runTest(bg.size(), camMat, distCoeffs, brds_num, cbg.cornersSize(), JUST_FIND_CORNERS);
progress = update_progress(progress, r, repeat_num, 0);
ts->printf( cvtest::TS::LOG, "cornersSubPix corners\n");
progress = update_progress(progress, r, repeat_num, 0);
}
}
-};
+};
TEST(Calib3d_CalibrateCamera_CPP, accuracy_on_artificial_data) { CV_CalibrateCameraArtificialTest test; test.safe_run(); }
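The intrinsics comparison above uses a mixed relative/absolute tolerance: an estimate fails when its deviation exceeds eps times the expected magnitude plus a small absolute floor dlt, the same pattern spelled out for the translation vectors (norm(tvec_est - tvec) > eps * (norm(tvec) + dlt)). A minimal sketch of such a helper, assuming checkErr follows that pattern (its body is not shown in this hunk, and checkErrSketch is only an illustrative name):

#include <cmath>

// Sketch only: mirrors the tvec tolerance test above; the real checkErr may differ.
static bool checkErrSketch(double estimated, double expected, double eps, double dlt)
{
    // true (i.e. "failed") when the error exceeds eps of the expected magnitude
    // plus the absolute floor dlt, which keeps near-zero expectations well-behaved
    return std::fabs(estimated - expected) > eps * (std::fabs(expected) + dlt);
}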
{
Mat rgb( gray.size(), CV_8U);
merge(vector<Mat>(3, gray), rgb);
-
+
for(size_t i = 0; i < v.size(); i++ )
- circle( rgb, v[i], 3, CV_RGB(255, 0, 0), CV_FILLED);
+ circle( rgb, v[i], 3, CV_RGB(255, 0, 0), CV_FILLED);
if( !u.empty() )
{
}
if (!v.empty())
{
- Mat corners((int)v.size(), 1, CV_32FC2, (void*)&v[0]);
+ Mat corners((int)v.size(), 1, CV_32FC2, (void*)&v[0]);
drawChessboardCorners( rgb, pattern_size, corners, was_found );
}
//namedWindow( "test", 0 ); imshow( "test", rgb ); waitKey(0);
//printf("\n");
err = min(err, err1);
}
-
+
#if defined(_L2_ERR)
err = sqrt(err/count_exp);
#endif //_L2_ERR
-
+
return err;
}
/* ///////////////////// chess_corner_test ///////////////////////// */
void CV_ChessboardDetectorTest::run( int /*start_from */)
{
- cvtest::TS& ts = *this->ts;
- ts.set_failed_test_info( cvtest::TS::OK );
+ ts->set_failed_test_info( cvtest::TS::OK );
/*if (!checkByGenerator())
return;*/
{
case CHESSBOARD:
checkByGenerator();
- if (ts.get_err_code() != cvtest::TS::OK)
+ if (ts->get_err_code() != cvtest::TS::OK)
{
break;
}
run_batch("negative_list.dat");
- if (ts.get_err_code() != cvtest::TS::OK)
+ if (ts->get_err_code() != cvtest::TS::OK)
{
break;
}
run_batch("chessboard_list.dat");
- if (ts.get_err_code() != cvtest::TS::OK)
+ if (ts->get_err_code() != cvtest::TS::OK)
{
break;
}
-
+
run_batch("chessboard_list_subpixel.dat");
break;
case CIRCLES_GRID:
void CV_ChessboardDetectorTest::run_batch( const string& filename )
{
- cvtest::TS& ts = *this->ts;
-
- ts.printf(cvtest::TS::LOG, "\nRunning batch %s\n", filename.c_str());
+ ts->printf(cvtest::TS::LOG, "\nRunning batch %s\n", filename.c_str());
//#define WRITE_POINTS 1
-#ifndef WRITE_POINTS
+#ifndef WRITE_POINTS
double max_rough_error = 0, max_precise_error = 0;
#endif
string folder;
switch( pattern )
{
case CHESSBOARD:
- folder = string(ts.get_data_path()) + "cameracalibration/";
+ folder = string(ts->get_data_path()) + "cameracalibration/";
break;
case CIRCLES_GRID:
- folder = string(ts.get_data_path()) + "cameracalibration/circles/";
+ folder = string(ts->get_data_path()) + "cameracalibration/circles/";
break;
case ASYMMETRIC_CIRCLES_GRID:
- folder = string(ts.get_data_path()) + "cameracalibration/asymmetric_circles/";
+ folder = string(ts->get_data_path()) + "cameracalibration/asymmetric_circles/";
break;
}
FileStorage fs( folder + filename, FileStorage::READ );
FileNode board_list = fs["boards"];
-
+
if( !fs.isOpened() || board_list.empty() || !board_list.isSeq() || board_list.size() % 2 != 0 )
{
- ts.printf( cvtest::TS::LOG, "%s can not be readed or is not valid\n", (folder + filename).c_str() );
- ts.printf( cvtest::TS::LOG, "fs.isOpened=%d, board_list.empty=%d, board_list.isSeq=%d,board_list.size()%2=%d\n",
+ ts->printf( cvtest::TS::LOG, "%s can not be readed or is not valid\n", (folder + filename).c_str() );
+ ts->printf( cvtest::TS::LOG, "fs.isOpened=%d, board_list.empty=%d, board_list.isSeq=%d,board_list.size()%2=%d\n",
fs.isOpened(), (int)board_list.empty(), board_list.isSeq(), board_list.size()%2);
- ts.set_failed_test_info( cvtest::TS::FAIL_MISSING_TEST_DATA );
+ ts->set_failed_test_info( cvtest::TS::FAIL_MISSING_TEST_DATA );
return;
}
for(int idx = 0; idx < max_idx; ++idx )
{
- ts.update_context( this, idx, true );
-
+ ts->update_context( this, idx, true );
+
/* read the image */
- string img_file = board_list[idx * 2];
+ string img_file = board_list[idx * 2];
Mat gray = imread( folder + img_file, 0);
-
+
if( gray.empty() )
{
- ts.printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", img_file.c_str() );
- ts.set_failed_test_info( cvtest::TS::FAIL_MISSING_TEST_DATA );
+ ts->printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", img_file.c_str() );
+ ts->set_failed_test_info( cvtest::TS::FAIL_MISSING_TEST_DATA );
return;
}
- string filename = folder + (string)board_list[idx * 2 + 1];
+ string _filename = folder + (string)board_list[idx * 2 + 1];
bool doesContatinChessboard;
Mat expected;
{
- FileStorage fs(filename, FileStorage::READ);
- fs["corners"] >> expected;
- fs["isFound"] >> doesContatinChessboard;
- fs.release();
- }
- size_t count_exp = static_cast<size_t>(expected.cols * expected.rows);
+ FileStorage fs1(_filename, FileStorage::READ);
+ fs1["corners"] >> expected;
+ fs1["isFound"] >> doesContatinChessboard;
+ fs1.release();
+ }
+ size_t count_exp = static_cast<size_t>(expected.cols * expected.rows);
Size pattern_size = expected.size();
vector<Point2f> v;
break;
}
show_points( gray, Mat(), v, pattern_size, result );
-
+
if( result ^ doesContatinChessboard || v.size() != count_exp )
{
- ts.printf( cvtest::TS::LOG, "chessboard is detected incorrectly in %s\n", img_file.c_str() );
- ts.set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
+ ts->printf( cvtest::TS::LOG, "chessboard is detected incorrectly in %s\n", img_file.c_str() );
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
return;
}
#if 1
if( err > precise_success_error_level )
{
- ts.printf( cvtest::TS::LOG, "Image %s: bad accuracy of adjusted corners %f\n", img_file.c_str(), err );
- ts.set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
+ ts->printf( cvtest::TS::LOG, "Image %s: bad accuracy of adjusted corners %f\n", img_file.c_str(), err );
+ ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
return;
}
#endif
- ts.printf(cvtest::TS::LOG, "Error on %s is %f\n", img_file.c_str(), err);
+ ts->printf(cvtest::TS::LOG, "Error on %s is %f\n", img_file.c_str(), err);
max_precise_error = MAX( max_precise_error, err );
-#endif
+#endif
}
#ifdef WRITE_POINTS
Mat mat_v(pattern_size, CV_32FC2, (void*)&v[0]);
- FileStorage fs(filename, FileStorage::WRITE);
+ FileStorage fs(_filename, FileStorage::WRITE);
fs << "isFound" << result;
fs << "corners" << mat_v;
fs.release();
#endif
progress = update_progress( progress, idx, max_idx, 0 );
- }
-
+ }
+
sum_error /= count;
- ts.printf(cvtest::TS::LOG, "Average error is %f\n", sum_error);
+ ts->printf(cvtest::TS::LOG, "Average error is %f\n", sum_error);
}
double calcErrorMinError(const Size& cornSz, const vector<Point2f>& corners_found, const vector<Point2f>& corners_generated)
{
- Mat m1(cornSz, CV_32FC2, (Point2f*)&corners_generated[0]);
+ Mat m1(cornSz, CV_32FC2, (Point2f*)&corners_generated[0]);
Mat m2; flip(m1, m2, 0);
Mat m3; flip(m1, m3, 1); m3 = m3.t(); flip(m3, m3, 1);
-
+
Mat m4 = m1.t(); flip(m4, m4, 1);
- double min1 = min(calcError(corners_found, m1), calcError(corners_found, m2));
- double min2 = min(calcError(corners_found, m3), calcError(corners_found, m4));
+ double min1 = min(calcError(corners_found, m1), calcError(corners_found, m2));
+ double min2 = min(calcError(corners_found, m3), calcError(corners_found, m4));
return min(min1, min2);
}
-bool validateData(const ChessBoardGenerator& cbg, const Size& imgSz,
+bool validateData(const ChessBoardGenerator& cbg, const Size& imgSz,
const vector<Point2f>& corners_generated)
{
Size cornersSize = cbg.cornersSize();
for(int j = 1; j < mat.cols - 2; ++j)
{
const Point2f& cur = mat(i, j);
-
+
tmp = norm( cur - mat(i + 1, j + 1) );
if (tmp < minNeibDist)
tmp = minNeibDist;
const double threshold = 0.25;
double cbsize = (max(cornersSize.width, cornersSize.height) + 1) * minNeibDist;
- int imgsize = min(imgSz.height, imgSz.width);
+ int imgsize = min(imgSz.height, imgSz.width);
return imgsize * threshold < cbsize;
}
bool CV_ChessboardDetectorTest::checkByGenerator()
-{
+{
bool res = true;
//theRNG() = 0x58e6e895b9913160;
//cv::DefaultRngAuto dra;
//theRNG() = *ts->get_rng();
- Mat bg(Size(800, 600), CV_8UC3, Scalar::all(255));
- randu(bg, Scalar::all(0), Scalar::all(255));
- GaussianBlur(bg, bg, Size(7,7), 3.0);
-
+ Mat bg(Size(800, 600), CV_8UC3, Scalar::all(255));
+ randu(bg, Scalar::all(0), Scalar::all(255));
+ GaussianBlur(bg, bg, Size(7,7), 3.0);
+
Mat_<float> camMat(3, 3);
camMat << 300.f, 0.f, bg.cols/2.f, 0, 300.f, bg.rows/2.f, 0.f, 0.f, 1.f;
-
+
Mat_<float> distCoeffs(1, 5);
distCoeffs << 1.2f, 0.2f, 0.f, 0.f, 0.f;
const Size sizes[] = { Size(6, 6), Size(8, 6), Size(11, 12), Size(5, 4) };
- const size_t sizes_num = sizeof(sizes)/sizeof(sizes[0]);
- const int test_num = 16;
+ const size_t sizes_num = sizeof(sizes)/sizeof(sizes[0]);
+ const int test_num = 16;
int progress = 0;
for(int i = 0; i < test_num; ++i)
- {
+ {
progress = update_progress( progress, i, test_num, 0 );
ChessBoardGenerator cbg(sizes[i % sizes_num]);
if(!validateData(cbg, cb.size(), corners_generated))
{
ts->printf( cvtest::TS::LOG, "Chess board skipped - too small" );
- continue;
+ continue;
}
- /*cb = cb * 0.8 + Scalar::all(30);
+ /*cb = cb * 0.8 + Scalar::all(30);
GaussianBlur(cb, cb, Size(3, 3), 0.8); */
- //cv::addWeighted(cb, 0.8, bg, 0.2, 20, cb);
+ //cv::addWeighted(cb, 0.8, bg, 0.2, 20, cb);
//cv::namedWindow("CB"); cv::imshow("CB", cb); cv::waitKey();
-
+
vector<Point2f> corners_found;
int flags = i % 8; // need to check branches for all flags
bool found = findChessboardCorners(cb, cbg.cornersSize(), corners_found, flags);
- if (!found)
- {
+ if (!found)
+ {
ts->printf( cvtest::TS::LOG, "Chess board corners not found\n" );
ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
res = false;
- return res;
+ return res;
}
- double err = calcErrorMinError(cbg.cornersSize(), corners_found, corners_generated);
+ double err = calcErrorMinError(cbg.cornersSize(), corners_found, corners_generated);
if( err > rough_success_error_level )
{
ts->printf( cvtest::TS::LOG, "bad accuracy of corner guesses" );
ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
res = false;
return res;
- }
- }
+ }
+ }
/* ***** negative ***** */
- {
+ {
vector<Point2f> corners_found;
bool found = findChessboardCorners(bg, Size(8, 7), corners_found);
if (found)
ChessBoardGenerator cbg(Size(8, 7));
vector<Point2f> cg;
- Mat cb = cbg(bg, camMat, distCoeffs, cg);
+ Mat cb = cbg(bg, camMat, distCoeffs, cg);
found = findChessboardCorners(cb, Size(3, 4), corners_found);
if (found)
- res = false;
+ res = false;
Point2f c = std::accumulate(cg.begin(), cg.end(), Point2f(), plus<Point2f>()) * (1.f/cg.size());
Mat_<double> aff(2, 3);
aff << 1.0, 0.0, -(double)c.x, 0.0, 1.0, 0.0;
Mat sh;
- warpAffine(cb, sh, aff, cb.size());
+ warpAffine(cb, sh, aff, cb.size());
found = findChessboardCorners(sh, cbg.cornersSize(), corners_found);
if (found)
- res = false;
-
+ res = false;
+
vector< vector<Point> > cnts(1);
vector<Point>& cnt = cnts[0];
- cnt.push_back(cg[ 0]); cnt.push_back(cg[0+2]);
- cnt.push_back(cg[7+0]); cnt.push_back(cg[7+2]);
+ cnt.push_back(cg[ 0]); cnt.push_back(cg[0+2]);
+ cnt.push_back(cg[7+0]); cnt.push_back(cg[7+2]);
cv::drawContours(cb, cnts, -1, Scalar::all(128), CV_FILLED);
found = findChessboardCorners(cb, cbg.cornersSize(), corners_found);
cv::drawChessboardCorners(cb, cbg.cornersSize(), Mat(corners_found), found);
}
-
+
return res;
}
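checkByGenerator() above exercises findChessboardCorners with every flag combination and then validates the result via drawChessboardCorners and the symmetry-aware error metric. A self-contained sketch of that detector API outside the test harness (the input file name is a placeholder; parameters are illustrative):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("board.png", 0);            // hypothetical input, loaded as grayscale
    if (img.empty()) return 1;
    cv::Size patternSize(9, 6);                          // inner corners per row and column
    std::vector<cv::Point2f> corners;
    bool found = cv::findChessboardCorners(img, patternSize, corners,
                     cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE);
    if (found)                                           // refine to sub-pixel accuracy, as the SUBPIX branch does
        cv::cornerSubPix(img, corners, cv::Size(5, 5), cv::Size(-1, -1),
            cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::MAX_ITER, 30, 0.1));
    cv::Mat vis;
    cv::cvtColor(img, vis, CV_GRAY2BGR);
    cv::drawChessboardCorners(vis, patternSize, cv::Mat(corners), found);
    return found ? 0 : 1;
}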
class Differential
{
-public:
- typedef Mat_<double> mat_t;
+public:
+ typedef Mat_<double> mat_t;
- Differential(double eps_, const mat_t& rv1_, const mat_t& tv1_, const mat_t& rv2_, const mat_t& tv2_)
+ Differential(double eps_, const mat_t& rv1_, const mat_t& tv1_, const mat_t& rv2_, const mat_t& tv2_)
: rv1(rv1_), tv1(tv1_), rv2(rv2_), tv2(tv2_), eps(eps_), ev(3, 1) {}
void dRv1(mat_t& dr3_dr1, mat_t& dt3_dr1)
- {
+ {
dr3_dr1.create(3, 3); dt3_dr1.create(3, 3);
-
- for(int i = 0; i < 3; ++i)
+
+ for(int i = 0; i < 3; ++i)
{
- ev.setTo(Scalar(0)); ev(i, 0) = eps;
-
- composeRT( rv1 + ev, tv1, rv2, tv2, rv3_p, tv3_p);
+ ev.setTo(Scalar(0)); ev(i, 0) = eps;
+
+ composeRT( rv1 + ev, tv1, rv2, tv2, rv3_p, tv3_p);
composeRT( rv1 - ev, tv1, rv2, tv2, rv3_m, tv3_m);
- dr3_dr1.col(i) = rv3_p - rv3_m;
- dt3_dr1.col(i) = tv3_p - tv3_m;
+ dr3_dr1.col(i) = rv3_p - rv3_m;
+ dt3_dr1.col(i) = tv3_p - tv3_m;
}
dr3_dr1 /= 2 * eps; dt3_dr1 /= 2 * eps;
}
void dRv2(mat_t& dr3_dr2, mat_t& dt3_dr2)
- {
+ {
dr3_dr2.create(3, 3); dt3_dr2.create(3, 3);
-
- for(int i = 0; i < 3; ++i)
+
+ for(int i = 0; i < 3; ++i)
{
- ev.setTo(Scalar(0)); ev(i, 0) = eps;
-
- composeRT( rv1, tv1, rv2 + ev, tv2, rv3_p, tv3_p);
+ ev.setTo(Scalar(0)); ev(i, 0) = eps;
+
+ composeRT( rv1, tv1, rv2 + ev, tv2, rv3_p, tv3_p);
composeRT( rv1, tv1, rv2 - ev, tv2, rv3_m, tv3_m);
- dr3_dr2.col(i) = rv3_p - rv3_m;
- dt3_dr2.col(i) = tv3_p - tv3_m;
+ dr3_dr2.col(i) = rv3_p - rv3_m;
+ dt3_dr2.col(i) = tv3_p - tv3_m;
}
dr3_dr2 /= 2 * eps; dt3_dr2 /= 2 * eps;
}
void dTv1(mat_t& drt3_dt1, mat_t& dt3_dt1)
- {
+ {
drt3_dt1.create(3, 3); dt3_dt1.create(3, 3);
-
- for(int i = 0; i < 3; ++i)
+
+ for(int i = 0; i < 3; ++i)
{
- ev.setTo(Scalar(0)); ev(i, 0) = eps;
-
- composeRT( rv1, tv1 + ev, rv2, tv2, rv3_p, tv3_p);
+ ev.setTo(Scalar(0)); ev(i, 0) = eps;
+
+ composeRT( rv1, tv1 + ev, rv2, tv2, rv3_p, tv3_p);
composeRT( rv1, tv1 - ev, rv2, tv2, rv3_m, tv3_m);
- drt3_dt1.col(i) = rv3_p - rv3_m;
- dt3_dt1.col(i) = tv3_p - tv3_m;
+ drt3_dt1.col(i) = rv3_p - rv3_m;
+ dt3_dt1.col(i) = tv3_p - tv3_m;
}
drt3_dt1 /= 2 * eps; dt3_dt1 /= 2 * eps;
}
void dTv2(mat_t& dr3_dt2, mat_t& dt3_dt2)
- {
+ {
dr3_dt2.create(3, 3); dt3_dt2.create(3, 3);
-
- for(int i = 0; i < 3; ++i)
+
+ for(int i = 0; i < 3; ++i)
{
- ev.setTo(Scalar(0)); ev(i, 0) = eps;
-
- composeRT( rv1, tv1, rv2, tv2 + ev, rv3_p, tv3_p);
+ ev.setTo(Scalar(0)); ev(i, 0) = eps;
+
+ composeRT( rv1, tv1, rv2, tv2 + ev, rv3_p, tv3_p);
composeRT( rv1, tv1, rv2, tv2 - ev, rv3_m, tv3_m);
- dr3_dt2.col(i) = rv3_p - rv3_m;
- dt3_dt2.col(i) = tv3_p - tv3_m;
+ dr3_dt2.col(i) = rv3_p - rv3_m;
+ dt3_dt2.col(i) = tv3_p - tv3_m;
}
dr3_dt2 /= 2 * eps; dt3_dt2 /= 2 * eps;
}
-
+
private:
const mat_t& rv1, tv1, rv2, tv2;
double eps;
Mat_<double> ev;
-
+
Differential& operator=(const Differential&);
- Mat rv3_m, tv3_m, rv3_p, tv3_p;
+ Mat rv3_m, tv3_m, rv3_p, tv3_p;
};
class CV_composeRT_Test : public cvtest::BaseTest
public:
CV_composeRT_Test() {}
~CV_composeRT_Test() {}
-protected:
-
+protected:
+
void run(int)
{
- cvtest::TS& ts = *this->ts;
- ts.set_failed_test_info(cvtest::TS::OK);
-
- Mat_<double> rvec1(3, 1), tvec1(3, 1), rvec2(3, 1), tvec2(3, 1);
+ ts->set_failed_test_info(cvtest::TS::OK);
+
+ Mat_<double> rvec1(3, 1), tvec1(3, 1), rvec2(3, 1), tvec2(3, 1);
randu(rvec1, Scalar(0), Scalar(6.29));
randu(rvec2, Scalar(0), Scalar(6.29));
randu(tvec1, Scalar(-2), Scalar(2));
randu(tvec2, Scalar(-2), Scalar(2));
-
+
Mat rvec3, tvec3;
composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
-
+
Mat rvec3_exp, tvec3_exp;
Mat rmat1, rmat2;
const double thres = 1e-5;
if (norm(rvec3_exp, rvec3) > thres || norm(tvec3_exp, tvec3) > thres)
- ts.set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+ ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
const double eps = 1e-3;
Differential diff(eps, rvec1, tvec1, rvec2, tvec2);
-
+
Mat dr3dr1, dr3dt1, dr3dr2, dr3dt2, dt3dr1, dt3dt1, dt3dr2, dt3dt2;
- composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3,
+ composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3,
dr3dr1, dr3dt1, dr3dr2, dr3dt2, dt3dr1, dt3dt1, dt3dr2, dt3dt2);
-
+
Mat_<double> dr3_dr1, dt3_dr1;
diff.dRv1(dr3_dr1, dt3_dr1);
if (norm(dr3_dr1, dr3dr1) > thres || norm(dt3_dr1, dt3dr1) > thres)
- {
- ts.printf( cvtest::TS::LOG, "Invalid derivates by r1\n" );
- ts.set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+ {
+ ts->printf( cvtest::TS::LOG, "Invalid derivates by r1\n" );
+ ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
}
Mat_<double> dr3_dr2, dt3_dr2;
diff.dRv2(dr3_dr2, dt3_dr2);
if (norm(dr3_dr2, dr3dr2) > thres || norm(dt3_dr2, dt3dr2) > thres)
- {
- ts.printf( cvtest::TS::LOG, "Invalid derivates by r2\n" );
- ts.set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+ {
+ ts->printf( cvtest::TS::LOG, "Invalid derivates by r2\n" );
+ ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
}
Mat_<double> dr3_dt1, dt3_dt1;
diff.dTv1(dr3_dt1, dt3_dt1);
if (norm(dr3_dt1, dr3dt1) > thres || norm(dt3_dt1, dt3dt1) > thres)
- {
- ts.printf( cvtest::TS::LOG, "Invalid derivates by t1\n" );
- ts.set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+ {
+ ts->printf( cvtest::TS::LOG, "Invalid derivates by t1\n" );
+ ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
}
-
+
Mat_<double> dr3_dt2, dt3_dt2;
diff.dTv2(dr3_dt2, dt3_dt2);
if (norm(dr3_dt2, dr3dt2) > thres || norm(dt3_dt2, dt3dt2) > thres)
- {
- ts.printf( cvtest::TS::LOG, "Invalid derivates by t2\n" );
- ts.set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+ {
+ ts->printf( cvtest::TS::LOG, "Invalid derivates by t2\n" );
+ ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
}
- }
-};
-
+ }
+};
+
TEST(Calib3d_ComposeRT, accuracy) { CV_composeRT_Test test; test.safe_run(); }
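The Differential helper above verifies composeRT's analytic Jacobians by central differences: column i of d(rvec3)/d(rvec1) is approximated as (rvec3(rvec1 + eps*e_i) - rvec3(rvec1 - eps*e_i)) / (2*eps). A compact sketch of that approximation for the rvec1 block; the helper name numericalDr3Dr1 is illustrative, and all rotation/translation vectors are assumed to be 3x1 CV_64F:

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>

static cv::Mat numericalDr3Dr1(const cv::Mat& rv1, const cv::Mat& tv1,
                               const cv::Mat& rv2, const cv::Mat& tv2, double eps)
{
    cv::Mat_<double> jac(3, 3);
    for (int i = 0; i < 3; ++i)
    {
        cv::Mat_<double> ev = cv::Mat_<double>::zeros(3, 1);
        ev(i, 0) = eps;                                   // unit perturbation along axis i
        cv::Mat rp, tp, rm, tm;
        cv::composeRT(rv1 + ev, tv1, rv2, tv2, rp, tp);   // rvec1 perturbed by +eps
        cv::composeRT(rv1 - ev, tv1, rv2, tv2, rm, tm);   // rvec1 perturbed by -eps
        cv::Mat col = (rp - rm) / (2 * eps);              // central difference
        col.copyTo(jac.col(i));
    }
    return jac;                                           // compare against dr3dr1 from composeRT
}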
double sigma;\r
\r
private:\r
- float max_diff, max_2diff;\r
- bool check_matrix_size(const cv::Mat& H);\r
- bool check_matrix_diff(const cv::Mat& original, const cv::Mat& found, const int norm_type, double &diff);\r
+ float max_diff, max_2diff;\r
+ bool check_matrix_size(const cv::Mat& H);\r
+ bool check_matrix_diff(const cv::Mat& original, const cv::Mat& found, const int norm_type, double &diff);\r
int check_ransac_mask_1(const Mat& src, const Mat& mask);\r
- int check_ransac_mask_2(const Mat& original_mask, const Mat& found_mask);\r
-\r
- void print_information_1(int j, int N, int method, const Mat& H);\r
- void print_information_2(int j, int N, int method, const Mat& H, const Mat& H_res, int k, double diff);\r
- void print_information_3(int j, int N, const Mat& mask);\r
- void print_information_4(int method, int j, int N, int k, int l, double diff);\r
- void print_information_5(int method, int j, int N, int l, double diff);\r
- void print_information_6(int j, int N, int k, double diff, bool value);\r
- void print_information_7(int j, int N, int k, double diff, bool original_value, bool found_value);\r
- void print_information_8(int j, int N, int k, int l, double diff);\r
+ int check_ransac_mask_2(const Mat& original_mask, const Mat& found_mask);\r
+\r
+ void print_information_1(int j, int N, int method, const Mat& H);\r
+ void print_information_2(int j, int N, int method, const Mat& H, const Mat& H_res, int k, double diff);\r
+ void print_information_3(int j, int N, const Mat& mask);\r
+ void print_information_4(int method, int j, int N, int k, int l, double diff);\r
+ void print_information_5(int method, int j, int N, int l, double diff);\r
+ void print_information_6(int j, int N, int k, double diff, bool value);\r
+ void print_information_7(int j, int N, int k, double diff, bool original_value, bool found_value);\r
+ void print_information_8(int j, int N, int k, int l, double diff);\r
};\r
\r
CV_HomographyTest::CV_HomographyTest() : max_diff(1e-2f), max_2diff(2e-2f)\r
\r
CV_HomographyTest::~CV_HomographyTest() {}\r
\r
-bool CV_HomographyTest::check_matrix_size(const cv::Mat& H) \r
+bool CV_HomographyTest::check_matrix_size(const cv::Mat& H)\r
{\r
return (H.rows == 3) && (H.cols == 3);\r
}\r
return 0;\r
}\r
\r
-void CV_HomographyTest::print_information_1(int j, int N, int method, const Mat& H)\r
+void CV_HomographyTest::print_information_1(int j, int N, int _method, const Mat& H)\r
{\r
cout << endl; cout << "Checking for homography matrix sizes..." << endl; cout << endl;\r
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";\r
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;\r
cout << "Count of points: " << N << endl; cout << endl;\r
- cout << "Method: "; if (method == 0) cout << 0; else if (method == 8) cout << "RANSAC"; else cout << "LMEDS"; cout << endl;\r
+ cout << "Method: "; if (_method == 0) cout << 0; else if (_method == 8) cout << "RANSAC"; else cout << "LMEDS"; cout << endl;\r
cout << "Homography matrix:" << endl; cout << endl;\r
cout << H << endl; cout << endl;\r
cout << "Number of rows: " << H.rows << " Number of cols: " << H.cols << endl; cout << endl;\r
}\r
\r
-void CV_HomographyTest::print_information_2(int j, int N, int method, const Mat& H, const Mat& H_res, int k, double diff)\r
+void CV_HomographyTest::print_information_2(int j, int N, int _method, const Mat& H, const Mat& H_res, int k, double diff)\r
{\r
cout << endl; cout << "Checking for accuracy of homography matrix computing..." << endl; cout << endl;\r
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";\r
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;\r
cout << "Count of points: " << N << endl; cout << endl;\r
- cout << "Method: "; if (method == 0) cout << 0; else if (method == 8) cout << "RANSAC"; else cout << "LMEDS"; cout << endl;\r
+ cout << "Method: "; if (_method == 0) cout << 0; else if (_method == 8) cout << "RANSAC"; else cout << "LMEDS"; cout << endl;\r
cout << "Original matrix:" << endl; cout << endl;\r
cout << H << endl; cout << endl;\r
cout << "Found matrix:" << endl; cout << endl;\r
cout << "Number of rows: " << mask.rows << " Number of cols: " << mask.cols << endl; cout << endl;\r
}\r
\r
-void CV_HomographyTest::print_information_4(int method, int j, int N, int k, int l, double diff)\r
+void CV_HomographyTest::print_information_4(int _method, int j, int N, int k, int l, double diff)\r
{\r
cout << endl; cout << "Checking for accuracy of reprojection error computing..." << endl; cout << endl;\r
- cout << "Method: "; if (method == 0) cout << 0 << endl; else cout << "CV_LMEDS" << endl;\r
+ cout << "Method: "; if (_method == 0) cout << 0 << endl; else cout << "CV_LMEDS" << endl;\r
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";\r
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;\r
cout << "Sigma of normal noise: " << sigma << endl;\r
cout << "Maxumum allowed difference: " << max_2diff << endl; cout << endl;\r
}\r
\r
-void CV_HomographyTest::print_information_5(int method, int j, int N, int l, double diff)\r
-{ \r
+void CV_HomographyTest::print_information_5(int _method, int j, int N, int l, double diff)\r
+{\r
cout << endl; cout << "Checking for accuracy of reprojection error computing..." << endl; cout << endl;\r
- cout << "Method: "; if (method == 0) cout << 0 << endl; else cout << "CV_LMEDS" << endl;\r
+ cout << "Method: "; if (_method == 0) cout << 0 << endl; else cout << "CV_LMEDS" << endl;\r
cout << "Type of srcPoints: "; if ((j>-1) && (j<2)) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>";\r
cout << " Type of dstPoints: "; if (j % 2 == 0) cout << "Mat of CV_32FC2"; else cout << "vector <Point2f>"; cout << endl;\r
cout << "Sigma of normal noise: " << sigma << endl;\r
if (code)\r
{\r
print_information_3(j, N, mask[j]);\r
- \r
+\r
switch (code)\r
{\r
case 1: { CV_Error(CALIB3D_HOMOGRAPHY_ERROR_RANSAC_MASK, MESSAGE_RANSAC_MASK_1); break; }\r
\r
default: break;\r
}\r
- \r
+\r
return;\r
}\r
\r
{\r
case 0:\r
case CV_LMEDS:\r
- {\r
+ {\r
Mat H_res_64 [4] = { cv::findHomography(src_mat_2f, dst_mat_2f),\r
cv::findHomography(src_mat_2f, dst_vec),\r
cv::findHomography(src_vec, dst_mat_2f),\r
}\r
\r
continue;\r
- }\r
+ }\r
case CV_RANSAC:\r
{\r
cv::Mat mask_res [4];\r
}\r
}\r
}\r
- \r
+\r
continue;\r
}\r
\r
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
}
}
- virtual bool runTest(RNG& rng, int mode, int method, const vector<Point3f>& points, const double* eps, double& maxError)
+ virtual bool runTest(RNG& rng, int mode, int method, const vector<Point3f>& points, const double* epsilon, double& maxError)
{
Mat rvec, tvec;
vector<int> inliers;
bool isTestSuccess = inliers.size() >= points.size()*0.95;
double rvecDiff = norm(rvec-trueRvec), tvecDiff = norm(tvec-trueTvec);
- isTestSuccess = isTestSuccess && rvecDiff < eps[method] && tvecDiff < eps[method];
+ isTestSuccess = isTestSuccess && rvecDiff < epsilon[method] && tvecDiff < epsilon[method];
double error = rvecDiff > tvecDiff ? rvecDiff : tvecDiff;
//cout << error << " " << inliers.size() << " " << eps[method] << endl;
if (error > maxError)
void run(int)
{
- cvtest::TS& ts = *this->ts;
- ts.set_failed_test_info(cvtest::TS::OK);
+ ts->set_failed_test_info(cvtest::TS::OK);
vector<Point3f> points;
const int pointsCount = 500;
const int methodsCount = 3;
- RNG rng = ts.get_rng();
+ RNG rng = ts->get_rng();
for (int mode = 0; mode < 2; mode++)
//cout << maxError << " " << successfulTestsCount << endl;
if (successfulTestsCount < 0.7*totalTestsCount)
{
- ts.printf( cvtest::TS::LOG, "Invalid accuracy for method %d, failed %d tests from %d, maximum error equals %f, distortion mode equals %d\n",
+ ts->printf( cvtest::TS::LOG, "Invalid accuracy for method %d, failed %d tests from %d, maximum error equals %f, distortion mode equals %d\n",
method, totalTestsCount - successfulTestsCount, totalTestsCount, maxError, mode);
- ts.set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+ ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
}
}
}
~CV_solvePnP_Test() {}
protected:
- virtual bool runTest(RNG& rng, int mode, int method, const vector<Point3f>& points, const double* eps, double& maxError)
+ virtual bool runTest(RNG& rng, int mode, int method, const vector<Point3f>& points, const double* epsilon, double& maxError)
{
Mat rvec, tvec;
Mat trueRvec, trueTvec;
false, method);
double rvecDiff = norm(rvec-trueRvec), tvecDiff = norm(tvec-trueTvec);
- bool isTestSuccess = rvecDiff < eps[method] && tvecDiff < eps[method];
+ bool isTestSuccess = rvecDiff < epsilon[method] && tvecDiff < epsilon[method];
double error = rvecDiff > tvecDiff ? rvecDiff : tvecDiff;
if (error > maxError)
ts->set_failed_test_info( code );
return;
}
-
+
string fullResultFilename = dataPath + ALGORITHMS_DIR + algorithmName + RESULT_FILE;
FileStorage resFS( fullResultFilename, FileStorage::READ );
bool isWrite = true; // write or compare results
assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=3 )
{
- string name = fn[i];
+ string _name = fn[i];
DatasetParams params;
string sf = fn[i+1]; params.dispScaleFactor = atoi(sf.c_str());
string uv = fn[i+2]; params.dispUnknVal = atoi(uv.c_str());
- datasetsParams[name] = params;
+ datasetsParams[_name] = params;
}
return cvtest::TS::OK;
}
public:
CV_StereoSGBMTest()
{
- name = "stereosgbm";
+ name = "stereosgbm";
fill(rmsEps.begin(), rmsEps.end(), 0.25f);
fill(fracEps.begin(), fracEps.end(), 0.01f);
}
GSD_INTENSITY_LT = 15,
GSD_INTENSITY_UT = 250
};
-
+
class CV_EXPORTS Histogram
{
private:
enum {
HistogramSize = (GSD_HUE_UT - GSD_HUE_LT + 1)
};
-
+
protected:
int findCoverageIndex(double surfaceToCover, int defaultValue = 0);
-
+
public:
CvHistogram *fHistogram;
Histogram();
virtual ~Histogram();
-
+
void findCurveThresholds(int &x1, int &x2, double percent = 0.05);
void mergeWith(Histogram *source, double weight);
};
-
+
int nStartCounter, nFrameCount, nSkinHueLowerBound, nSkinHueUpperBound, nMorphingMethod, nSamplingDivider;
double fHistogramMergeFactor, fHuePercentCovered;
Histogram histogramHueMotion, skinHueHistogram;
IplImage *imgHueFrame, *imgSaturationFrame, *imgLastGrayFrame, *imgMotionFrame, *imgFilteredFrame;
IplImage *imgShrinked, *imgTemp, *imgGrayFrame, *imgHSVFrame;
-
+
protected:
void initData(IplImage *src, int widthDivider, int heightDivider);
void adaptiveFilter();
-
+
public:
-
+
enum {
MORPHING_METHOD_NONE = 0,
MORPHING_METHOD_ERODE = 1,
MORPHING_METHOD_ERODE_ERODE = 2,
MORPHING_METHOD_ERODE_DILATE = 3
};
-
+
CvAdaptiveSkinDetector(int samplingDivider = 1, int morphingMethod = MORPHING_METHOD_NONE);
virtual ~CvAdaptiveSkinDetector();
-
+
virtual void process(IplImage *inputBGRImage, IplImage *outputHueMask);
};
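A hedged usage sketch for CvAdaptiveSkinDetector as declared above; it works on IplImage, so the C allocation API is used, and the input file name is a placeholder:

#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/contrib/contrib.hpp>

int main()
{
    IplImage* frame = cvLoadImage("frame.png", CV_LOAD_IMAGE_COLOR);  // hypothetical BGR input
    if (!frame) return 1;
    IplImage* hueMask = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
    CvAdaptiveSkinDetector detector(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
    detector.process(frame, hueMask);      // fills hueMask with the detected skin region
    cvReleaseImage(&hueMask);
    cvReleaseImage(&frame);
    return 0;
}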
class CV_EXPORTS CvFuzzyPoint {
public:
double x, y, value;
-
+
CvFuzzyPoint(double _x, double _y);
};
private:
std::vector<CvFuzzyPoint> points;
double value, centre;
-
+
bool between(double x, double x1, double x2);
-
+
public:
CvFuzzyCurve();
~CvFuzzyCurve();
-
+
void setCentre(double _centre);
double getCentre();
void clear();
class CV_EXPORTS CvFuzzyFunction {
public:
std::vector<CvFuzzyCurve> curves;
-
+
CvFuzzyFunction();
~CvFuzzyFunction();
void addCurve(CvFuzzyCurve *curve, double value = 0);
FuzzyResizer();
int calcOutput(double edgeDensity, double density);
};
-
+
class SearchWindow
{
public:
double density;
unsigned int depthLow, depthHigh;
int verticalEdgeLeft, verticalEdgeRight, horizontalEdgeTop, horizontalEdgeBottom;
-
+
SearchWindow();
~SearchWindow();
void setSize(int _x, int _y, int _width, int _height);
void getResizeAttribsEdgeDensityFuzzy(int &resizeDx, int &resizeDy, int &resizeDw, int &resizeDh);
bool meanShift(IplImage *maskImage, IplImage *depthMap, int maxIteration, bool initDepth);
};
-
+
public:
enum TrackingState
{
tsSetWindow = 3,
tsDisabled = 10
};
-
+
enum ResizeMethod {
rmEdgeDensityLinear = 0,
rmEdgeDensityFuzzy = 1,
rmInnerDensity = 2
};
-
+
enum {
MinKernelMass = 1000
};
-
+
SearchWindow kernel;
int searchMode;
-
+
private:
enum
{
MaxMeanShiftIteration = 5,
MaxSetSizeIteration = 5
};
-
+
void findOptimumSearchWindow(SearchWindow &searchWindow, IplImage *maskImage, IplImage *depthMap, int maxIteration, int resizeMethod, bool initDepth);
-
+
public:
CvFuzzyMeanShiftTracker();
~CvFuzzyMeanShiftTracker();
-
+
void track(IplImage *maskImage, IplImage *depthMap, int resizeMethod, bool resetSearch, int minKernelMass = MinKernelMass);
};
namespace cv
{
-
+
class CV_EXPORTS Octree
{
public:
bool isLeaf;
int children[8];
};
-
+
Octree();
Octree( const vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
virtual ~Octree();
-
+
virtual void buildTree( const vector<Point3f>& points, int maxLevels = 10, int minPoints = 20 );
virtual void getPointsWithinSphere( const Point3f& center, float radius,
vector<Point3f>& points ) const;
int minPoints;
vector<Point3f> points;
vector<Node> nodes;
-
+
virtual void buildNext(size_t node_ind);
};
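A minimal sketch of the Octree API declared above: build the tree from a point cloud once, then query the neighbours inside a sphere (the cloud here is synthetic):

#include <opencv2/core/core.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <vector>
#include <cstdlib>

int main()
{
    std::vector<cv::Point3f> cloud;
    for (int i = 0; i < 1000; ++i)                   // random points in the unit cube
        cloud.push_back(cv::Point3f(rand() / (float)RAND_MAX,
                                    rand() / (float)RAND_MAX,
                                    rand() / (float)RAND_MAX));
    cv::Octree tree(cloud);                          // default maxLevels/minPoints
    std::vector<cv::Point3f> neighbours;
    tree.getPointsWithinSphere(cv::Point3f(0.5f, 0.5f, 0.5f), 0.1f, neighbours);
    return neighbours.empty() ? 1 : 0;
}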
-
-
+
+
class CV_EXPORTS Mesh3D
{
public:
struct EmptyMeshException {};
-
+
Mesh3D();
Mesh3D(const vector<Point3f>& vtx);
~Mesh3D();
-
+
void buildOctree();
void clearOctree();
float estimateResolution(float tryRatio = 0.1f);
void computeNormals(float normalRadius, int minNeighbors = 20);
void computeNormals(const vector<int>& subset, float normalRadius, int minNeighbors = 20);
-
+
void writeAsVrml(const String& file, const vector<Scalar>& colors = vector<Scalar>()) const;
-
+
vector<Point3f> vtx;
vector<Point3f> normals;
float resolution;
Octree octree;
-
+
const static Point3f allzero;
};
-
+
class CV_EXPORTS SpinImageModel
{
public:
-
+
/* model parameters, leave unset for default or auto estimate */
float normalRadius;
int minNeighbors;
-
+
float binSize;
int imageWidth;
-
+
float lambda;
float gamma;
-
+
float T_GeometriccConsistency;
float T_GroupingCorespondances;
-
+
/* public interface */
SpinImageModel();
explicit SpinImageModel(const Mesh3D& mesh);
~SpinImageModel();
-
+
void setLogger(std::ostream* log);
void selectRandomSubset(float ratio);
void setSubset(const vector<int>& subset);
void compute();
-
+
void match(const SpinImageModel& scene, vector< vector<Vec2i> >& result);
-
+
Mat packRandomScaledSpins(bool separateScale = false, size_t xCount = 10, size_t yCount = 10) const;
-
+
size_t getSpinCount() const { return spinImages.rows; }
Mat getSpinImage(size_t index) const { return spinImages.row((int)index); }
const Point3f& getSpinVertex(size_t index) const { return mesh.vtx[subset[index]]; }
const Point3f& getSpinNormal(size_t index) const { return mesh.normals[subset[index]]; }
-
+
const Mesh3D& getMesh() const { return mesh; }
Mesh3D& getMesh() { return mesh; }
-
+
/* static utility functions */
static bool spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result);
-
+
static Point2f calcSpinMapCoo(const Point3f& point, const Point3f& vertex, const Point3f& normal);
-
+
static float geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1,
const Point3f& pointModel1, const Point3f& normalModel1,
const Point3f& pointScene2, const Point3f& normalScene2,
const Point3f& pointModel2, const Point3f& normalModel2);
-
+
static float groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1,
const Point3f& pointModel1, const Point3f& normalModel1,
const Point3f& pointScene2, const Point3f& normalScene2,
float gamma);
protected:
void defaultParams();
-
+
void matchSpinToModel(const Mat& spin, vector<int>& indeces,
vector<float>& corrCoeffs, bool useExtremeOutliers = true) const;
-
+
void repackSpinImages(const vector<uchar>& mask, Mat& spinImages, bool reAlloc = true) const;
-
+
vector<int> subset;
Mesh3D mesh;
Mat spinImages;
std::ostream* out;
};
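A hedged sketch of the spin-image pipeline declared above; the point clouds are placeholders, the subset ratio is illustrative, and depending on the data the mesh may need explicit buildOctree()/computeNormals() calls before compute():

#include <opencv2/core/core.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <vector>

void matchClouds(const std::vector<cv::Point3f>& modelPts,
                 const std::vector<cv::Point3f>& scenePts,
                 std::vector< std::vector<cv::Vec2i> >& correspondences)
{
    cv::Mesh3D model(modelPts), scene(scenePts);
    cv::SpinImageModel modelSpins(model), sceneSpins(scene);
    modelSpins.selectRandomSubset(0.5f);        // compute spins only for part of the vertices
    sceneSpins.selectRandomSubset(0.5f);
    modelSpins.compute();
    sceneSpins.compute();
    modelSpins.match(sceneSpins, correspondences);  // grouped index pairs of matching vertices
}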
-
+
class CV_EXPORTS TickMeter
{
public:
TickMeter();
void start();
void stop();
-
+
int64 getTimeTicks() const;
double getTimeMicro() const;
double getTimeMilli() const;
double getTimeSec() const;
int64 getCounter() const;
-
+
void reset();
private:
int64 counter;
int64 sumTime;
int64 startTime;
};
-
+
CV_EXPORTS std::ostream& operator<<(std::ostream& out, const TickMeter& tm);
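A tiny usage sketch for TickMeter as declared above (the timed workload is arbitrary):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <iostream>

int main()
{
    cv::TickMeter tm;
    tm.start();
    cv::Mat a = cv::Mat::ones(1000, 1000, CV_32F), b;
    cv::GaussianBlur(a, b, cv::Size(5, 5), 1.5);   // arbitrary work to time
    tm.stop();
    std::cout << "elapsed: " << tm.getTimeMilli() << " ms over "
              << tm.getCounter() << " run(s)" << std::endl;
    return 0;
}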
-
+
class CV_EXPORTS SelfSimDescriptor
{
public:
SelfSimDescriptor(const SelfSimDescriptor& ss);
virtual ~SelfSimDescriptor();
SelfSimDescriptor& operator = (const SelfSimDescriptor& ss);
-
+
size_t getDescriptorSize() const;
Size getGridSize( Size imgsize, Size winStride ) const;
-
+
virtual void compute(const Mat& img, vector<float>& descriptors, Size winStride=Size(),
const vector<Point>& locations=vector<Point>()) const;
virtual void computeLogPolarMapping(Mat& mappingMask) const;
virtual void SSD(const Mat& img, Point pt, Mat& ssd) const;
-
+
int smallSize;
int largeSize;
int startDistanceBucket;
int numberOfDistanceBuckets;
int numberOfAngles;
-
+
enum { DEFAULT_SMALL_SIZE = 5, DEFAULT_LARGE_SIZE = 41,
DEFAULT_NUM_ANGLES = 20, DEFAULT_START_DISTANCE_BUCKET = 3,
DEFAULT_NUM_DISTANCE_BUCKETS = 7 };
};
-
-
+
+
typedef bool (*BundleAdjustCallback)(int iteration, double norm_error, void* user_data);
-
+
class LevMarqSparse {
public:
LevMarqSparse();
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
- Mat& X, // measurements, in order of visibility. non visible cases are skipped
+ Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
-
+
// callback for estimation of Jacobian matrices
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
void* data, // user-specific data passed to the callbacks
BundleAdjustCallback cb, void* user_data
);
-
+
virtual ~LevMarqSparse();
-
+
virtual void run( int npoints, // number of points
int ncameras, // number of cameras
int nPointParams, // number of params per one point (3 in case of 3D points)
Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
// 1 - point is visible for the camera, 0 - invisible
Mat& P0, // starting vector of parameters, first cameras then points
- Mat& X, // measurements, in order of visibility. non visible cases are skipped
+ Mat& X, // measurements, in order of visibility. non visible cases are skipped
TermCriteria criteria, // termination criteria
-
+
// callback for estimation of Jacobian matrices
void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
Mat& cam_params, Mat& A, Mat& B, void* data),
Mat& cam_params, Mat& estim, void* data),
void* data // user-specific data passed to the callbacks
);
-
+
virtual void clear();
-
+
// useful function to do simple bundle adjustment tasks
static void bundleAdjust(vector<Point3d>& points, // positions of points in global coordinate system (input and output)
const vector<vector<Point2d> >& imagePoints, // projections of 3d points for every camera
- const vector<vector<int> >& visibility, // visibility of 3d points for every camera
+ const vector<vector<int> >& visibility, // visibility of 3d points for every camera
vector<Mat>& cameraMatrix, // intrinsic matrices of all cameras (input and output)
vector<Mat>& R, // rotation matrices of all cameras (input and output)
vector<Mat>& T, // translation vector of all cameras (input and output)
const TermCriteria& criteria=
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, DBL_EPSILON),
BundleAdjustCallback cb = 0, void* user_data = 0);
-
+
public:
virtual void optimize(CvMat &_vis); //main function that runs minimization
-
+
//iteratively asks for measurement for visible camera-point pairs
void ask_for_proj(CvMat &_vis,bool once=false);
//iteratively asks for Jacobians for every camera_point pair
void ask_for_projac(CvMat &_vis);
-
+
CvMat* err; //error X-hX
double prevErrNorm, errNorm;
double lambda;
CvTermCriteria criteria;
int iters;
-
+
CvMat** U; //size of array is equal to number of cameras
CvMat** V; //size of array is equal to number of points
CvMat** inv_V_star; //inverse of V*
-
+
CvMat** A;
CvMat** B;
CvMat** W;
-
- CvMat* X; //measurement
- CvMat* hX; //current measurement extimation given new parameter vector
-
- CvMat* prevP; //current already accepted parameter.
+
+ CvMat* X; //measurement
+ CvMat* hX; //current measurement estimation given new parameter vector
+
+ CvMat* prevP; //current already accepted parameter.
CvMat* P; // parameters used to evaluate function with new params
- // this parameters may be rejected
-
+ // these parameters may be rejected
+
CvMat* deltaP; //computed increase of parameters (result of normal system solution )
-
+
CvMat** ea; // sum_i AijT * e_ij , used as right part of normal equation
- // length of array is j = number of cameras
+ // length of array is j = number of cameras
CvMat** eb; // sum_j BijT * e_ij , used as right part of normal equation
// length of array is i = number of points
-
+
CvMat** Yj; //length of array is i = num_points
-
- CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params
-
+
+ CvMat* S; //big matrix of block Sjk , each block has size num_cam_params x num_cam_params
+
CvMat* JtJ_diag; //diagonal of JtJ, used to backup diagonal elements before augmentation
-
+
CvMat* Vis_index; // matrix which element is index of measurement for point i and camera j
-
+
int num_cams;
int num_points;
int num_err_param;
int num_cam_param;
int num_point_param;
-
- //target function and jacobian pointers, which needs to be initialized
+
+ //target function and Jacobian pointers, which need to be initialized
void (*fjac)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data);
void (*func)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data);
-
+
void* data;
-
+
BundleAdjustCallback cb;
void* user_data;
- };
-
+ };
+
CV_EXPORTS int chamerMatching( Mat& img, Mat& templ,
vector<vector<Point> >& results, vector<float>& cost,
double templScale=1, int maxMatches = 20,
double minMatchDistance = 1.0, int padX = 3,
int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
double orientationWeight = 0.5, double truncate = 20);
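A usage sketch for chamerMatching() as declared above; both inputs are expected to be edge maps, so Canny is applied first, and the file names are placeholders:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("scene.png", 0), templ = cv::imread("logo.png", 0);
    if (img.empty() || templ.empty()) return 1;
    cv::Mat imgEdges, templEdges;
    cv::Canny(img, imgEdges, 50, 150);
    cv::Canny(templ, templEdges, 50, 150);
    std::vector< std::vector<cv::Point> > results;   // contour of each accepted match
    std::vector<float> costs;                        // matching cost per result
    int best = cv::chamerMatching(imgEdges, templEdges, results, costs);  // best match index (negative if none)
    return best >= 0 ? 0 : 1;
}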
-
-
+
+
class CV_EXPORTS StereoVar
{
public:
- // Flags
+ // Flags
enum {USE_INITIAL_DISPARITY = 1, USE_EQUALIZE_HIST = 2, USE_SMART_ID = 4, USE_AUTO_PARAMS = 8, USE_MEDIAN_FILTERING = 16};
enum {CYCLE_O, CYCLE_V};
enum {PENALIZATION_TICHONOV, PENALIZATION_CHARBONNIER, PENALIZATION_PERONA_MALIK};
-
+
//! the default constructor
CV_WRAP StereoVar();
-
+
//! the full constructor taking all the necessary algorithm parameters
CV_WRAP StereoVar(int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags);
-
+
//! the destructor
virtual ~StereoVar();
-
+
//! the stereo correspondence operator that computes disparity map for the specified rectified stereo pair
CV_WRAP_AS(compute) virtual void operator()(const Mat& left, const Mat& right, Mat& disp);
-
- CV_PROP_RW int levels;
- CV_PROP_RW double pyrScale;
- CV_PROP_RW int nIt;
- CV_PROP_RW int minDisp;
- CV_PROP_RW int maxDisp;
- CV_PROP_RW int poly_n;
- CV_PROP_RW double poly_sigma;
- CV_PROP_RW float fi;
- CV_PROP_RW float lambda;
- CV_PROP_RW int penalization;
- CV_PROP_RW int cycle;
- CV_PROP_RW int flags;
-
+
+ CV_PROP_RW int levels;
+ CV_PROP_RW double pyrScale;
+ CV_PROP_RW int nIt;
+ CV_PROP_RW int minDisp;
+ CV_PROP_RW int maxDisp;
+ CV_PROP_RW int poly_n;
+ CV_PROP_RW double poly_sigma;
+ CV_PROP_RW float fi;
+ CV_PROP_RW float lambda;
+ CV_PROP_RW int penalization;
+ CV_PROP_RW int cycle;
+ CV_PROP_RW int flags;
+
private:
void autoParams();
- void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
+ void FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level);
void VCycle_MyFAS(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
void VariationalSolver(Mat &I1_h, Mat &I2_h, Mat &I2x_h, Mat &u_h, int level);
};
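A hedged sketch of driving StereoVar as declared above; the rectified input pair is hypothetical and the constructor arguments are placeholders rather than tuned settings:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib/contrib.hpp>

int main()
{
    cv::Mat left = cv::imread("left.png", 0), right = cv::imread("right.png", 0);
    if (left.empty() || right.empty()) return 1;
    cv::StereoVar matcher(3,             // levels
                          0.5,           // pyrScale
                          25,            // nIt
                          0, 64,         // minDisp, maxDisp (illustrative only)
                          5, 1.1,        // poly_n, poly_sigma
                          25.0f, 0.03f,  // fi, lambda
                          cv::StereoVar::PENALIZATION_TICHONOV,
                          cv::StereoVar::CYCLE_V,
                          cv::StereoVar::USE_SMART_ID | cv::StereoVar::USE_MEDIAN_FILTERING);
    cv::Mat disparity;
    matcher(left, right, disparity);     // the CV_WRAP_AS(compute) operator fills the disparity map
    return 0;
}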
-
+
CV_EXPORTS void polyfit(const Mat& srcx, const Mat& srcy, Mat& dst, int order);
- class CV_EXPORTS Directory
+ class CV_EXPORTS Directory
{
- public:
- static std::vector<std::string> GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true );
- static std::vector<std::string> GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true );
- static std::vector<std::string> GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true );
+ public:
+ static std::vector<std::string> GetListFiles ( const std::string& path, const std::string & exten = "*", bool addPath = true );
+ static std::vector<std::string> GetListFilesR ( const std::string& path, const std::string & exten = "*", bool addPath = true );
+ static std::vector<std::string> GetListFolders( const std::string& path, const std::string & exten = "*", bool addPath = true );
};
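A one-call sketch for the Directory helper declared above (the path and pattern are placeholders):

#include <opencv2/contrib/contrib.hpp>
#include <string>
#include <vector>

int main()
{
    // non-recursive listing; GetListFilesR would descend into subfolders
    std::vector<std::string> images = cv::Directory::GetListFiles("./data", "*.png", true);
    return images.empty() ? 1 : 0;
}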
/*
class CV_EXPORTS LogPolar_Interp
{
public:
-
+
LogPolar_Interp() {}
/**
*\param center the transformation center: where the output precision is maximal
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
- *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
+ *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
- *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
+ *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
*/
LogPolar_Interp(int w, int h, Point2i center, int R=70, double ro0=3.0,
*Destructor
*/
~LogPolar_Interp();
-
+
protected:
-
+
Mat Rsri;
Mat Csri;
*More details can be found in http://dx.doi.org/10.1007/978-3-642-23968-7_5
*/
class CV_EXPORTS LogPolar_Overlapping
- {
+ {
public:
LogPolar_Overlapping() {}
-
+
/**
*Constructor
*\param w the width of the input image
*\param center the transformation center: where the output precision is maximal
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
- *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
+ *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
- *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
+ *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
*/
LogPolar_Overlapping(int w, int h, Point2i center, int R=70,
*Destructor
*/
~LogPolar_Overlapping();
-
+
protected:
-
+
Mat Rsri;
Mat Csri;
vector<int> Rsr;
{
public:
LogPolar_Adjacent() {}
-
+
/**
*Constructor
*\param w the width of the input image
*\param R the number of rings of the cortical image (default value 70 pixel)
*\param ro0 the radius of the blind spot (default value 3 pixel)
*\param smin the size of the subpixel (default value 0.25 pixel)
- *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
+ *\param full \a 1 (default value) means that the retinal image (the inverse transform) is computed within the circumscribing circle.
* \a 0 means that the retinal image is computed within the inscribed circle.
*\param S the number of sectors of the cortical image (default value 70 pixel).
* Its value is usually internally computed to obtain a pixel aspect ratio equals to 1.
- *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
+ *\param sp \a 1 (default value) means that the parameter \a S is internally computed.
* \a 0 means that the parameter \a S is provided by the user.
- */
+ */
LogPolar_Adjacent(int w, int h, Point2i center, int R=70, double ro0=3.0, double smin=0.25, int full=1, int S=117, int sp=1);
/**
*Transformation from Cartesian image to cortical (log-polar) image.
bool get_uv(double x, double y, int&u, int&v);
void create_map(int M, int N, int R, int S, double ro0, double smin);
};
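A hedged sketch for the log-polar transforms documented above; to_cortical()/to_cartesian() are assumed from the usual interface of these classes (they are not shown in this excerpt), and the input file name is a placeholder:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib/contrib.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png", 0);             // hypothetical grayscale input
    if (img.empty()) return 1;
    cv::Point2i center(img.cols / 2, img.rows / 2);       // maximal precision at the image center
    cv::LogPolar_Interp lp(img.cols, img.rows, center);   // R, ro0 and the rest keep their defaults
    cv::Mat cortical = lp.to_cortical(img);               // assumed forward (Cartesian -> log-polar) transform
    cv::Mat retinal  = lp.to_cartesian(cortical);         // assumed inverse transform
    return retinal.empty() ? 1 : 0;
}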
-
+
CV_EXPORTS Mat subspaceProject(InputArray W, InputArray mean, InputArray src);
CV_EXPORTS Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
-
+
class CV_EXPORTS LDA
{
public:
// Returns the eigenvalues of this LDA.
Mat eigenvalues() const { return _eigenvalues; }
-
+
protected:
bool _dataAsRow;
int _num_components;
void lda(InputArray src, InputArray labels);
};
-
+
class CV_EXPORTS FaceRecognizer
{
public:
// Deserializes this object from a given cv::FileStorage.
virtual void load(const FileStorage& fs) = 0;
-
+
// Returns eigenvectors (if any)
virtual Mat eigenvectors() const { return Mat(); }
};
-
+
CV_EXPORTS Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0);
CV_EXPORTS Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0);
CV_EXPORTS Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
int grid_x=8, int grid_y=8);
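A hedged sketch for the factory functions declared above. Only load() and eigenvectors() appear in this excerpt, so the train()/predict() calls below are assumed from the usual FaceRecognizer interface; image paths and labels are hypothetical:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib/contrib.hpp>
#include <vector>

int main()
{
    std::vector<cv::Mat> faces;            // equally sized grayscale face crops
    std::vector<int> labels;               // one integer id per image
    faces.push_back(cv::imread("person0_a.png", 0)); labels.push_back(0);
    faces.push_back(cv::imread("person1_a.png", 0)); labels.push_back(1);
    if (faces[0].empty() || faces[1].empty()) return 1;

    cv::Ptr<cv::FaceRecognizer> model = cv::createEigenFaceRecognizer(10); // keep 10 components
    model->train(faces, labels);                                           // assumed API
    int predicted = model->predict(cv::imread("query.png", 0));            // assumed API
    return predicted >= 0 ? 0 : 1;
}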
-
+
enum
{
COLORMAP_AUTUMN = 0,
COLORMAP_MKPJ1 = 12,
COLORMAP_MKPJ2 = 13
};
-
+
CV_EXPORTS void applyColorMap(InputArray src, OutputArray dst, int colormap);
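A short sketch for applyColorMap() as declared above, using one of the constants from the enum (the input file name is a placeholder):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/contrib/contrib.hpp>

int main()
{
    cv::Mat gray = cv::imread("depth.png", 0);              // single-channel input
    if (gray.empty()) return 1;
    cv::Mat colored;
    cv::applyColorMap(gray, colored, cv::COLORMAP_AUTUMN);  // pseudo-color the grayscale image
    cv::imwrite("depth_colored.png", colored);
    return 0;
}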
-
+
CV_EXPORTS bool initModule_contrib();
}
struct CV_EXPORTS CvFeatureTrackerParams
{
enum { SIFT = 0, SURF = 1, OPTICAL_FLOW = 2 };
- CvFeatureTrackerParams(int feature_type = 0, int window_size = 0)
+ CvFeatureTrackerParams(int featureType = 0, int windowSize = 0)
{
- feature_type = 0;
- window_size = 0;
+ featureType = 0;
+ windowSize = 0;
}
int feature_type; // Feature type to use
LevMarqSparse::~LevMarqSparse() {
clear();
-}
+}
LevMarqSparse::LevMarqSparse(int npoints, // number of points
- int ncameras, // number of cameras
- int nPointParams, // number of params per one point (3 in case of 3D points)
- int nCameraParams, // number of parameters per one camera
- int nErrParams, // number of parameters in measurement vector
- // for 1 point at one camera (2 in case of 2D projections)
- Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
- // 1 - point is visible for the camera, 0 - invisible
- Mat& P0, // starting vector of parameters, first cameras then points
- Mat& X_, // measurements, in order of visibility. non visible cases are skipped
- TermCriteria criteria, // termination criteria
-
- // callback for estimation of Jacobian matrices
- void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
- Mat& cam_params, Mat& A, Mat& B, void* data),
- // callback for estimation of backprojection errors
- void (CV_CDECL * func)(int i, int j, Mat& point_params,
- Mat& cam_params, Mat& estim, void* data),
- void* data, // user-specific data passed to the callbacks
- BundleAdjustCallback _cb, void* _user_data
- ) {
+ int ncameras, // number of cameras
+ int nPointParams, // number of params per one point (3 in case of 3D points)
+ int nCameraParams, // number of parameters per one camera
+ int nErrParams, // number of parameters in measurement vector
+ // for 1 point at one camera (2 in case of 2D projections)
+ Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
+ // 1 - point is visible for the camera, 0 - invisible
+ Mat& P0, // starting vector of parameters, first cameras then points
+ Mat& X_, // measurements, in order of visibility. non visible cases are skipped
+ TermCriteria _criteria, // termination criteria
+
+ // callback for estimation of Jacobian matrices
+ void (CV_CDECL * _fjac)(int i, int j, Mat& point_params,
+ Mat& cam_params, Mat& A, Mat& B, void* data),
+ // callback for estimation of backprojection errors
+ void (CV_CDECL * _func)(int i, int j, Mat& point_params,
+ Mat& cam_params, Mat& estim, void* data),
+ void* _data, // user-specific data passed to the callbacks
+ BundleAdjustCallback _cb, void* _user_data
+ ) {
Vis_index = X = prevP = P = deltaP = err = JtJ_diag = S = hX = NULL;
U = ea = V = inv_V_star = eb = Yj = NULL;
A = B = W = NULL;
cb = _cb;
user_data = _user_data;
-
+
run(npoints, ncameras, nPointParams, nCameraParams, nErrParams, visibility,
- P0, X_, criteria, fjac, func, data);
+ P0, X_, _criteria, _fjac, _func, _data);
}
void LevMarqSparse::clear() {
//CvMat* tmp = ((CvMat**)(A->data.ptr + i * A->step))[j];
CvMat* tmp = A[j+i*num_cams];
if (tmp)
- cvReleaseMat( &tmp );
+ cvReleaseMat( &tmp );
//tmp = ((CvMat**)(B->data.ptr + i * B->step))[j];
tmp = B[j+i*num_cams];
if (tmp)
- cvReleaseMat( &tmp );
-
+ cvReleaseMat( &tmp );
+
//tmp = ((CvMat**)(W->data.ptr + j * W->step))[i];
tmp = W[j+i*num_cams];
if (tmp)
- cvReleaseMat( &tmp );
+ cvReleaseMat( &tmp );
}
- }
+ }
delete A; //cvReleaseMat(&A);
delete B;//cvReleaseMat(&B);
delete W;//cvReleaseMat(&W);
cvReleaseMat( &ea[j] );
}
delete ea;
-
+
//allocate V and inv_V_star
for( int i = 0; i < num_points; i++ ) {
cvReleaseMat(&V[i]);
for( int i = 0; i < num_points; i++ ) {
cvReleaseMat(&Yj[i]);
- }
+ }
delete Yj;
-
+
cvReleaseMat(&X);
cvReleaseMat(&prevP);
cvReleaseMat(&P);
cvReleaseMat(&deltaP);
- cvReleaseMat(&err);
-
+ cvReleaseMat(&err);
+
cvReleaseMat(&JtJ_diag);
cvReleaseMat(&S);
cvReleaseMat(&hX);
//num_errors - number of measurements.
void LevMarqSparse::run( int num_points_, //number of points
- int num_cams_, //number of cameras
- int num_point_param_, //number of params per one point (3 in case of 3D points)
- int num_cam_param_, //number of parameters per one camera
- int num_err_param_, //number of parameters in measurement vector for 1 point at one camera (2 in case of 2D projections)
-    Mat& visibility, //visibility matrix. rows correspond to points, columns correspond to cameras
-    // 1 - point is visible for the camera, 0 - invisible
- Mat& P0, //starting vector of parameters, first cameras then points
- Mat& X_init, //measurements, in order of visibility. non visible cases are skipped
- TermCriteria criteria_init,
- void (*fjac_)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data),
- void (*func_)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data),
- void* data_
- ) { //termination criteria
+ int num_cams_, //number of cameras
+ int num_point_param_, //number of params per one point (3 in case of 3D points)
+ int num_cam_param_, //number of parameters per one camera
+ int num_err_param_, //number of parameters in measurement vector for 1 point at one camera (2 in case of 2D projections)
+                        Mat& visibility, //visibility matrix. rows correspond to points, columns correspond to cameras
+                        // 1 - point is visible for the camera, 0 - invisible
+ Mat& P0, //starting vector of parameters, first cameras then points
+ Mat& X_init, //measurements, in order of visibility. non visible cases are skipped
+ TermCriteria criteria_init,
+ void (*fjac_)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data),
+ void (*func_)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data),
+ void* data_
+ ) { //termination criteria
//clear();
-
+
func = func_; //assign evaluation function
fjac = fjac_; //assign jacobian
data = data_;
num_cams = num_cams_;
num_points = num_points_;
- num_err_param = num_err_param_;
+ num_err_param = num_err_param_;
num_cam_param = num_cam_param_;
num_point_param = num_point_param_;
int Wij_width = Bij_width;
//allocate memory for all Aij, Bij, U, V, W
-
+
//allocate num_points*num_cams matrices A
-
+
  //Allocate matrix A whose elements are pointers to Aij
//if Aij is zero (point i is not visible in camera j) then A(i,j) contains NULL
//A = cvCreateMat( num_points, num_cams, CV_32S /*pointer is stored here*/ );
//cvSetZero( B );
//cvSetZero( W );
cvSet( Vis_index, cvScalar(-1) );
-
+
//fill matrices A and B based on visibility
CvMat _vis = visibility;
int index = 0;
for (int i = 0; i < num_points; i++ ) {
for (int j = 0; j < num_cams; j++ ) {
if (((int*)(_vis.data.ptr+ i * _vis.step))[j] ) {
- ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j] = index;
- index += num_err_param;
-
- //create matrices Aij, Bij
- CvMat* tmp = cvCreateMat(Aij_height, Aij_width, CV_64F );
- //((CvMat**)(A->data.ptr + i * A->step))[j] = tmp;
- cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
- A[j+i*num_cams] = tmp;
-
- tmp = cvCreateMat( Bij_height, Bij_width, CV_64F );
- //((CvMat**)(B->data.ptr + i * B->step))[j] = tmp;
- cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
- B[j+i*num_cams] = tmp;
-
- tmp = cvCreateMat( Wij_height, Wij_width, CV_64F );
- //((CvMat**)(W->data.ptr + j * W->step))[i] = tmp; //note indices i and j swapped
- cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
- W[j+i*num_cams] = tmp;
+ ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j] = index;
+ index += num_err_param;
+
+ //create matrices Aij, Bij
+ CvMat* tmp = cvCreateMat(Aij_height, Aij_width, CV_64F );
+ //((CvMat**)(A->data.ptr + i * A->step))[j] = tmp;
+ cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
+ A[j+i*num_cams] = tmp;
+
+ tmp = cvCreateMat( Bij_height, Bij_width, CV_64F );
+ //((CvMat**)(B->data.ptr + i * B->step))[j] = tmp;
+ cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
+ B[j+i*num_cams] = tmp;
+
+ tmp = cvCreateMat( Wij_height, Wij_width, CV_64F );
+ //((CvMat**)(W->data.ptr + j * W->step))[i] = tmp; //note indices i and j swapped
+ cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
+ W[j+i*num_cams] = tmp;
} else{
- A[j+i*num_cams] = NULL;
- B[j+i*num_cams] = NULL;
- W[j+i*num_cams] = NULL;
+ A[j+i*num_cams] = NULL;
+ B[j+i*num_cams] = NULL;
+ W[j+i*num_cams] = NULL;
}
- }
+ }
}
-
+
//allocate U
U = new CvMat* [num_cams];
for (int j = 0; j < num_cams; j++ ) {
ea[j] = cvCreateMat( U_size, 1, CV_64F );
cvSetZero(ea[j]);
}
-
+
//allocate V and inv_V_star
V = new CvMat* [num_points];
inv_V_star = new CvMat* [num_points];
cvSetZero(V[i]);
cvSetZero(inv_V_star[i]);
}
-
+
//allocate eb
eb = new CvMat* [num_points];
for (int i = 0; i < num_points; i++ ) {
eb[i] = cvCreateMat( V_size, 1, CV_64F );
cvSetZero(eb[i]);
- }
-
+ }
+
//allocate Yj
Yj = new CvMat* [num_points];
for (int i = 0; i < num_points; i++ ) {
Yj[i] = cvCreateMat( Wij_height, Wij_width, CV_64F ); //Yij has the same size as Wij
cvSetZero(Yj[i]);
- }
-
+ }
+
//allocate matrix S
S = cvCreateMat( num_cams * num_cam_param, num_cams * num_cam_param, CV_64F);
cvSetZero(S);
JtJ_diag = cvCreateMat( num_cams * num_cam_param + num_points * num_point_param, 1, CV_64F );
cvSetZero(JtJ_diag);
-
+
//set starting parameters
- CvMat _tmp_ = CvMat(P0);
- prevP = cvCloneMat( &_tmp_ );
+ CvMat _tmp_ = CvMat(P0);
+ prevP = cvCloneMat( &_tmp_ );
P = cvCloneMat( &_tmp_ );
deltaP = cvCloneMat( &_tmp_ );
-
+
//set measurements
_tmp_ = CvMat(X_init);
- X = cvCloneMat( &_tmp_ );
+ X = cvCloneMat( &_tmp_ );
//create vector for estimated measurements
hX = cvCreateMat( X->rows, X->cols, CV_64F );
cvSetZero(hX);
prevErrNorm = cvNorm( err, 0, CV_L2 );
// std::cerr<<"prevErrNorm = "<<prevErrNorm<<std::endl;
- iters = 0;
+ iters = 0;
criteria = criteria_init;
-
+
optimize(_vis);
ask_for_proj(_vis,true);
func( i, j, _point_mat, _cam_mat, _measur_mat, data);
assert( ind*num_err_param == ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j]);
ind+=1;
- }
- }
+ }
+ }
}
}
void LevMarqSparse::ask_for_projac(CvMat &/*_vis*/) //should be evaluated at point prevP
{
// compute jacobians Aij and Bij
- for (int i = 0; i < num_points; i++ )
+ for (int i = 0; i < num_points; i++ )
{
CvMat point_mat;
cvGetSubRect( prevP, &point_mat, cvRect( 0, num_cams * num_cam_param + num_point_param * i, 1, num_point_param ));
//CvMat** A_line = (CvMat**)(A->data.ptr + A->step * i);
//CvMat** B_line = (CvMat**)(B->data.ptr + B->step * i);
- for( int j = 0; j < num_cams; j++ )
+ for( int j = 0; j < num_cams; j++ )
{
//CvMat* Aij = A_line[j];
//if( Aij ) //Aij is not zero
CvMat* Aij = A[j+i*num_cams];
CvMat* Bij = B[j+i*num_cams];
- if(Aij)
+ if(Aij)
{
//CvMat** A_line = (CvMat**)(A->data.ptr + A->step * i);
//CvMat** B_line = (CvMat**)(B->data.ptr + B->step * i);
}
}
}
-}
+}
void LevMarqSparse::optimize(CvMat &_vis) { //main function that runs minimization
bool done = false;
-
- CvMat* YWt = cvCreateMat( num_cam_param, num_cam_param, CV_64F ); //this matrix used to store Yij*Wik'
- CvMat* E = cvCreateMat( S->height, 1 , CV_64F ); //this is right part of system with S
+
+ CvMat* YWt = cvCreateMat( num_cam_param, num_cam_param, CV_64F ); //this matrix used to store Yij*Wik'
+ CvMat* E = cvCreateMat( S->height, 1 , CV_64F ); //this is right part of system with S
cvSetZero(YWt);
cvSetZero(E);
int invisible_count=0;
//compute U_j and ea_j
for (int j = 0; j < num_cams; j++ ) {
- cvSetZero(U[j]);
+ cvSetZero(U[j]);
cvSetZero(ea[j]);
    //sum by i (number of points)
for (int i = 0; i < num_points; i++ ) {
- //get Aij
- //CvMat* Aij = ((CvMat**)(A->data.ptr + A->step * i))[j];
- CvMat* Aij = A[j+i*num_cams];
- if (Aij ) {
- //Uj+= AijT*Aij
- cvGEMM( Aij, Aij, 1, U[j], 1, U[j], CV_GEMM_A_T );
- //ea_j += AijT * e_ij
- CvMat eij;
-
- int index = ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j];
-
- cvGetSubRect( err, &eij, cvRect( 0, index, 1, Aij->height ) ); //width of transposed Aij
- cvGEMM( Aij, &eij, 1, ea[j], 1, ea[j], CV_GEMM_A_T );
- }
- else
- invisible_count++;
+ //get Aij
+ //CvMat* Aij = ((CvMat**)(A->data.ptr + A->step * i))[j];
+ CvMat* Aij = A[j+i*num_cams];
+ if (Aij ) {
+ //Uj+= AijT*Aij
+ cvGEMM( Aij, Aij, 1, U[j], 1, U[j], CV_GEMM_A_T );
+ //ea_j += AijT * e_ij
+ CvMat eij;
+
+ int index = ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j];
+
+ cvGetSubRect( err, &eij, cvRect( 0, index, 1, Aij->height ) ); //width of transposed Aij
+ cvGEMM( Aij, &eij, 1, ea[j], 1, ea[j], CV_GEMM_A_T );
+ }
+ else
+ invisible_count++;
}
} //U_j and ea_j computed for all j
// if (!(iters%100))
- int nviz = X->rows / num_err_param;
- double e2 = prevErrNorm*prevErrNorm, e2n = e2 / nviz;
- std::cerr<<"Iteration: "<<iters<<", normError: "<<e2<<" ("<<e2n<<")"<<std::endl;
+ {
+ int nviz = X->rows / num_err_param;
+ double e2 = prevErrNorm*prevErrNorm, e2n = e2 / nviz;
+ std::cerr<<"Iteration: "<<iters<<", normError: "<<e2<<" ("<<e2n<<")"<<std::endl;
+ }
if (cb)
cb(iters, prevErrNorm, user_data);
//compute V_i and eb_i
for (int i = 0; i < num_points; i++ ) {
- cvSetZero(V[i]);
+ cvSetZero(V[i]);
cvSetZero(eb[i]);
-
+
    //sum over j (number of cameras)
for( int j = 0; j < num_cams; j++ ) {
- //get Bij
- //CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
- CvMat* Bij = B[j+i*num_cams];
- if (Bij ) {
- //Vi+= BijT*Bij
- cvGEMM( Bij, Bij, 1, V[i], 1, V[i], CV_GEMM_A_T );
-
- //eb_i += BijT * e_ij
- int index = ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j];
-
- CvMat eij;
- cvGetSubRect( err, &eij, cvRect( 0, index, 1, Bij->height ) ); //width of transposed Bij
- cvGEMM( Bij, &eij, 1, eb[i], 1, eb[i], CV_GEMM_A_T );
- }
+ //get Bij
+ //CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
+ CvMat* Bij = B[j+i*num_cams];
+ if (Bij ) {
+ //Vi+= BijT*Bij
+ cvGEMM( Bij, Bij, 1, V[i], 1, V[i], CV_GEMM_A_T );
+
+ //eb_i += BijT * e_ij
+ int index = ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j];
+
+ CvMat eij;
+ cvGetSubRect( err, &eij, cvRect( 0, index, 1, Bij->height ) ); //width of transposed Bij
+ cvGEMM( Bij, &eij, 1, eb[i], 1, eb[i], CV_GEMM_A_T );
+ }
}
} //V_i and eb_i computed for all i
//compute W_ij
for( int i = 0; i < num_points; i++ ) {
for( int j = 0; j < num_cams; j++ ) {
- //CvMat* Aij = ((CvMat**)(A->data.ptr + A->step * i))[j];
- CvMat* Aij = A[j+i*num_cams];
- if( Aij ) { //visible
- //CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
- CvMat* Bij = B[j+i*num_cams];
- //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
- CvMat* Wij = W[j+i*num_cams];
-
- //multiply
- cvGEMM( Aij, Bij, 1, NULL, 0, Wij, CV_GEMM_A_T );
- }
+ //CvMat* Aij = ((CvMat**)(A->data.ptr + A->step * i))[j];
+ CvMat* Aij = A[j+i*num_cams];
+ if( Aij ) { //visible
+ //CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
+ CvMat* Bij = B[j+i*num_cams];
+ //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+ CvMat* Wij = W[j+i*num_cams];
+
+ //multiply
+ cvGEMM( Aij, Bij, 1, NULL, 0, Wij, CV_GEMM_A_T );
+ }
}
} //Wij computed
//backup diagonal of JtJ before we start augmenting it
- {
+ {
CvMat dia;
CvMat subr;
for( int j = 0; j < num_cams; j++ ) {
- cvGetDiag(U[j], &dia);
- cvGetSubRect(JtJ_diag, &subr,
- cvRect(0, j*num_cam_param, 1, num_cam_param ));
- cvCopy( &dia, &subr );
- }
+ cvGetDiag(U[j], &dia);
+ cvGetSubRect(JtJ_diag, &subr,
+ cvRect(0, j*num_cam_param, 1, num_cam_param ));
+ cvCopy( &dia, &subr );
+ }
for( int i = 0; i < num_points; i++ ) {
- cvGetDiag(V[i], &dia);
- cvGetSubRect(JtJ_diag, &subr,
- cvRect(0, num_cams*num_cam_param + i * num_point_param, 1, num_point_param ));
- cvCopy( &dia, &subr );
- }
- }
+ cvGetDiag(V[i], &dia);
+ cvGetSubRect(JtJ_diag, &subr,
+ cvRect(0, num_cams*num_cam_param + i * num_point_param, 1, num_point_param ));
+ cvCopy( &dia, &subr );
+ }
+ }
if( iters == 0 ) {
//initialize lambda. It is set to 1e-3 * average diagonal element in JtJ
double average_diag = 0;
for( int j = 0; j < num_cams; j++ ) {
- average_diag += cvTrace( U[j] ).val[0];
+ average_diag += cvTrace( U[j] ).val[0];
}
for( int i = 0; i < num_points; i++ ) {
- average_diag += cvTrace( V[i] ).val[0];
+ average_diag += cvTrace( V[i] ).val[0];
}
average_diag /= (num_cams*num_cam_param + num_points * num_point_param );
-
- // lambda = 1e-3 * average_diag;
- lambda = 1e-3 * average_diag;
+
+ // lambda = 1e-3 * average_diag;
+ lambda = 1e-3 * average_diag;
lambda = 0.245560;
}
-
+
//now we are going to find good step and make it
for(;;) {
//augmentation of diagonal
for(int j = 0; j < num_cams; j++ ) {
- CvMat diag;
- cvGetDiag( U[j], &diag );
+ CvMat diag;
+ cvGetDiag( U[j], &diag );
#if 1
- cvAddS( &diag, cvScalar( lambda ), &diag );
+ cvAddS( &diag, cvScalar( lambda ), &diag );
#else
- cvScale( &diag, &diag, 1 + lambda );
+ cvScale( &diag, &diag, 1 + lambda );
#endif
}
for(int i = 0; i < num_points; i++ ) {
- CvMat diag;
- cvGetDiag( V[i], &diag );
+ CvMat diag;
+ cvGetDiag( V[i], &diag );
#if 1
- cvAddS( &diag, cvScalar( lambda ), &diag );
+ cvAddS( &diag, cvScalar( lambda ), &diag );
#else
- cvScale( &diag, &diag, 1 + lambda );
+ cvScale( &diag, &diag, 1 + lambda );
#endif
- }
+ }
bool error = false;
//compute inv(V*)
bool inverted_ok = true;
for(int i = 0; i < num_points; i++ ) {
- double det = cvInvert( V[i], inv_V_star[i] );
+ double det = cvInvert( V[i], inv_V_star[i] );
- if( fabs(det) <= FLT_EPSILON ) {
- inverted_ok = false;
- std::cerr<<"V["<<i<<"] failed"<<std::endl;
- break;
- } //means we did wrong augmentation, try to choose different lambda
+ if( fabs(det) <= FLT_EPSILON ) {
+ inverted_ok = false;
+ std::cerr<<"V["<<i<<"] failed"<<std::endl;
+ break;
+ } //means we did wrong augmentation, try to choose different lambda
}
if( inverted_ok ) {
- cvSetZero( E );
- //loop through cameras, compute upper diagonal blocks of matrix S
- for( int j = 0; j < num_cams; j++ ) {
- //compute Yij = Wij (V*_i)^-1 for all i (if Wij exists/nonzero)
- for( int i = 0; i < num_points; i++ ) {
- //
- //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
- CvMat* Wij = W[j+i*num_cams];
- if( Wij ) {
- cvMatMul( Wij, inv_V_star[i], Yj[i] );
- }
- }
-
- //compute Sjk for k>=j (because Sjk = Skj)
- for( int k = j; k < num_cams; k++ ) {
- cvSetZero( YWt );
- for( int i = 0; i < num_points; i++ ) {
- //check that both Wij and Wik exist
- // CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
- CvMat* Wij = W[j+i*num_cams];
- //CvMat* Wik = ((CvMat**)(W->data.ptr + W->step * k))[i];
- CvMat* Wik = W[k+i*num_cams];
-
- if( Wij && Wik ) {
- //multiply YWt += Yj[i]*Wik'
- cvGEMM( Yj[i], Wik, 1, YWt, 1, YWt, CV_GEMM_B_T ); ///*transpose Wik
- }
- }
-
- //copy result to matrix S
-
- CvMat Sjk;
- //extract submat
- cvGetSubRect( S, &Sjk, cvRect( k * num_cam_param, j * num_cam_param, num_cam_param, num_cam_param ));
-
-
- //if j==k, add diagonal
- if( j != k ) {
- //just copy with minus
- cvScale( YWt, &Sjk, -1 ); //if we set initial S to zero then we can use cvSub( Sjk, YWt, Sjk);
- } else {
- //add diagonal value
-
- //subtract YWt from augmented Uj
- cvSub( U[j], YWt, &Sjk );
- }
- }
-
- //compute right part of equation involving matrix S
- // e_j=ea_j - \sum_i Y_ij eb_i
- {
- CvMat e_j;
-
- //select submat
- cvGetSubRect( E, &e_j, cvRect( 0, j * num_cam_param, 1, num_cam_param ) );
-
- for( int i = 0; i < num_points; i++ ) {
- //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
- CvMat* Wij = W[j+i*num_cams];
- if( Wij )
- cvMatMulAdd( Yj[i], eb[i], &e_j, &e_j );
- }
-
- cvSub( ea[j], &e_j, &e_j );
- }
-
- }
- //fill below diagonal elements of matrix S
- cvCompleteSymm( S, 0 ); ///*from upper to low //operation may be done by nonzero blocks or during upper diagonal computation
-
- //Solve linear system S * deltaP_a = E
- CvMat dpa;
- cvGetSubRect( deltaP, &dpa, cvRect(0, 0, 1, S->width ) );
- int res = cvSolve( S, E, &dpa, CV_CHOLESKY );
-
- if( res ) { //system solved ok
- //compute db_i
- for( int i = 0; i < num_points; i++ ) {
- CvMat dbi;
- cvGetSubRect( deltaP, &dbi, cvRect( 0, dpa.height + i * num_point_param, 1, num_point_param ) );
-
- // compute \sum_j W_ij^T da_j
- for( int j = 0; j < num_cams; j++ ) {
- //get Wij
- //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
- CvMat* Wij = W[j+i*num_cams];
- if( Wij ) {
- //get da_j
- CvMat daj;
- cvGetSubRect( &dpa, &daj, cvRect( 0, j * num_cam_param, 1, num_cam_param ));
- cvGEMM( Wij, &daj, 1, &dbi, 1, &dbi, CV_GEMM_A_T ); ///* transpose Wij
- }
- }
- //finalize dbi
- cvSub( eb[i], &dbi, &dbi );
- cvMatMul(inv_V_star[i], &dbi, &dbi ); //here we get final dbi
- } //now we computed whole deltaP
-
- //add deltaP to delta
- cvAdd( prevP, deltaP, P );
-
- //evaluate function with new parameters
- ask_for_proj(_vis); // func( P, hX );
-
- //compute error
- errNorm = cvNorm( X, hX, CV_L2 );
-
- } else {
- error = true;
- }
+ cvSetZero( E );
+ //loop through cameras, compute upper diagonal blocks of matrix S
+ for( int j = 0; j < num_cams; j++ ) {
+ //compute Yij = Wij (V*_i)^-1 for all i (if Wij exists/nonzero)
+ for( int i = 0; i < num_points; i++ ) {
+ //
+ //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+ CvMat* Wij = W[j+i*num_cams];
+ if( Wij ) {
+ cvMatMul( Wij, inv_V_star[i], Yj[i] );
+ }
+ }
+
+ //compute Sjk for k>=j (because Sjk = Skj)
+ for( int k = j; k < num_cams; k++ ) {
+ cvSetZero( YWt );
+ for( int i = 0; i < num_points; i++ ) {
+ //check that both Wij and Wik exist
+ // CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+ CvMat* Wij = W[j+i*num_cams];
+ //CvMat* Wik = ((CvMat**)(W->data.ptr + W->step * k))[i];
+ CvMat* Wik = W[k+i*num_cams];
+
+ if( Wij && Wik ) {
+ //multiply YWt += Yj[i]*Wik'
+ cvGEMM( Yj[i], Wik, 1, YWt, 1, YWt, CV_GEMM_B_T ); ///*transpose Wik
+ }
+ }
+
+ //copy result to matrix S
+
+ CvMat Sjk;
+ //extract submat
+ cvGetSubRect( S, &Sjk, cvRect( k * num_cam_param, j * num_cam_param, num_cam_param, num_cam_param ));
+
+
+ //if j==k, add diagonal
+ if( j != k ) {
+ //just copy with minus
+ cvScale( YWt, &Sjk, -1 ); //if we set initial S to zero then we can use cvSub( Sjk, YWt, Sjk);
+ } else {
+ //add diagonal value
+
+ //subtract YWt from augmented Uj
+ cvSub( U[j], YWt, &Sjk );
+ }
+ }
+
+ //compute right part of equation involving matrix S
+ // e_j=ea_j - \sum_i Y_ij eb_i
+ {
+ CvMat e_j;
+
+ //select submat
+ cvGetSubRect( E, &e_j, cvRect( 0, j * num_cam_param, 1, num_cam_param ) );
+
+ for( int i = 0; i < num_points; i++ ) {
+ //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+ CvMat* Wij = W[j+i*num_cams];
+ if( Wij )
+ cvMatMulAdd( Yj[i], eb[i], &e_j, &e_j );
+ }
+
+ cvSub( ea[j], &e_j, &e_j );
+ }
+
+ }
+ //fill below diagonal elements of matrix S
+ cvCompleteSymm( S, 0 ); ///*from upper to low //operation may be done by nonzero blocks or during upper diagonal computation
+
+ //Solve linear system S * deltaP_a = E
+ CvMat dpa;
+ cvGetSubRect( deltaP, &dpa, cvRect(0, 0, 1, S->width ) );
+ int res = cvSolve( S, E, &dpa, CV_CHOLESKY );
+
+ if( res ) { //system solved ok
+ //compute db_i
+ for( int i = 0; i < num_points; i++ ) {
+ CvMat dbi;
+ cvGetSubRect( deltaP, &dbi, cvRect( 0, dpa.height + i * num_point_param, 1, num_point_param ) );
+
+ // compute \sum_j W_ij^T da_j
+ for( int j = 0; j < num_cams; j++ ) {
+ //get Wij
+ //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+ CvMat* Wij = W[j+i*num_cams];
+ if( Wij ) {
+ //get da_j
+ CvMat daj;
+ cvGetSubRect( &dpa, &daj, cvRect( 0, j * num_cam_param, 1, num_cam_param ));
+ cvGEMM( Wij, &daj, 1, &dbi, 1, &dbi, CV_GEMM_A_T ); ///* transpose Wij
+ }
+ }
+ //finalize dbi
+ cvSub( eb[i], &dbi, &dbi );
+ cvMatMul(inv_V_star[i], &dbi, &dbi ); //here we get final dbi
+ } //now we computed whole deltaP
+
+ //add deltaP to delta
+ cvAdd( prevP, deltaP, P );
+
+ //evaluate function with new parameters
+ ask_for_proj(_vis); // func( P, hX );
+
+ //compute error
+ errNorm = cvNorm( X, hX, CV_L2 );
+
+ } else {
+ error = true;
+ }
} else {
- error = true;
+ error = true;
}
//check solution
if( error || ///* singularities somewhere
- errNorm > prevErrNorm ) { //step was not accepted
- //increase lambda and reject change
- lambda *= 10;
- int nviz = X->rows / num_err_param;
- double e2 = errNorm*errNorm, e2_prev = prevErrNorm*prevErrNorm;
- double e2n = e2/nviz, e2n_prev = e2_prev/nviz;
- std::cerr<<"move failed: lambda = "<<lambda<<", e2 = "<<e2<<" ("<<e2n<<") > "<<e2_prev<<" ("<<e2n_prev<<")"<<std::endl;
-
- //restore diagonal from backup
- {
- CvMat dia;
- CvMat subr;
- for( int j = 0; j < num_cams; j++ ) {
- cvGetDiag(U[j], &dia);
- cvGetSubRect(JtJ_diag, &subr,
- cvRect(0, j*num_cam_param, 1, num_cam_param ));
- cvCopy( &subr, &dia );
- }
- for( int i = 0; i < num_points; i++ ) {
- cvGetDiag(V[i], &dia);
- cvGetSubRect(JtJ_diag, &subr,
- cvRect(0, num_cams*num_cam_param + i * num_point_param, 1, num_point_param ));
- cvCopy( &subr, &dia );
- }
- }
+ errNorm > prevErrNorm ) { //step was not accepted
+ //increase lambda and reject change
+ lambda *= 10;
+ {
+ int nviz = X->rows / num_err_param;
+ double e2 = errNorm*errNorm, e2_prev = prevErrNorm*prevErrNorm;
+ double e2n = e2/nviz, e2n_prev = e2_prev/nviz;
+ std::cerr<<"move failed: lambda = "<<lambda<<", e2 = "<<e2<<" ("<<e2n<<") > "<<e2_prev<<" ("<<e2n_prev<<")"<<std::endl;
+ }
+
+ //restore diagonal from backup
+ {
+ CvMat dia;
+ CvMat subr;
+ for( int j = 0; j < num_cams; j++ ) {
+ cvGetDiag(U[j], &dia);
+ cvGetSubRect(JtJ_diag, &subr,
+ cvRect(0, j*num_cam_param, 1, num_cam_param ));
+ cvCopy( &subr, &dia );
+ }
+ for( int i = 0; i < num_points; i++ ) {
+ cvGetDiag(V[i], &dia);
+ cvGetSubRect(JtJ_diag, &subr,
+ cvRect(0, num_cams*num_cam_param + i * num_point_param, 1, num_point_param ));
+ cvCopy( &subr, &dia );
+ }
+ }
} else { //all is ok
- //accept change and decrease lambda
- lambda /= 10;
- lambda = MAX(lambda, 1e-16);
- std::cerr<<"decreasing lambda to "<<lambda<<std::endl;
- prevErrNorm = errNorm;
-
- //compute new projection error vector
- cvSub( X, hX, err );
- break;
+ //accept change and decrease lambda
+ lambda /= 10;
+ lambda = MAX(lambda, 1e-16);
+ std::cerr<<"decreasing lambda to "<<lambda<<std::endl;
+ prevErrNorm = errNorm;
+
+ //compute new projection error vector
+ cvSub( X, hX, err );
+ break;
}
- }
+ }
iters++;
double param_change_norm = cvNorm(P, prevP, CV_RELATIVE_L2);
//check termination criteria
- if( (criteria.type&CV_TERMCRIT_ITER && iters > criteria.max_iter ) ||
- (criteria.type&CV_TERMCRIT_EPS && param_change_norm < criteria.epsilon) ) {
+ if( (criteria.type&CV_TERMCRIT_ITER && iters > criteria.max_iter ) ||
+ (criteria.type&CV_TERMCRIT_EPS && param_change_norm < criteria.epsilon) ) {
// std::cerr<<"relative norm change "<<param_change_norm<<" lower than eps "<<criteria.epsilon<<", stopping"<<std::endl;
done = true;
break;
//copy new params and continue iterations
cvCopy( P, prevP );
}
- }
- cvReleaseMat(&YWt);
+ }
+ cvReleaseMat(&YWt);
cvReleaseMat(&E);
-}
+}
//Utilities
-void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A, CvMat* B, void* /*data*/) {
+static void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A, CvMat* B, void* /*data*/) {
//compute jacobian per camera parameters (i.e. Aij)
//take i-th point 3D current coordinates
-
+
CvMat _Mi;
cvReshape(point_params, &_Mi, 3, 1 );
intr_data[2] = cam_params->data.db[8];
intr_data[5] = cam_params->data.db[9];
- CvMat _A = cvMat(3,3, CV_64F, intr_data );
+ CvMat _A = cvMat(3,3, CV_64F, intr_data );
CvMat _dpdr, _dpdt, _dpdf, _dpdc, _dpdk;
-
+
bool have_dk = cam_params->height - 10 ? true : false;
cvGetCols( A, &_dpdr, 0, 3 );
cvGetCols( A, &_dpdt, 3, 6 );
cvGetCols( A, &_dpdf, 6, 8 );
cvGetCols( A, &_dpdc, 8, 10 );
-
+
if( have_dk ) {
cvGetRows( cam_params, &_k, 10, cam_params->height );
cvGetCols( A, &_dpdk, 10, A->width );
}
cvProjectPoints2(&_Mi, &_ri, &_ti, &_A, have_dk ? &_k : NULL, _mp, &_dpdr, &_dpdt,
- &_dpdf, &_dpdc, have_dk ? &_dpdk : NULL, 0);
+ &_dpdf, &_dpdc, have_dk ? &_dpdk : NULL, 0);
- cvReleaseMat( &_mp );
+ cvReleaseMat( &_mp );
//compute jacobian for point params
//compute dMeasure/dPoint3D
// y' = y/z
//d(x') = ( dx*z - x*dz)/(z*z)
- //d(y') = ( dy*z - y*dz)/(z*z)
+ //d(y') = ( dy*z - y*dz)/(z*z)
//g = 1 + k1*r_2 + k2*r_4 + k3*r_6
//r_2 = x'*x' + y'*y'
//d(r_2) = 2*x'*dx' + 2*y'*dy'
- //dg = k1* d(r_2) + k2*2*r_2*d(r_2) + k3*3*r_2*r_2*d(r_2)
+ //dg = k1* d(r_2) + k2*2*r_2*d(r_2) + k3*3*r_2*r_2*d(r_2)
//x" = x'*g + 2*p1*x'*y' + p2(r_2+2*x'_2)
//y" = y'*g + p1(r_2+2*y'_2) + 2*p2*x'*y'
-
+
//d(x") = d(x') * g + x' * d(g) + 2*p1*( d(x')*y' + x'*dy) + p2*(d(r_2) + 2*2*x'* dx')
- //d(y") = d(y') * g + y' * d(g) + 2*p2*( d(x')*y' + x'*dy) + p1*(d(r_2) + 2*2*y'* dy')
+ //d(y") = d(y') * g + y' * d(g) + 2*p2*( d(x')*y' + x'*dy) + p1*(d(r_2) + 2*2*y'* dy')
// u = fx*( x") + cx
// v = fy*( y") + cy
-
+
// du = fx * d(x") = fx * ( dx*z - x*dz)/ (z*z)
// dv = fy * d(y") = fy * ( dy*z - y*dz)/ (z*z)
- // dx/dX = r11, dx/dY = r12, dx/dZ = r13
+ // dx/dX = r11, dx/dY = r12, dx/dZ = r13
// dy/dX = r21, dy/dY = r22, dy/dZ = r23
- // dz/dX = r31, dz/dY = r32, dz/dZ = r33
+ // dz/dX = r31, dz/dY = r32, dz/dZ = r33
// du/dX = fx*(r11*z-x*r31)/(z*z)
// du/dY = fx*(r12*z-x*r32)/(z*z)
double y = R[3] * X + R[4] * Y + R[5] * Z + t[1];
double z = R[6] * X + R[7] * Y + R[8] * Z + t[2];
-#if 1
+#if 1
//compute x',y'
double x_strike = x/z;
- double y_strike = y/z;
+ double y_strike = y/z;
//compute dx',dy' matrix
//
- // dx'/dX dx'/dY dx'/dZ =
+ // dx'/dX dx'/dY dx'/dZ =
// dy'/dX dy'/dY dy'/dZ
double coeff[6] = { z, 0, -x,
- 0, z, -y };
+ 0, z, -y };
CvMat coeffmat = cvMat( 2, 3, CV_64F, coeff );
CvMat* dstrike_dbig = cvCreateMat(2,3,CV_64F);
cvMatMul(&coeffmat, &_R, dstrike_dbig);
- cvScale(dstrike_dbig, dstrike_dbig, 1/(z*z) );
-
+ cvScale(dstrike_dbig, dstrike_dbig, 1/(z*z) );
+
if( have_dk ) {
double strike_[2] = {x_strike, y_strike};
- CvMat strike = cvMat(1, 2, CV_64F, strike_);
-
+ CvMat strike = cvMat(1, 2, CV_64F, strike_);
+
//compute r_2
double r_2 = x_strike*x_strike + y_strike*y_strike;
double r_4 = r_2*r_2;
double& k1 = _k.data.db[0];
double& k2 = _k.data.db[1];
double& p1 = _k.data.db[2];
- double& p2 = _k.data.db[3];
+ double& p2 = _k.data.db[3];
double k3 = 0;
if( _k.cols*_k.rows == 5 ) {
k3 = _k.data.db[4];
- }
+ }
//compute dg/dbig
double dg_dr2 = k1 + k2*2*r_2 + k3*3*r_4;
double g = 1+k1*r_2+k2*r_4+k3*r_6;
CvMat* dg_dbig = cvCreateMat(1,3,CV_64F);
- cvScale( dr2_dbig, dg_dbig, dg_dr2 );
+ cvScale( dr2_dbig, dg_dbig, dg_dr2 );
CvMat* tmp = cvCreateMat( 2, 3, CV_64F );
CvMat* dstrike2_dbig = cvCreateMat( 2, 3, CV_64F );
-
+
double c[4] = { g+2*p1*y_strike+4*p2*x_strike, 2*p1*x_strike,
- 2*p2*y_strike, g+2*p2*x_strike + 4*p1*y_strike };
+ 2*p2*y_strike, g+2*p2*x_strike + 4*p1*y_strike };
- CvMat coeffmat = cvMat(2,2,CV_64F, c );
+ CvMat coeffmat2 = cvMat(2,2,CV_64F, c );
- cvMatMul(&coeffmat, dstrike_dbig, dstrike2_dbig );
+ cvMatMul(&coeffmat2, dstrike_dbig, dstrike2_dbig );
cvGEMM( &strike, dg_dbig, 1, NULL, 0, tmp, CV_GEMM_A_T );
cvAdd( dstrike2_dbig, tmp, dstrike2_dbig );
CvMat pmat = cvMat(2, 1, CV_64F, p );
cvMatMul( &pmat, dr2_dbig ,tmp);
- cvAdd( dstrike2_dbig, tmp, dstrike2_dbig );
+ cvAdd( dstrike2_dbig, tmp, dstrike2_dbig );
cvCopy( dstrike2_dbig, B );
cvReleaseMat(&tmp);
cvReleaseMat(&dstrike2_dbig);
- cvReleaseMat(&tmp);
+ cvReleaseMat(&tmp);
} else {
cvCopy(dstrike_dbig, B);
}
//multiply by fx, fy
CvMat row;
cvGetRows( B, &row, 0, 1 );
- cvScale( &row, &row, fx );
-
+ cvScale( &row, &row, fx );
+
cvGetRows( B, &row, 1, 2 );
cvScale( &row, &row, fy );
cvmSet( B, 0, 0, k*(R[0]*z-x*R[6]));
cvmSet( B, 0, 1, k*(R[1]*z-x*R[7]));
cvmSet( B, 0, 2, k*(R[2]*z-x*R[8]));
-
- k = fy/(z*z);
-
+
+ k = fy/(z*z);
+
cvmSet( B, 1, 0, k*(R[3]*z-y*R[6]));
cvmSet( B, 1, 1, k*(R[4]*z-y*R[7]));
cvmSet( B, 1, 2, k*(R[5]*z-y*R[8]));
-
+
#endif
-
+
};
-void func(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* estim, void* /*data*/) {
+static void func(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* estim, void* /*data*/) {
//just do projections
CvMat _Mi;
cvReshape( point_params, &_Mi, 3, 1 );
intr_data[2] = cam_params->data.db[8];
intr_data[5] = cam_params->data.db[9];
- CvMat _A = cvMat(3,3, CV_64F, intr_data );
+ CvMat _A = cvMat(3,3, CV_64F, intr_data );
//int cn = CV_MAT_CN(_Mi.type);
bool have_dk = cam_params->height - 10 ? true : false;
-
+
if( have_dk ) {
- cvGetRows( cam_params, &_k, 10, cam_params->height );
- }
+ cvGetRows( cam_params, &_k, 10, cam_params->height );
+ }
cvProjectPoints2( &_Mi, &_ri, &_ti, &_A, have_dk ? &_k : NULL, _mp, NULL, NULL,
- NULL, NULL, NULL, 0);
+ NULL, NULL, NULL, 0);
// std::cerr<<"_mp = "<<_mp->data.db[0]<<","<<_mp->data.db[1]<<std::endl;
- //
+ //
_mp2->data.db[0] = _mp->data.db[0];
_mp2->data.db[1] = _mp->data.db[1];
cvTranspose( _mp2, estim );
cvReleaseMat( &_mp2 );
};
-void fjac_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data) {
+static void fjac_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data) {
CvMat _point_params = point_params, _cam_params = cam_params, _Al = A, _Bl = B;
fjac(i,j, &_point_params, &_cam_params, &_Al, &_Bl, data);
};
-void func_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data) {
+static void func_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data) {
CvMat _point_params = point_params, _cam_params = cam_params, _estim = estim;
func(i,j,&_point_params,&_cam_params,&_estim,data);
-};
+};
void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points in global coordinate system (input and output)
- const vector<vector<Point2d> >& imagePoints, //projections of 3d points for every camera
- const vector<vector<int> >& visibility, //visibility of 3d points for every camera
- vector<Mat>& cameraMatrix, //intrinsic matrices of all cameras (input and output)
- vector<Mat>& R, //rotation matrices of all cameras (input and output)
- vector<Mat>& T, //translation vector of all cameras (input and output)
- vector<Mat>& distCoeffs, //distortion coefficients of all cameras (input and output)
- const TermCriteria& criteria,
- BundleAdjustCallback cb, void* user_data) {
+ const vector<vector<Point2d> >& imagePoints, //projections of 3d points for every camera
+ const vector<vector<int> >& visibility, //visibility of 3d points for every camera
+ vector<Mat>& cameraMatrix, //intrinsic matrices of all cameras (input and output)
+ vector<Mat>& R, //rotation matrices of all cameras (input and output)
+ vector<Mat>& T, //translation vector of all cameras (input and output)
+ vector<Mat>& distCoeffs, //distortion coefficients of all cameras (input and output)
+ const TermCriteria& criteria,
+ BundleAdjustCallback cb, void* user_data) {
//,enum{MOTION_AND_STRUCTURE,MOTION,STRUCTURE})
int num_points = (int)points.size();
int num_cameras = (int)cameraMatrix.size();
- CV_Assert( imagePoints.size() == (size_t)num_cameras &&
- visibility.size() == (size_t)num_cameras &&
- R.size() == (size_t)num_cameras &&
- T.size() == (size_t)num_cameras &&
- (distCoeffs.size() == (size_t)num_cameras || distCoeffs.size() == 0) );
+ CV_Assert( imagePoints.size() == (size_t)num_cameras &&
+ visibility.size() == (size_t)num_cameras &&
+ R.size() == (size_t)num_cameras &&
+ T.size() == (size_t)num_cameras &&
+ (distCoeffs.size() == (size_t)num_cameras || distCoeffs.size() == 0) );
int numdist = distCoeffs.size() ? (distCoeffs[0].rows * distCoeffs[0].cols) : 0;
int num_cam_param = 3 /* rotation vector */ + 3 /* translation vector */
- + 2 /* fx, fy */ + 2 /* cx, cy */ + numdist;
+ + 2 /* fx, fy */ + 2 /* cx, cy */ + numdist;
- int num_point_param = 3;
+ int num_point_param = 3;
//collect camera parameters into vector
Mat params( num_cameras * num_cam_param + num_points * num_point_param, 1, CV_64F );
//translation
dst = params.rowRange(i*num_cam_param + 3, i*num_cam_param+6);
- T[i].copyTo(dst);
-
+ T[i].copyTo(dst);
+
//intrinsic camera matrix
double* intr_data = (double*)cameraMatrix[i].data;
double* intr = (double*)(params.data + params.step * (i*num_cam_param+6));
intr[1] = intr_data[4]; //fy
//center of projection
intr[2] = intr_data[2]; //cx
- intr[3] = intr_data[5]; //cy
+ intr[3] = intr_data[5]; //cy
//add distortion if exists
if( distCoeffs.size() ) {
dst = params.rowRange(i*num_cam_param + 10, i*num_cam_param+10+numdist);
- distCoeffs[i].copyTo(dst);
+ distCoeffs[i].copyTo(dst);
}
- }
+ }
//fill point params
Mat ptparams(num_points, 1, CV_64FC3, params.data + num_cameras*num_cam_param*params.step);
  int num_proj = countNonZero(vismat); //total number of point projections
//collect measurements
- Mat X(num_proj*2,1,CV_64F); //measurement vector
-
+ Mat X(num_proj*2,1,CV_64F); //measurement vector
+
int counter = 0;
for(int i = 0; i < num_points; i++ ) {
for(int j = 0; j < num_cameras; j++ ) {
//check visibility
if( visibility[j][i] ) {
-        //extract point and put into vector
- Point2d p = imagePoints[j][i];
- ((double*)(X.data))[counter] = p.x;
- ((double*)(X.data))[counter+1] = p.y;
- assert(p.x != -1 || p.y != -1);
- counter+=2;
- }
- }
+        //extract point and put into vector
+ Point2d p = imagePoints[j][i];
+ ((double*)(X.data))[counter] = p.x;
+ ((double*)(X.data))[counter+1] = p.y;
+ assert(p.x != -1 || p.y != -1);
+ counter+=2;
+ }
+ }
}
LevMarqSparse levmar( num_points, num_cameras, num_point_param, num_cam_param, 2, vismat, params, X,
- TermCriteria(criteria), fjac_new, func_new, NULL,
- cb, user_data);
+ TermCriteria(criteria), fjac_new, func_new, NULL,
+ cb, user_data);
//extract results
//fill point params
/*Mat final_points(num_points, 1, CV_64FC3,
Mat rot_vec = Mat(levmar.P).rowRange(i*num_cam_param, i*num_cam_param+3);
Rodrigues( rot_vec, R[i] );
//translation
- T[i] = Mat(levmar.P).rowRange(i*num_cam_param + 3, i*num_cam_param+6);
+ T[i] = Mat(levmar.P).rowRange(i*num_cam_param + 3, i*num_cam_param+6);
//intrinsic camera matrix
double* intr_data = (double*)cameraMatrix[i].data;
intr_data[4] = intr[1]; //fy
//center of projection
intr_data[2] = intr[2]; //cx
- intr_data[5] = intr[3]; //cy
+ intr_data[5] = intr[3]; //cy
//add distortion if exists
if( distCoeffs.size() ) {
Mat(levmar.P).rowRange(i*num_cam_param + 10, i*num_cam_param+10+numdist).copyTo(distCoeffs[i]);
}
- }
-}
+ }
+}
}
float _temp = (1.0f+_beta)/(2.0f*_mu*_alpha);
- float _a = _filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
- _filteringCoeficientsTable[1+tableOffset]=(1.0f-_a)*(1.0f-_a)*(1.0f-_a)*(1.0f-_a)/(1.0f+_beta);
+ float a = _filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
+ _filteringCoeficientsTable[1+tableOffset]=(1.0f-a)*(1.0f-a)*(1.0f-a)*(1.0f-a)/(1.0f+_beta);
_filteringCoeficientsTable[2+tableOffset] =tau;
- //std::cout<<"BasicRetinaFilter::normal:"<<(1.0-_a)*(1.0-_a)*(1.0-_a)*(1.0-_a)/(1.0+_beta)<<" -> old:"<<(1-_a)*(1-_a)*(1-_a)*(1-_a)/(1+_beta)<<std::endl;
+ //std::cout<<"BasicRetinaFilter::normal:"<<(1.0-a)*(1.0-a)*(1.0-a)*(1.0-a)/(1.0+_beta)<<" -> old:"<<(1-a)*(1-a)*(1-a)*(1-a)/(1+_beta)<<std::endl;
- //std::cout<<"BasicRetinaFilter::_a="<<_a<<", gain="<<_filteringCoeficientsTable[1+tableOffset]<<", tau="<<tau<<std::endl;
+ //std::cout<<"BasicRetinaFilter::a="<<a<<", gain="<<_filteringCoeficientsTable[1+tableOffset]<<", tau="<<tau<<std::endl;
}
void BasicRetinaFilter::setProgressiveFilterConstants_CentredAccuracy(const float beta, const float tau, const float alpha0, const unsigned int filterIndex)
float _alpha=0.8f;
float _temp = (1.0f+_beta)/(2.0f*_mu*_alpha);
- float _a=_filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
- _filteringCoeficientsTable[tableOffset+1]=(1.0f-_a)*(1.0f-_a)*(1.0f-_a)*(1.0f-_a)/(1.0f+_beta);
+ float a=_filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
+ _filteringCoeficientsTable[tableOffset+1]=(1.0f-a)*(1.0f-a)*(1.0f-a)*(1.0f-a)/(1.0f+_beta);
_filteringCoeficientsTable[tableOffset+2] =tau;
float commonFactor=alpha0/(float)sqrt(_halfNBcolumns*_halfNBcolumns+_halfNBrows*_halfNBrows+1.0f);
}
unsigned int tableOffset=filterIndex*3;
float _temp = (1.0f+_beta)/(2.0f*_mu*_alpha);
- float _a=_filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
- _filteringCoeficientsTable[tableOffset+1]=(1.0f-_a)*(1.0f-_a)*(1.0f-_a)*(1.0f-_a)/(1.0f+_beta);
+ float a=_filteringCoeficientsTable[tableOffset] = 1.0f + _temp - (float)sqrt( (1.0f+_temp)*(1.0f+_temp) - 1.0f);
+ _filteringCoeficientsTable[tableOffset+1]=(1.0f-a)*(1.0f-a)*(1.0f-a)*(1.0f-a)/(1.0f+_beta);
_filteringCoeficientsTable[tableOffset+2] =tau;
//memset(_progressiveSpatialConstant, 255, _filterOutput.getNBpixels());
{
float X0=*(localLuminancePTR++)*_localLuminanceFactor+_localLuminanceAddon;
// TODO : the following line can lead to a divide by zero ! A small offset is added, take care if the offset is too large in case of High Dynamic Range images which can use very small values...
- *(outputFramePTR++) = (_maxInputValue+X0)**inputFramePTR/(*inputFramePTR +X0+0.00000000001);
+ *(outputFramePTR++) = (_maxInputValue+X0)**inputFramePTR/(*inputFramePTR +X0+0.00000000001f);
//std::cout<<"BasicRetinaFilter::inputFrame[IDpixel]=%f, X0=%f, outputFrame[IDpixel]=%f\n", inputFrame[IDpixel], X0, outputFrame[IDpixel]);
}
}
{
public:
virtual ImageIterator* iterator() const = 0;
+ virtual ~ImageRange() {}
};
// Sliding window
#include "precomp.hpp"
#include <iostream>
-#if defined _MSC_VER && _MSC_VER >= 1400
+#ifdef _MSC_VER
#pragma warning( disable: 4305 )
#endif
return dst;
}
-
-Mat argsort(InputArray _src, bool ascending=true)
+
+static Mat argsort(InputArray _src, bool ascending=true)
{
Mat src = _src.getMat();
if (src.rows != 1 && src.cols != 1)
sortIdx(src.reshape(1,1),sorted_indices,flags);
return sorted_indices;
}
-
+
template <typename _Tp> static
Mat interp1_(const Mat& X_, const Mat& Y_, const Mat& XI)
{
int n = XI.rows;
// sort input table
vector<int> sort_indices = argsort(X_);
-
+
Mat X = sortMatrixRowsByIndices(X_,sort_indices);
Mat Y = sortMatrixRowsByIndices(Y_,sort_indices);
// interpolated values
}
return Mat();
}
-
+
namespace colormap
{
n); // number of sample points
}
};
-
+
void ColorMap::operator()(InputArray _src, OutputArray _dst) const
{
if(_lut.total() != 256)
// Apply the ColorMap.
LUT(src, _lut, _dst);
}
-
+
Mat ColorMap::linear_colormap(InputArray X,
InputArray r, InputArray g, InputArray b,
InputArray xi) {
colormap == COLORMAP_HOT ? (colormap::ColorMap*)(new colormap::Hot) :
colormap == COLORMAP_MKPJ1 ? (colormap::ColorMap*)(new colormap::MKPJ1) :
colormap == COLORMAP_MKPJ2 ? (colormap::ColorMap*)(new colormap::MKPJ2) : 0;
-
+
if( !cm )
CV_Error( CV_StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
-
+
(*cm)(src, dst);
-
+
delete cm;
}
}
mixChannels(&hsv, 1, &hue, 1, channels, 2);
Mat roi(hue, selection);
- Mat maskroi(mask, selection);
+ Mat mskroi(mask, selection);
int ch[] = {0, 1};
int chsize[] = {32, 32};
- calcHist(&roi, 1, ch, maskroi, hist, 1, chsize, ranges);
+ calcHist(&roi, 1, ch, mskroi, hist, 1, chsize, ranges);
normalize(hist, hist, 0, 255, CV_MINMAX);
prev_trackwindow = selection;
#define DEBUGLOGS 1
-#if ANDROID
+#ifdef ANDROID
#include <android/log.h>
#define LOG_TAG "OBJECT_DETECTOR"
#define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define LOGI(_str, ...) LOGI0(_str , ## __VA_ARGS__)
#define LOGW(_str, ...) LOGW0(_str , ## __VA_ARGS__)
#define LOGE(_str, ...) LOGE0(_str , ## __VA_ARGS__)
-#else
+#else
#define LOGD(...) do{} while(0)
#define LOGI(...) do{} while(0)
#define LOGW(...) do{} while(0)
} catch(...) { \
LOGE0("\n ERROR: UNKNOWN Exception caught\n\n"); \
} \
-} while(0)
+} while(0)
#endif
void* workcycleObjectDetectorFunction(void* p)
vector<Rect> objects;
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
- pthread_mutex_lock(&mutex);
+ pthread_mutex_lock(&mutex);
{
pthread_cond_signal(&objectDetectorThreadStartStop);
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- imageSeparateDetecting is empty, continue");
continue;
}
- LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- start handling imageSeparateDetecting, img.size=%dx%d, img.data=0x%p",
+ LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- start handling imageSeparateDetecting, img.size=%dx%d, img.data=0x%p",
imageSeparateDetecting.size().width, imageSeparateDetecting.size().height, (void*)imageSeparateDetecting.data);
pthread_mutex_unlock(&mutex);
-
+
}
bool DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread(const Mat& imageGray, vector<Rect>& rectsWhereRegions)
if (timeWhenDetectingThreadStartedWork > 0) {
double time_from_previous_launch_in_ms=1000.0 * (((double)(getTickCount() - timeWhenDetectingThreadStartedWork )) / freq); //the same formula as for lastBigDetectionDuration
shouldSendNewDataToWorkThread = (time_from_previous_launch_in_ms >= detectionBasedTracker.parameters.minDetectionPeriod);
- LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: shouldSendNewDataToWorkThread was 1, now it is %d, since time_from_previous_launch_in_ms=%.2f, minDetectionPeriod=%d",
+ LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: shouldSendNewDataToWorkThread was 1, now it is %d, since time_from_previous_launch_in_ms=%.2f, minDetectionPeriod=%d",
(shouldSendNewDataToWorkThread?1:0), time_from_previous_launch_in_ms, detectionBasedTracker.parameters.minDetectionPeriod);
}
&& (params.scaleFactor > 1.0)
&& (params.maxTrackLifetime >= 0) );
- if (!cascadeForTracking.load(cascadeFilename)) {
+ if (!cascadeForTracking.load(cascadeFilename)) {
CV_Error(CV_StsBadArg, "DetectionBasedTracker::DetectionBasedTracker: Cannot load a cascade from the file '"+cascadeFilename+"'");
}
Mat imageDetect=imageGray;
int D=parameters.minObjectSize;
- if (D < 1)
+ if (D < 1)
D=1;
vector<Rect> rectsWhereRegions;
LOGD("DetectionBasedTracker::updateTrackedObjects: j=%d is rejected, because it is intersected with another rectangle", j);
continue;
}
- LOGD("DetectionBasedTracker::updateTrackedObjects: detectedObjects[%d]={%d, %d, %d x %d}",
+ LOGD("DetectionBasedTracker::updateTrackedObjects: detectedObjects[%d]={%d, %d, %d x %d}",
j, detectedObjects[j].x, detectedObjects[j].y, detectedObjects[j].width, detectedObjects[j].height);
Rect r=prevRect & detectedObjects[j];
std::vector<TrackedObject>::iterator it=trackedObjects.begin();
while( it != trackedObjects.end() ) {
- if ( (it->numFramesNotDetected > parameters.maxTrackLifetime)
+ if ( (it->numFramesNotDetected > parameters.maxTrackLifetime)
||
- (
+ (
(it->numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow)
&&
(it->numFramesNotDetected > innerParameters.numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown)
return Rect();
}
if (trackedObjects[i].numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow){
- LOGI("DetectionBasedTracker::calcTrackedObjectPositionToShow: trackedObjects[%d].numDetectedFrames=%d <= numStepsToWaitBeforeFirstShow=%d --- return empty Rect()",
+ LOGI("DetectionBasedTracker::calcTrackedObjectPositionToShow: trackedObjects[%d].numDetectedFrames=%d <= numStepsToWaitBeforeFirstShow=%d --- return empty Rect()",
i, trackedObjects[i].numDetectedFrames, innerParameters.numStepsToWaitBeforeFirstShow);
return Rect();
}
{
using std::set;
-
+
// Reads a sequence from a FileNode::SEQ with type _Tp into a result vector.
template<typename _Tp>
inline void readFileNodeList(const FileNode& fn, vector<_Tp>& result) {
}
fs << "]";
}
-
+
static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0)
{
// number of samples
}
return data;
}
-
+
// Removes duplicate elements in a given vector.
template<typename _Tp>
inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
return elems;
}
-
+
// Turk, M., and Pentland, A. "Eigenfaces for recognition.". Journal of
// Cognitive Neuroscience 3 (1991), 71–86.
class Eigenfaces : public FaceRecognizer
using FaceRecognizer::load;
// Initializes an empty Eigenfaces model.
- Eigenfaces(int num_components = 0) :
- _num_components(num_components) { }
+ Eigenfaces(int numComponents = 0) :
+ _num_components(numComponents) { }
// Initializes and computes an Eigenfaces model with images in src and
// corresponding labels in labels. num_components will be kept for
// classification.
Eigenfaces(InputArray src, InputArray labels,
- int num_components = 0) :
- _num_components(num_components) {
+ int numComponents = 0) :
+ _num_components(numComponents) {
train(src, labels);
}
using FaceRecognizer::load;
// Initializes an empty Fisherfaces model.
- Fisherfaces(int num_components = 0) :
- _num_components(num_components) {}
+ Fisherfaces(int numComponents = 0) :
+ _num_components(numComponents) {}
// Initializes and computes a Fisherfaces model with images in src and
// corresponding labels in labels. num_components will be kept for
// classification.
Fisherfaces(InputArray src,
InputArray labels,
- int num_components = 0) :
- _num_components(num_components) {
+ int numComponents = 0) :
+ _num_components(numComponents) {
train(src, labels);
}
//
// radius, neighbors are used in the local binary patterns creation.
// grid_x, grid_y control the grid size of the spatial histograms.
- LBPH(int radius=1, int neighbors=8, int grid_x=8, int grid_y=8) :
- _grid_x(grid_x),
- _grid_y(grid_y),
- _radius(radius),
- _neighbors(neighbors) {}
+ LBPH(int radius_=1, int neighbors_=8, int grid_x_=8, int grid_y_=8) :
+ _grid_x(grid_x_),
+ _grid_y(grid_y_),
+ _radius(radius_),
+ _neighbors(neighbors_) {}
// Initializes and computes this LBPH Model. The current implementation is
// rather fixed as it uses the Extended Local Binary Patterns per default.
// (grid_x=8), (grid_y=8) controls the grid size of the spatial histograms.
LBPH(InputArray src,
InputArray labels,
- int radius=1, int neighbors=8,
- int grid_x=8, int grid_y=8) :
- _grid_x(grid_x),
- _grid_y(grid_y),
- _radius(radius),
- _neighbors(neighbors) {
+ int radius_=1, int neighbors_=8,
+ int grid_x_=8, int grid_y_=8) :
+ _grid_x(grid_x_),
+ _grid_y(grid_y_),
+ _radius(radius_),
+ _neighbors(neighbors_) {
train(src, labels);
}
return Mat();
}
-
+
static Mat spatial_histogram(InputArray _src, int numPatterns,
int grid_x, int grid_y, bool normed)
{
elbp(src, dst, radius, neighbors);
return dst;
}
-
+
void LBPH::load(const FileStorage& fs) {
fs["radius"] >> _radius;
fs["neighbors"] >> _neighbors;
}
return minClass;
}
-
-
+
+
Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components)
{
return new Eigenfaces(num_components);
}
-
+
Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components)
{
return new Fisherfaces(num_components);
}
-
+
Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius, int neighbors,
int grid_x, int grid_y)
{
using namespace cv;
-void downsamplePoints( const Mat& src, Mat& dst, size_t count )
+static void downsamplePoints( const Mat& src, Mat& dst, size_t count )
{
CV_Assert( count >= 2 );
CV_Assert( src.cols == 1 || src.rows == 1 );
using std::set;
using std::cout;
using std::endl;
-
+
// Removes duplicate elements in a given vector.
template<typename _Tp>
inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
elems.push_back(*it);
return elems;
}
-
+
static Mat argsort(InputArray _src, bool ascending=true)
{
Mat src = _src.getMat();
- if (src.rows != 1 && src.cols != 1)
- CV_Error(CV_StsBadArg, "cv::argsort only sorts 1D matrices.");
+ if (src.rows != 1 && src.cols != 1) {
+ string error_message = "Wrong shape of input matrix! Expected a matrix with one row or column.";
+ CV_Error(CV_StsBadArg, error_message);
+ }
int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
Mat sorted_indices;
sortIdx(src.reshape(1,1),sorted_indices,flags);
return sorted_indices;
}
-static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0)
-{
+static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
+ // make sure the input data is a vector of matrices or vector of vector
+ if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
+ string error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
+ CV_Error(CV_StsBadArg, error_message);
+ }
// number of samples
- int n = (int) src.total();
- // return empty matrix if no data given
+ size_t n = src.total();
+ // return empty matrix if no matrices given
if(n == 0)
return Mat();
- // dimensionality of samples
- int d = (int)src.getMat(0).total();
+ // dimensionality of (reshaped) samples
+ size_t d = src.getMat(0).total();
// create data matrix
- Mat data(n, d, rtype);
- // copy data
- for(int i = 0; i < n; i++) {
+ Mat data((int)n, (int)d, rtype);
+ // now copy data
+ for(int i = 0; i < (int)n; i++) {
+ // make sure data can be reshaped, throw exception if not!
+ if(src.getMat(i).total() != d) {
+ string error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)src.getMat(i).total());
+ CV_Error(CV_StsBadArg, error_message);
+ }
+ // get a hold of the current row
Mat xi = data.row(i);
- src.getMat(i).reshape(1, 1).convertTo(xi, rtype, alpha, beta);
+ // make reshape happy by cloning for non-continuous matrices
+ if(src.getMat(i).isContinuous()) {
+ src.getMat(i).reshape(1, 1).convertTo(xi, rtype, alpha, beta);
+ } else {
+ src.getMat(i).clone().reshape(1, 1).convertTo(xi, rtype, alpha, beta);
+ }
}
return data;
}
-
-void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
- if(_indices.getMat().type() != CV_32SC1)
+
+static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
+ if(_indices.getMat().type() != CV_32SC1) {
CV_Error(CV_StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
+ }
Mat src = _src.getMat();
vector<int> indices = _indices.getMat();
_dst.create(src.rows, src.cols, src.type());
}
}
-Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
+static Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
Mat dst;
sortMatrixColumnsByIndices(src, indices, dst);
return dst;
}
-
-
+
+
template<typename _Tp> static bool
isSymmetric_(InputArray src) {
Mat _src = src.getMat();
return false;
}
-
+
//------------------------------------------------------------------------------
-// subspace::project
+// cv::subspaceProject
//------------------------------------------------------------------------------
-Mat subspaceProject(InputArray _W, InputArray _mean, InputArray _src)
-{
+Mat subspaceProject(InputArray _W, InputArray _mean, InputArray _src) {
// get data matrices
Mat W = _W.getMat();
Mat mean = _mean.getMat();
Mat src = _src.getMat();
+ // get number of samples and dimension
+ int n = src.rows;
+ int d = src.cols;
+ // make sure the data has the correct shape
+ if(W.rows != d) {
+ string error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
+ CV_Error(CV_StsBadArg, error_message);
+ }
+ // make sure mean is correct if not empty
+ if(!mean.empty() && (mean.total() != (size_t) d)) {
+ string error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %d.", d, mean.total());
+ CV_Error(CV_StsBadArg, error_message);
+ }
// create temporary matrices
Mat X, Y;
- // copy data & make sure we are using the correct type
+ // make sure you operate on correct type
src.convertTo(X, W.type());
- // get number of samples and dimension
- int n = X.rows;
- int d = X.cols;
- // center the data if correct aligned sample mean is given
- if(mean.total() == (size_t)d)
- subtract(X, repeat(mean.reshape(1,1), n, 1), X);
+ // safe to do, because of above assertion
+ if(!mean.empty()) {
+ for(int i=0; i<n; i++) {
+ Mat r_i = X.row(i);
+ subtract(r_i, mean.reshape(1,1), r_i);
+ }
+ }
// finally calculate projection as Y = (X-mean)*W
gemm(X, W, 1.0, Mat(), 0.0, Y);
return Y;
}
//------------------------------------------------------------------------------
-// subspace::reconstruct
+// cv::subspaceReconstruct
//------------------------------------------------------------------------------
Mat subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
{
Mat W = _W.getMat();
Mat mean = _mean.getMat();
Mat src = _src.getMat();
- // get number of samples
+ // get number of samples and dimension
int n = src.rows;
+ int d = src.cols;
+ // make sure the data has the correct shape
+ if(W.cols != d) {
+ string error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
+ CV_Error(CV_StsBadArg, error_message);
+ }
+ // make sure mean is correct if not empty
+ if(!mean.empty() && (mean.total() != (size_t) W.rows)) {
+ string error_message = format("Wrong mean shape for the given eigenvector matrix. Expected %d, but was %d.", W.cols, mean.total());
+ CV_Error(CV_StsBadArg, error_message);
+ }
    // initialize temporary matrices
Mat X, Y;
// copy data & make sure we are using the correct type
src.convertTo(Y, W.type());
// calculate the reconstruction
gemm(Y, W, 1.0, Mat(), 0.0, X, GEMM_2_T);
- if(mean.total() == (size_t) X.cols)
- add(X, repeat(mean.reshape(1,1), n, 1), X);
+ // safe to do because of above assertion
+ if(!mean.empty()) {
+ for(int i=0; i<n; i++) {
+ Mat r_i = X.row(i);
+ add(r_i, mean.reshape(1,1), r_i);
+ }
+ }
return X;
}
-
+
class EigenvalueDecomposition {
private:
-
+
// Holds the data dimension.
int n;
-
+
// Stores real/imag part of a complex division.
double cdivr, cdivi;
-
+
// Pointer to internal memory.
double *d, *e, *ort;
double **V, **H;
-
+
// Holds the computed eigenvalues.
Mat _eigenvalues;
-
+
// Holds the computed eigenvectors.
Mat _eigenvectors;
-
+
// Allocates memory.
template<typename _Tp>
_Tp *alloc_1d(int m) {
return new _Tp[m];
}
-
+
// Allocates memory.
template<typename _Tp>
_Tp *alloc_1d(int m, _Tp val) {
arr[i] = val;
return arr;
}
-
+
// Allocates memory.
template<typename _Tp>
- _Tp **alloc_2d(int m, int n) {
+ _Tp **alloc_2d(int m, int _n) {
_Tp **arr = new _Tp*[m];
for (int i = 0; i < m; i++)
- arr[i] = new _Tp[n];
+ arr[i] = new _Tp[_n];
return arr;
}
-
+
// Allocates memory.
template<typename _Tp>
- _Tp **alloc_2d(int m, int n, _Tp val) {
- _Tp **arr = alloc_2d<_Tp> (m, n);
+ _Tp **alloc_2d(int m, int _n, _Tp val) {
+ _Tp **arr = alloc_2d<_Tp> (m, _n);
for (int i = 0; i < m; i++) {
- for (int j = 0; j < n; j++) {
+ for (int j = 0; j < _n; j++) {
arr[i][j] = val;
}
}
return arr;
}
-
+
void cdiv(double xr, double xi, double yr, double yi) {
- double r, d;
+ double r, dv;
if (std::abs(yr) > std::abs(yi)) {
r = yi / yr;
- d = yr + r * yi;
- cdivr = (xr + r * xi) / d;
- cdivi = (xi - r * xr) / d;
+ dv = yr + r * yi;
+ cdivr = (xr + r * xi) / dv;
+ cdivi = (xi - r * xr) / dv;
} else {
r = yr / yi;
- d = yi + r * yr;
- cdivr = (r * xr + xi) / d;
- cdivi = (r * xi - xr) / d;
+ dv = yi + r * yr;
+ cdivr = (r * xr + xi) / dv;
+ cdivi = (r * xi - xr) / dv;
}
}
-
+
// Nonsymmetric reduction from Hessenberg to real Schur form.
-
+
void hqr2() {
-
+
// This is derived from the Algol procedure hqr2,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
-
+
// Initialize
int nn = this->n;
- int n = nn - 1;
+ int n1 = nn - 1;
int low = 0;
int high = nn - 1;
double eps = pow(2.0, -52.0);
double exshift = 0.0;
double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;
-
+
// Store roots isolated by balanc and compute matrix norm
-
+
double norm = 0.0;
for (int i = 0; i < nn; i++) {
if (i < low || i > high) {
norm = norm + std::abs(H[i][j]);
}
}
-
+
// Outer loop over eigenvalue index
int iter = 0;
- while (n >= low) {
-
+ while (n1 >= low) {
+
// Look for single small sub-diagonal element
- int l = n;
+ int l = n1;
while (l > low) {
s = std::abs(H[l - 1][l - 1]) + std::abs(H[l][l]);
if (s == 0.0) {
}
l--;
}
-
+
// Check for convergence
// One root found
-
- if (l == n) {
- H[n][n] = H[n][n] + exshift;
- d[n] = H[n][n];
- e[n] = 0.0;
- n--;
+
+ if (l == n1) {
+ H[n1][n1] = H[n1][n1] + exshift;
+ d[n1] = H[n1][n1];
+ e[n1] = 0.0;
+ n1--;
iter = 0;
-
+
// Two roots found
-
- } else if (l == n - 1) {
- w = H[n][n - 1] * H[n - 1][n];
- p = (H[n - 1][n - 1] - H[n][n]) / 2.0;
+
+ } else if (l == n1 - 1) {
+ w = H[n1][n1 - 1] * H[n1 - 1][n1];
+ p = (H[n1 - 1][n1 - 1] - H[n1][n1]) / 2.0;
q = p * p + w;
z = sqrt(std::abs(q));
- H[n][n] = H[n][n] + exshift;
- H[n - 1][n - 1] = H[n - 1][n - 1] + exshift;
- x = H[n][n];
-
+ H[n1][n1] = H[n1][n1] + exshift;
+ H[n1 - 1][n1 - 1] = H[n1 - 1][n1 - 1] + exshift;
+ x = H[n1][n1];
+
// Real pair
-
+
if (q >= 0) {
if (p >= 0) {
z = p + z;
} else {
z = p - z;
}
- d[n - 1] = x + z;
- d[n] = d[n - 1];
+ d[n1 - 1] = x + z;
+ d[n1] = d[n1 - 1];
if (z != 0.0) {
- d[n] = x - w / z;
+ d[n1] = x - w / z;
}
- e[n - 1] = 0.0;
- e[n] = 0.0;
- x = H[n][n - 1];
+ e[n1 - 1] = 0.0;
+ e[n1] = 0.0;
+ x = H[n1][n1 - 1];
s = std::abs(x) + std::abs(z);
p = x / s;
q = z / s;
r = sqrt(p * p + q * q);
p = p / r;
q = q / r;
-
+
// Row modification
-
- for (int j = n - 1; j < nn; j++) {
- z = H[n - 1][j];
- H[n - 1][j] = q * z + p * H[n][j];
- H[n][j] = q * H[n][j] - p * z;
+
+ for (int j = n1 - 1; j < nn; j++) {
+ z = H[n1 - 1][j];
+ H[n1 - 1][j] = q * z + p * H[n1][j];
+ H[n1][j] = q * H[n1][j] - p * z;
}
-
+
// Column modification
-
- for (int i = 0; i <= n; i++) {
- z = H[i][n - 1];
- H[i][n - 1] = q * z + p * H[i][n];
- H[i][n] = q * H[i][n] - p * z;
+
+ for (int i = 0; i <= n1; i++) {
+ z = H[i][n1 - 1];
+ H[i][n1 - 1] = q * z + p * H[i][n1];
+ H[i][n1] = q * H[i][n1] - p * z;
}
-
+
// Accumulate transformations
-
+
for (int i = low; i <= high; i++) {
- z = V[i][n - 1];
- V[i][n - 1] = q * z + p * V[i][n];
- V[i][n] = q * V[i][n] - p * z;
+ z = V[i][n1 - 1];
+ V[i][n1 - 1] = q * z + p * V[i][n1];
+ V[i][n1] = q * V[i][n1] - p * z;
}
-
+
// Complex pair
-
+
} else {
- d[n - 1] = x + p;
- d[n] = x + p;
- e[n - 1] = z;
- e[n] = -z;
+ d[n1 - 1] = x + p;
+ d[n1] = x + p;
+ e[n1 - 1] = z;
+ e[n1] = -z;
}
- n = n - 2;
+ n1 = n1 - 2;
iter = 0;
-
+
// No convergence yet
-
+
} else {
-
+
// Form shift
-
- x = H[n][n];
+
+ x = H[n1][n1];
y = 0.0;
w = 0.0;
- if (l < n) {
- y = H[n - 1][n - 1];
- w = H[n][n - 1] * H[n - 1][n];
+ if (l < n1) {
+ y = H[n1 - 1][n1 - 1];
+ w = H[n1][n1 - 1] * H[n1 - 1][n1];
}
-
+
// Wilkinson's original ad hoc shift
-
+
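+ // exceptional ad hoc shifts after 10 and 30 stalled iterations (here and in the iter == 30 branch) help the QR iteration escape near-cyclic behaviour, following the EISPACK/JAMA reference code.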
if (iter == 10) {
exshift += x;
- for (int i = low; i <= n; i++) {
+ for (int i = low; i <= n1; i++) {
H[i][i] -= x;
}
- s = std::abs(H[n][n - 1]) + std::abs(H[n - 1][n - 2]);
+ s = std::abs(H[n1][n1 - 1]) + std::abs(H[n1 - 1][n1 - 2]);
x = y = 0.75 * s;
w = -0.4375 * s * s;
}
-
+
// MATLAB's new ad hoc shift
-
+
if (iter == 30) {
s = (y - x) / 2.0;
s = s * s + w;
s = -s;
}
s = x - w / ((y - x) / 2.0 + s);
- for (int i = low; i <= n; i++) {
+ for (int i = low; i <= n1; i++) {
H[i][i] -= s;
}
exshift += s;
x = y = w = 0.964;
}
}
-
+
iter = iter + 1; // (Could check iteration count here.)
-
+
// Look for two consecutive small sub-diagonal elements
- int m = n - 2;
+ int m = n1 - 2;
while (m >= l) {
z = H[m][m];
r = x - z;
}
m--;
}
-
- for (int i = m + 2; i <= n; i++) {
+
+ for (int i = m + 2; i <= n1; i++) {
H[i][i - 2] = 0.0;
if (i > m + 2) {
H[i][i - 3] = 0.0;
}
}
-
+
// Double QR step involving rows l:n and columns m:n
-
- for (int k = m; k <= n - 1; k++) {
- bool notlast = (k != n - 1);
+
+ for (int k = m; k <= n1 - 1; k++) {
+ bool notlast = (k != n1 - 1);
if (k != m) {
p = H[k][k - 1];
q = H[k + 1][k - 1];
z = r / s;
q = q / p;
r = r / p;
-
+
// Row modification
-
+
for (int j = k; j < nn; j++) {
p = H[k][j] + q * H[k + 1][j];
if (notlast) {
H[k][j] = H[k][j] - p * x;
H[k + 1][j] = H[k + 1][j] - p * y;
}
-
+
// Column modification
-
- for (int i = 0; i <= min(n, k + 3); i++) {
+
+ for (int i = 0; i <= min(n1, k + 3); i++) {
p = x * H[i][k] + y * H[i][k + 1];
if (notlast) {
p = p + z * H[i][k + 2];
H[i][k] = H[i][k] - p;
H[i][k + 1] = H[i][k + 1] - p * q;
}
-
+
// Accumulate transformations
-
+
for (int i = low; i <= high; i++) {
p = x * V[i][k] + y * V[i][k + 1];
if (notlast) {
} // (s != 0)
} // k loop
} // check convergence
- } // while (n >= low)
-
+ } // while (n1 >= low)
+
// Backsubstitute to find vectors of upper triangular form
-
+
if (norm == 0.0) {
return;
}
-
- for (n = nn - 1; n >= 0; n--) {
- p = d[n];
- q = e[n];
-
+
+ for (n1 = nn - 1; n1 >= 0; n1--) {
+ p = d[n1];
+ q = e[n1];
+
// Real vector
-
+
if (q == 0) {
- int l = n;
- H[n][n] = 1.0;
- for (int i = n - 1; i >= 0; i--) {
+ int l = n1;
+ H[n1][n1] = 1.0;
+ for (int i = n1 - 1; i >= 0; i--) {
w = H[i][i] - p;
r = 0.0;
- for (int j = l; j <= n; j++) {
- r = r + H[i][j] * H[j][n];
+ for (int j = l; j <= n1; j++) {
+ r = r + H[i][j] * H[j][n1];
}
if (e[i] < 0.0) {
z = w;
l = i;
if (e[i] == 0.0) {
if (w != 0.0) {
- H[i][n] = -r / w;
+ H[i][n1] = -r / w;
} else {
- H[i][n] = -r / (eps * norm);
+ H[i][n1] = -r / (eps * norm);
}
-
+
// Solve real equations
-
+
} else {
x = H[i][i + 1];
y = H[i + 1][i];
q = (d[i] - p) * (d[i] - p) + e[i] * e[i];
t = (x * s - z * r) / q;
- H[i][n] = t;
+ H[i][n1] = t;
if (std::abs(x) > std::abs(z)) {
- H[i + 1][n] = (-r - w * t) / x;
+ H[i + 1][n1] = (-r - w * t) / x;
} else {
- H[i + 1][n] = (-s - y * t) / z;
+ H[i + 1][n1] = (-s - y * t) / z;
}
}
-
+
// Overflow control
-
- t = std::abs(H[i][n]);
+
+ t = std::abs(H[i][n1]);
if ((eps * t) * t > 1) {
- for (int j = i; j <= n; j++) {
- H[j][n] = H[j][n] / t;
+ for (int j = i; j <= n1; j++) {
+ H[j][n1] = H[j][n1] / t;
}
}
}
}
-
// Complex vector
-
} else if (q < 0) {
- int l = n - 1;
-
+ int l = n1 - 1;
+
// Last vector component imaginary so matrix is triangular
-
- if (std::abs(H[n][n - 1]) > std::abs(H[n - 1][n])) {
- H[n - 1][n - 1] = q / H[n][n - 1];
- H[n - 1][n] = -(H[n][n] - p) / H[n][n - 1];
+
+ if (std::abs(H[n1][n1 - 1]) > std::abs(H[n1 - 1][n1])) {
+ H[n1 - 1][n1 - 1] = q / H[n1][n1 - 1];
+ H[n1 - 1][n1] = -(H[n1][n1] - p) / H[n1][n1 - 1];
} else {
- cdiv(0.0, -H[n - 1][n], H[n - 1][n - 1] - p, q);
- H[n - 1][n - 1] = cdivr;
- H[n - 1][n] = cdivi;
+ cdiv(0.0, -H[n1 - 1][n1], H[n1 - 1][n1 - 1] - p, q);
+ H[n1 - 1][n1 - 1] = cdivr;
+ H[n1 - 1][n1] = cdivi;
}
- H[n][n - 1] = 0.0;
- H[n][n] = 1.0;
- for (int i = n - 2; i >= 0; i--) {
+ H[n1][n1 - 1] = 0.0;
+ H[n1][n1] = 1.0;
+ for (int i = n1 - 2; i >= 0; i--) {
double ra, sa, vr, vi;
ra = 0.0;
sa = 0.0;
- for (int j = l; j <= n; j++) {
- ra = ra + H[i][j] * H[j][n - 1];
- sa = sa + H[i][j] * H[j][n];
+ for (int j = l; j <= n1; j++) {
+ ra = ra + H[i][j] * H[j][n1 - 1];
+ sa = sa + H[i][j] * H[j][n1];
}
w = H[i][i] - p;
-
+
if (e[i] < 0.0) {
z = w;
r = ra;
l = i;
if (e[i] == 0) {
cdiv(-ra, -sa, w, q);
- H[i][n - 1] = cdivr;
- H[i][n] = cdivi;
+ H[i][n1 - 1] = cdivr;
+ H[i][n1] = cdivi;
} else {
-
+
// Solve complex equations
-
+
x = H[i][i + 1];
y = H[i + 1][i];
vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
}
cdiv(x * r - z * ra + q * sa,
x * s - z * sa - q * ra, vr, vi);
- H[i][n - 1] = cdivr;
- H[i][n] = cdivi;
+ H[i][n1 - 1] = cdivr;
+ H[i][n1] = cdivi;
if (std::abs(x) > (std::abs(z) + std::abs(q))) {
- H[i + 1][n - 1] = (-ra - w * H[i][n - 1] + q
- * H[i][n]) / x;
- H[i + 1][n] = (-sa - w * H[i][n] - q * H[i][n
+ H[i + 1][n1 - 1] = (-ra - w * H[i][n1 - 1] + q
+ * H[i][n1]) / x;
+ H[i + 1][n1] = (-sa - w * H[i][n1] - q * H[i][n1
- 1]) / x;
} else {
- cdiv(-r - y * H[i][n - 1], -s - y * H[i][n], z,
+ cdiv(-r - y * H[i][n1 - 1], -s - y * H[i][n1], z,
q);
- H[i + 1][n - 1] = cdivr;
- H[i + 1][n] = cdivi;
+ H[i + 1][n1 - 1] = cdivr;
+ H[i + 1][n1] = cdivi;
}
}
-
+
// Overflow control
-
- t = max(std::abs(H[i][n - 1]), std::abs(H[i][n]));
+
+ t = max(std::abs(H[i][n1 - 1]), std::abs(H[i][n1]));
if ((eps * t) * t > 1) {
- for (int j = i; j <= n; j++) {
- H[j][n - 1] = H[j][n - 1] / t;
- H[j][n] = H[j][n] / t;
+ for (int j = i; j <= n1; j++) {
+ H[j][n1 - 1] = H[j][n1 - 1] / t;
+ H[j][n1] = H[j][n1] / t;
}
}
}
}
}
}
-
+
// Vectors of isolated roots
-
+
for (int i = 0; i < nn; i++) {
if (i < low || i > high) {
for (int j = i; j < nn; j++) {
}
}
}
-
+
// Back transformation to get eigenvectors of original matrix
-
+
for (int j = nn - 1; j >= low; j--) {
for (int i = low; i <= high; i++) {
z = 0.0;
}
}
}
-
+
// Nonsymmetric reduction to Hessenberg form.
void orthes() {
// This is derived from the Algol procedures orthes and ortran,
// Fortran subroutines in EISPACK.
int low = 0;
int high = n - 1;
-
+
for (int m = low + 1; m <= high - 1; m++) {
-
+
// Scale column.
-
+
double scale = 0.0;
for (int i = m; i <= high; i++) {
scale = scale + std::abs(H[i][m - 1]);
}
if (scale != 0.0) {
-
+
// Compute Householder transformation.
-
+
double h = 0.0;
for (int i = high; i >= m; i--) {
ort[i] = H[i][m - 1] / scale;
}
h = h - ort[m] * g;
ort[m] = ort[m] - g;
-
+
// Apply Householder similarity transformation
// H = (I-u*u'/h)*H*(I-u*u'/h)
-
+
for (int j = m; j < n; j++) {
double f = 0.0;
for (int i = high; i >= m; i--) {
H[i][j] -= f * ort[i];
}
}
-
+
for (int i = 0; i <= high; i++) {
double f = 0.0;
for (int j = high; j >= m; j--) {
H[m][m - 1] = scale * g;
}
}
-
+
// Accumulate transformations (Algol's ortran).
-
+
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
V[i][j] = (i == j ? 1.0 : 0.0);
}
}
-
+
for (int m = high - 1; m >= low + 1; m--) {
if (H[m][m - 1] != 0.0) {
for (int i = m + 1; i <= high; i++) {
}
}
}
-
+
// Releases all internal working memory.
void release() {
// releases the working data
delete[] H;
delete[] V;
}
-
+
// Computes the Eigenvalue Decomposition for a matrix given in H.
void compute() {
// Allocate memory for the working data.
// Deallocate the memory by releasing all internal working data.
release();
}
-
+
public:
EigenvalueDecomposition()
: n(0) { }
-
+
// Initializes & computes the Eigenvalue Decomposition for a general matrix
// given in src. This function is a port of the EigenvalueSolver in JAMA,
// which has been released to public domain by The MathWorks and the
EigenvalueDecomposition(InputArray src) {
compute(src);
}
-
+
// This function computes the Eigenvalue Decomposition for a general matrix
// given in src. This function is a port of the EigenvalueSolver in JAMA,
// which has been released to public domain by The MathWorks and the
compute();
}
}
-
+
~EigenvalueDecomposition() {}
-
+
// Returns the eigenvalues of the Eigenvalue Decomposition.
Mat eigenvalues() { return _eigenvalues; }
// Returns the eigenvectors of the Eigenvalue Decomposition.
//------------------------------------------------------------------------------
void LDA::save(const string& filename) const {
FileStorage fs(filename, FileStorage::WRITE);
- if (!fs.isOpened())
+ if (!fs.isOpened()) {
CV_Error(CV_StsError, "File can't be opened for writing!");
+ }
this->save(fs);
fs.release();
}
vector<int> mapped_labels(labels.size());
vector<int> num2label = remove_dups(labels);
map<int, int> label2num;
- for (size_t i = 0; i < num2label.size(); i++)
- label2num[num2label[i]] = (int)i;
+ for (int i = 0; i < (int)num2label.size(); i++)
+ label2num[num2label[i]] = i;
for (size_t i = 0; i < labels.size(); i++)
mapped_labels[i] = label2num[labels[i]];
// get sample size, dimension
int D = data.cols;
// number of unique labels
int C = (int)num2label.size();
+ // we can't do an LDA on a single class; there would be
+ // nothing to separate it from
+ if(C == 1) {
+ string error_message = "At least two classes are needed to perform an LDA. Reason: Only one class was given!";
+ CV_Error(CV_StsBadArg, error_message);
+ }
// throw an error if the number of labels does not match the number of samples
- if (labels.size() != (size_t)N)
- CV_Error(CV_StsBadArg, "Error: The number of samples must equal the number of labels.");
+ if (labels.size() != static_cast<size_t>(N)) {
+ string error_message = format("The number of samples must equal the number of labels. Given %d labels, %d samples. ", labels.size(), N);
+ CV_Error(CV_StsBadArg, error_message);
+ }
// warn if within-classes scatter matrix becomes singular
- if (N < D)
+ if (N < D) {
cout << "Warning: Less observations than feature dimension given!"
- << "Computation will probably fail."
- << endl;
+ << "Computation will probably fail."
+ << endl;
+ }
// clip number of components to be a valid number
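+ // (an LDA has at most C-1 non-trivial discriminant directions, hence the clamp below)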
- if ((_num_components <= 0) || (_num_components > (C - 1)))
+ if ((_num_components <= 0) || (_num_components > (C - 1))) {
_num_components = (C - 1);
+ }
// holds the mean over all classes
Mat meanTotal = Mat::zeros(1, D, data.type());
// holds the mean for each class
add(meanClass[classIdx], instance, meanClass[classIdx]);
numClass[classIdx]++;
}
- // calculate means
- meanTotal.convertTo(meanTotal, meanTotal.type(),
- 1.0 / static_cast<double> (N));
- for (int i = 0; i < C; i++)
- meanClass[i].convertTo(meanClass[i], meanClass[i].type(),
- 1.0 / static_cast<double> (numClass[i]));
+ // calculate total mean
+ meanTotal.convertTo(meanTotal, meanTotal.type(), 1.0 / static_cast<double> (N));
+ // calculate class means
+ for (int i = 0; i < C; i++) {
+ meanClass[i].convertTo(meanClass[i], meanClass[i].type(), 1.0 / static_cast<double> (numClass[i]));
+ }
// subtract class means
for (int i = 0; i < N; i++) {
int classIdx = mapped_labels[i];
lda(_src.getMat(), _lbls);
break;
default:
- CV_Error(CV_StsNotImplemented, "This data type is not supported by subspace::LDA::compute.");
+ string error_message = format("InputArray Datatype %d is not supported.", _src.kind());
+ CV_Error(CV_StsBadArg, error_message);
break;
}
}
Mat LDA::reconstruct(InputArray src) {
return subspaceReconstruct(_eigenvectors, Mat(), _dataAsRow ? src : src.getMat().t());
}
-
+
}
namespace cv
{
-
+
//------------------------------------interp-------------------------------------------
-LogPolar_Interp::LogPolar_Interp(int w, int h, Point2i center, int R, double ro0, int interp, int full, int S, int sp)
+LogPolar_Interp::LogPolar_Interp(int w, int h, Point2i center, int _R, double _ro0, int _interp, int full, int _s, int sp)
{
if ( (center.x!=w/2 || center.y!=h/2) && full==0) full=1;
if (sp){
int jc=M/2-1, ic=N/2-1;
- int romax=min(ic, jc);
- double a=exp(log((double)(romax/2-1)/(double)ro0)/(double)R);
- S=(int) floor(2*CV_PI/(a-1)+0.5);
+ int _romax=min(ic, jc);
+ double _a=exp(log((double)(_romax/2-1)/(double)_ro0)/(double)_R);
+ _s=(int) floor(2*CV_PI/(_a-1)+0.5);
}
- this->interp=interp;
+ interp=_interp;
- create_map(M, N, R, S, ro0);
+ create_map(M, N, _R, _s, _ro0);
}
-void LogPolar_Interp::create_map(int M, int N, int R, int S, double ro0)
+void LogPolar_Interp::create_map(int _M, int _n, int _R, int _s, double _ro0)
{
- this->M=M;
- this->N=N;
- this->R=R;
- this->S=S;
- this->ro0=ro0;
+ M=_M;
+ N=_n;
+ R=_R;
+ S=_s;
+ ro0=_ro0;
int jc=N/2-1, ic=M/2-1;
romax=min(ic, jc);
for(int u=0; u<R; u++)
{
Rsri.at<float>(v,u)=(float)(ro0*pow(a,u)*sin(v/q)+jc);
- Csri.at<float>(v,u)=(float)(ro0*pow(a,u)*cos(v/q)+ic);
+ Csri.at<float>(v,u)=(float)(ro0*pow(a,u)*cos(v/q)+ic);
}
}
const Mat LogPolar_Interp::to_cortical(const Mat &source)
{
Mat out(S,R,CV_8UC1,Scalar(0));
-
+
Mat source_border;
copyMakeBorder(source,source_border,top,bottom,left,right,BORDER_CONSTANT,Scalar(0));
Mat out(N,M,CV_8UC1,Scalar(0));
Mat source_border;
-
+
if (interp==INTER_NEAREST || interp==INTER_LINEAR){
copyMakeBorder(source,source_border,0,1,0,0,BORDER_CONSTANT,Scalar(0));
Mat rowS0 = source_border.row(S);
//------------------------------------overlapping----------------------------------
-LogPolar_Overlapping::LogPolar_Overlapping(int w, int h, Point2i center, int R, double ro0, int full, int S, int sp)
+LogPolar_Overlapping::LogPolar_Overlapping(int w, int h, Point2i center, int _R, double _ro0, int full, int _s, int sp)
{
if ( (center.x!=w/2 || center.y!=h/2) && full==0) full=1;
if (sp){
int jc=M/2-1, ic=N/2-1;
- int romax=min(ic, jc);
- double a=exp(log((double)(romax/2-1)/(double)ro0)/(double)R);
- S=(int) floor(2*CV_PI/(a-1)+0.5);
+ int _romax=min(ic, jc);
+ double _a=exp(log((double)(_romax/2-1)/(double)_ro0)/(double)_R);
+ _s=(int) floor(2*CV_PI/(_a-1)+0.5);
}
- create_map(M, N, R, S, ro0);
+ create_map(M, N, _R, _s, _ro0);
}
-void LogPolar_Overlapping::create_map(int M, int N, int R, int S, double ro0)
+void LogPolar_Overlapping::create_map(int _M, int _n, int _R, int _s, double _ro0)
{
- this->M=M;
- this->N=N;
- this->R=R;
- this->S=S;
- this->ro0=ro0;
+ M=_M;
+ N=_n;
+ R=_R;
+ S=_s;
+ ro0=_ro0;
int jc=N/2-1, ic=M/2-1;
romax=min(ic, jc);
for(int u=0; u<R; u++)
{
Rsri.at<float>(v,u)=(float)(ro0*pow(a,u)*sin(v/q)+jc);
- Csri.at<float>(v,u)=(float)(ro0*pow(a,u)*cos(v/q)+ic);
+ Csri.at<float>(v,u)=(float)(ro0*pow(a,u)*cos(v/q)+ic);
Rsr[v*R+u]=(int)floor(Rsri.at<float>(v,u));
- Csr[v*R+u]=(int)floor(Csri.at<float>(v,u));
+ Csr[v*R+u]=(int)floor(Csri.at<float>(v,u));
}
}
bool done=false;
-
+
for(int i=0; i<R; i++)
{
Wsr[i]=ro0*(a-1)*pow(a,i-1);
done =true;
}
}
-
+
for(int j=0; j<N; j++)
{
for(int i=0; i<M; i++)//mdf
theta+=2*CV_PI;
ETAyx.at<float>(j,i)=(float)(q*theta);
-
+
double ro2=(j-jc)*(j-jc)+(i-ic)*(i-ic);
CSIyx.at<float>(j,i)=(float)(0.5*log(ro2/(ro0*ro0))/log(a));
}
remap(source_border,out,CSIyx,ETAyx,INTER_LINEAR);
int wm=w_ker_2D[R-1].w;
-
+
vector<double> IMG((N+2*wm+1)*(M+2*wm+1), 0.);
vector<double> NOR((N+2*wm+1)*(M+2*wm+1), 0.);
Mat out_cropped=out(Range(top,N-1-bottom),Range(left,M-1-right));
return out_cropped;
}
-
+
LogPolar_Overlapping::~LogPolar_Overlapping()
{
}
//----------------------------------------adjacent---------------------------------------
-LogPolar_Adjacent::LogPolar_Adjacent(int w, int h, Point2i center, int R, double ro0, double smin, int full, int S, int sp)
+LogPolar_Adjacent::LogPolar_Adjacent(int w, int h, Point2i center, int _R, double _ro0, double smin, int full, int _s, int sp)
{
if ( (center.x!=w/2 || center.y!=h/2) && full==0) full=1;
if (sp){
int jc=M/2-1, ic=N/2-1;
- int romax=min(ic, jc);
- double a=exp(log((double)(romax/2-1)/(double)ro0)/(double)R);
- S=(int) floor(2*CV_PI/(a-1)+0.5);
+ int _romax=min(ic, jc);
+ double _a=exp(log((double)(_romax/2-1)/(double)_ro0)/(double)_R);
+ _s=(int) floor(2*CV_PI/(_a-1)+0.5);
}
- create_map(M, N, R, S, ro0, smin);
+ create_map(M, N, _R, _s, _ro0, smin);
}
-void LogPolar_Adjacent::create_map(int M, int N, int R, int S, double ro0, double smin)
+void LogPolar_Adjacent::create_map(int _M, int _n, int _R, int _s, double _ro0, double smin)
{
- LogPolar_Adjacent::M=M;
- LogPolar_Adjacent::N=N;
- LogPolar_Adjacent::R=R;
- LogPolar_Adjacent::S=S;
- LogPolar_Adjacent::ro0=ro0;
+ M=_M;
+ N=_n;
+ R=_R;
+ S=_s;
+ ro0=_ro0;
romax=min(M/2.0, N/2.0);
a=exp(log(romax/ro0)/(double)R);
void LogPolar_Adjacent::subdivide_recursively(double x, double y, int i, int j, double length, double smin)
-{
+{
if(length<=smin)
{
int u, v;
for(int j=0; j<N; j++)
for(int i=0; i<M; i++)
- {
+ {
for(size_t z=0; z<(L[M*j+i]).size(); z++)
{
map[R*((L[M*j+i])[z].v)+((L[M*j+i])[z].u)]+=((L[M*j+i])[z].a)*(source_border.at<uchar>(j,i));
else
v= (int) floor(q*(theta+2*CV_PI));
return true;
- }
+ }
}
LogPolar_Adjacent::~LogPolar_Adjacent()
#include "precomp.hpp"
#include <limits>
-namespace cv
+namespace
{
+ using namespace cv;
const size_t MAX_STACK_SIZE = 255;
const size_t MAX_LEAFS = 8;
-
+
bool checkIfNodeOutsideSphere(const Octree::Node& node, const Point3f& c, float r)
{
if (node.x_max < (c.x - r) || node.y_max < (c.y - r) || node.z_max < (c.z - r))
return true;
-
+
if ((c.x + r) < node.x_min || (c.y + r) < node.y_min || (c.z + r) < node.z_min)
return true;
-
+
return false;
}
-
+
bool checkIfNodeInsideSphere(const Octree::Node& node, const Point3f& c, float r)
{
r *= r;
-
+
float d2_xmin = (node.x_min - c.x) * (node.x_min - c.x);
float d2_ymin = (node.y_min - c.y) * (node.y_min - c.y);
float d2_zmin = (node.z_min - c.z) * (node.z_min - c.z);
-
+
if (d2_xmin + d2_ymin + d2_zmin > r)
return false;
-
+
float d2_zmax = (node.z_max - c.z) * (node.z_max - c.z);
-
+
if (d2_xmin + d2_ymin + d2_zmax > r)
return false;
-
+
float d2_ymax = (node.y_max - c.y) * (node.y_max - c.y);
-
+
if (d2_xmin + d2_ymax + d2_zmin > r)
return false;
-
+
if (d2_xmin + d2_ymax + d2_zmax > r)
return false;
-
+
float d2_xmax = (node.x_max - c.x) * (node.x_max - c.x);
-
+
if (d2_xmax + d2_ymin + d2_zmin > r)
return false;
-
+
if (d2_xmax + d2_ymin + d2_zmax > r)
return false;
-
+
if (d2_xmax + d2_ymax + d2_zmin > r)
return false;
-
+
if (d2_xmax + d2_ymax + d2_zmax > r)
return false;
-
+
return true;
}
-
+
void fillMinMax(const vector<Point3f>& points, Octree::Node& node)
{
node.x_max = node.y_max = node.z_max = -std::numeric_limits<float>::max(); // lowest start value; numeric_limits<float>::min() is the smallest positive float, not the most negative one
node.x_min = node.y_min = node.z_min = std::numeric_limits<float>::max();
-
+
for (size_t i = 0; i < points.size(); ++i)
{
const Point3f& point = points[i];
-
+
if (node.x_max < point.x)
node.x_max = point.x;
-
+
if (node.y_max < point.y)
node.y_max = point.y;
-
+
if (node.z_max < point.z)
node.z_max = point.z;
-
+
if (node.x_min > point.x)
node.x_min = point.x;
-
+
if (node.y_min > point.y)
node.y_min = point.y;
-
+
if (node.z_min > point.z)
node.z_min = point.z;
}
}
-
+
size_t findSubboxForPoint(const Point3f& point, const Octree::Node& node)
{
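+ // pack the three half-space tests into a 3-bit child index: x selects bit 2, y bit 1, z bit 0, giving one of the 8 octants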
size_t ind_x = point.x < (node.x_max + node.x_min) / 2 ? 0 : 1;
size_t ind_y = point.y < (node.y_max + node.y_min) / 2 ? 0 : 1;
size_t ind_z = point.z < (node.z_max + node.z_min) / 2 ? 0 : 1;
-
+
return (ind_x << 2) + (ind_y << 1) + (ind_z << 0);
}
void initChildBox(const Octree::Node& parent, size_t boxIndex, Octree::Node& child)
child.x_min = child.x_max = (parent.x_max + parent.x_min) / 2;
child.y_min = child.y_max = (parent.y_max + parent.y_min) / 2;
child.z_min = child.z_max = (parent.z_max + parent.z_min) / 2;
-
+
if ((boxIndex >> 0) & 1)
child.z_max = parent.z_max;
else
child.z_min = parent.z_min;
-
+
if ((boxIndex >> 1) & 1)
child.y_max = parent.y_max;
else
child.y_min = parent.y_min;
-
+
if ((boxIndex >> 2) & 1)
child.x_max = parent.x_max;
else
child.x_min = parent.x_min;
}
-
+
+}//namespace
+
////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////// Octree //////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
-
+namespace cv
+{
Octree::Octree()
{
}
-
- Octree::Octree(const vector<Point3f>& points3d, int maxLevels, int minPoints)
+
+ Octree::Octree(const vector<Point3f>& points3d, int maxLevels, int _minPoints)
{
- buildTree(points3d, maxLevels, minPoints);
+ buildTree(points3d, maxLevels, _minPoints);
}
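+ // Illustrative usage (sketch only; 'cloud' stands for a caller-provided point set):
+ //   std::vector<cv::Point3f> cloud;                       // filled with 3D points
+ //   cv::Octree tree(cloud, /*maxLevels=*/10, /*minPoints=*/20);
+ //   std::vector<cv::Point3f> neighbours;
+ //   tree.getPointsWithinSphere(cv::Point3f(0, 0, 0), 0.5f, neighbours);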
-
+
Octree::~Octree()
{
}
-
+
void Octree::getPointsWithinSphere(const Point3f& center, float radius, vector<Point3f>& out) const
{
out.clear();
-
+
if (nodes.empty())
return;
-
+
int stack[MAX_STACK_SIZE];
int pos = 0;
stack[pos] = 0;
-
+
while (pos >= 0)
{
const Node& cur = nodes[stack[pos--]];
-
+
if (checkIfNodeOutsideSphere(cur, center, radius))
continue;
-
+
if (checkIfNodeInsideSphere(cur, center, radius))
{
size_t sz = out.size();
out[sz++] = points[i];
continue;
}
-
+
if (cur.isLeaf)
{
double r2 = radius * radius;
size_t sz = out.size();
out.resize(sz + (cur.end - cur.begin));
-
+
for (int i = cur.begin; i < cur.end; ++i)
{
const Point3f& point = points[i];
-
+
double dx = (point.x - center.x);
double dy = (point.y - center.y);
double dz = (point.z - center.z);
-
+
double dist2 = dx * dx + dy * dy + dz * dz;
-
+
if (dist2 < r2)
out[sz++] = point;
};
out.resize(sz);
continue;
}
-
+
if (cur.children[0])
stack[++pos] = cur.children[0];
-
+
if (cur.children[1])
stack[++pos] = cur.children[1];
-
+
if (cur.children[2])
stack[++pos] = cur.children[2];
-
+
if (cur.children[3])
stack[++pos] = cur.children[3];
-
+
if (cur.children[4])
stack[++pos] = cur.children[4];
-
+
if (cur.children[5])
stack[++pos] = cur.children[5];
-
+
if (cur.children[6])
stack[++pos] = cur.children[6];
-
+
if (cur.children[7])
stack[++pos] = cur.children[7];
}
}
-
- void Octree::buildTree(const vector<Point3f>& points3d, int maxLevels, int minPoints)
+
+ void Octree::buildTree(const vector<Point3f>& points3d, int maxLevels, int _minPoints)
{
assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
points.resize(points3d.size());
std::copy(points3d.begin(), points3d.end(), points.begin());
- this->minPoints = minPoints;
-
+ minPoints = _minPoints;
+
nodes.clear();
nodes.push_back(Node());
Node& root = nodes[0];
fillMinMax(points, root);
-
+
root.isLeaf = true;
root.maxLevels = maxLevels;
root.begin = 0;
root.end = (int)points.size();
for (size_t i = 0; i < MAX_LEAFS; i++)
root.children[i] = 0;
-
- if (maxLevels != 1 && (root.end - root.begin) > minPoints)
+
+ if (maxLevels != 1 && (root.end - root.begin) > _minPoints)
{
root.isLeaf = false;
buildNext(0);
}
}
-
+
void Octree::buildNext(size_t nodeInd)
{
size_t size = nodes[nodeInd].end - nodes[nodeInd].begin;
-
+
vector<size_t> boxBorders(MAX_LEAFS+1, 0);
vector<size_t> boxIndices(size);
vector<Point3f> tempPoints(size);
-
+
for (int i = nodes[nodeInd].begin, j = 0; i < nodes[nodeInd].end; ++i, ++j)
{
const Point3f& p = points[i];
-
+
size_t subboxInd = findSubboxForPoint(p, nodes[nodeInd]);
-
+
boxBorders[subboxInd+1]++;
boxIndices[j] = subboxInd;
tempPoints[j] = p;
}
-
+
for (size_t i = 1; i < boxBorders.size(); ++i)
boxBorders[i] += boxBorders[i-1];
-
+
vector<size_t> writeInds(boxBorders.begin(), boxBorders.end());
-
+
for (size_t i = 0; i < size; ++i)
{
size_t boxIndex = boxIndices[i];
Point3f& curPoint = tempPoints[i];
-
+
size_t copyTo = nodes[nodeInd].begin + writeInds[boxIndex]++;
points[copyTo] = curPoint;
}
-
+
for (size_t i = 0; i < MAX_LEAFS; ++i)
{
if (boxBorders[i] == boxBorders[i+1])
continue;
-
+
nodes.push_back(Node());
Node& child = nodes.back();
initChildBox(nodes[nodeInd], i, child);
-
+
child.isLeaf = true;
child.maxLevels = nodes[nodeInd].maxLevels - 1;
child.begin = nodes[nodeInd].begin + (int)boxBorders[i+0];
child.end = nodes[nodeInd].begin + (int)boxBorders[i+1];
for (size_t k = 0; k < MAX_LEAFS; k++)
child.children[k] = 0;
-
+
nodes[nodeInd].children[i] = (int)(nodes.size() - 1);
-
+
if (child.maxLevels != 1 && (child.end - child.begin) > minPoints)
{
child.isLeaf = false;
}
}
}
-
+
}
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4710 4711 4514 4996 )
-#endif
-
-#ifdef HAVE_CVCONFIG_H
+#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
namespace cv
{
-
-Retina::Retina(const cv::Size inputSize)
+
+Retina::Retina(const cv::Size inputSz)
{
_retinaFilter = 0;
- _init(inputSize, true, RETINA_COLOR_BAYER, false);
+ _init(inputSz, true, RETINA_COLOR_BAYER, false);
}
-Retina::Retina(const cv::Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght)
+Retina::Retina(const cv::Size inputSz, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght)
{
_retinaFilter = 0;
- _init(inputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
+ _init(inputSz, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
};
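+ // Illustrative usage (sketch only; 'frame' stands for a caller-provided gray or BGR cv::Mat):
+ //   cv::Retina retina(frame.size());
+ //   retina.run(frame);
+ //   cv::Mat parvo, magno;
+ //   retina.getParvo(parvo);   // detail (parvocellular) output
+ //   retina.getMagno(magno);   // transient/motion (magnocellular) output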
Retina::~Retina()
}
/**
-* retreive retina input buffer size
+* retrieve retina input buffer size
*/
Size Retina::inputSize(){return cv::Size(_retinaFilter->getInputNBcolumns(), _retinaFilter->getInputNBrows());}
/**
-* retreive retina output buffer size
+* retrieve retina output buffer size
*/
Size Retina::outputSize(){return cv::Size(_retinaFilter->getOutputNBcolumns(), _retinaFilter->getOutputNBrows());}
void Retina::setColorSaturation(const bool saturateColors, const float colorSaturationValue)
{
- _retinaFilter->setColorSaturation(saturateColors, colorSaturationValue);
+ _retinaFilter->setColorSaturation(saturateColors, colorSaturationValue);
}
struct Retina::RetinaParameters Retina::getParameters(){return _retinaParameters;}
setup(fs, applyDefaultSetupOnFailure);
}catch(Exception &e)
{
- std::cout<<"Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>"<<e.what()<<std::endl;
- if (applyDefaultSetupOnFailure)
- {
+ std::cout<<"Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>"<<e.what()<<std::endl;
+ if (applyDefaultSetupOnFailure)
+ {
std::cout<<"Retina::setup: resetting retina with default parameters"<<std::endl;
- setupOPLandIPLParvoChannel();
- setupIPLMagnoChannel();
- }
+ setupOPLandIPLParvoChannel();
+ setupIPLMagnoChannel();
+ }
else
{
- std::cout<<"=> keeping current parameters"<<std::endl;
+ std::cout<<"=> keeping current parameters"<<std::endl;
}
}
}
void Retina::setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure)
{
- try
- {
- // read parameters file if it exists or apply default setup if asked for
- if (!fs.isOpened())
- {
- std::cout<<"Retina::setup: provided parameters file could not be open... skeeping configuration"<<std::endl;
- return;
- // implicit else case : retinaParameterFile could be open (it exists at least)
- }
+ try
+ {
+ // read parameters file if it exists or apply default setup if asked for
+ if (!fs.isOpened())
+ {
+ std::cout<<"Retina::setup: provided parameters file could not be open... skeeping configuration"<<std::endl;
+ return;
+ // implicit else case : retinaParameterFile could be open (it exists at least)
+ }
// OPL and Parvo init first... update at the same time the parameters structure and the retina core
- cv::FileNode rootFn = fs.root(), currFn=rootFn["OPLandIPLparvo"];
- currFn["colorMode"]>>_retinaParameters.OPLandIplParvo.colorMode;
- currFn["normaliseOutput"]>>_retinaParameters.OPLandIplParvo.normaliseOutput;
- currFn["photoreceptorsLocalAdaptationSensitivity"]>>_retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity;
- currFn["photoreceptorsTemporalConstant"]>>_retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant;
- currFn["photoreceptorsSpatialConstant"]>>_retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant;
- currFn["horizontalCellsGain"]>>_retinaParameters.OPLandIplParvo.horizontalCellsGain;
- currFn["hcellsTemporalConstant"]>>_retinaParameters.OPLandIplParvo.hcellsTemporalConstant;
- currFn["hcellsSpatialConstant"]>>_retinaParameters.OPLandIplParvo.hcellsSpatialConstant;
- currFn["ganglionCellsSensitivity"]>>_retinaParameters.OPLandIplParvo.ganglionCellsSensitivity;
- setupOPLandIPLParvoChannel(_retinaParameters.OPLandIplParvo.colorMode, _retinaParameters.OPLandIplParvo.normaliseOutput, _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity, _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant, _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant, _retinaParameters.OPLandIplParvo.horizontalCellsGain, _retinaParameters.OPLandIplParvo.hcellsTemporalConstant, _retinaParameters.OPLandIplParvo.hcellsSpatialConstant, _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity);
-
- // init retina IPL magno setup... update at the same time the parameters structure and the retina core
- currFn=rootFn["IPLmagno"];
- currFn["normaliseOutput"]>>_retinaParameters.IplMagno.normaliseOutput;
- currFn["parasolCells_beta"]>>_retinaParameters.IplMagno.parasolCells_beta;
- currFn["parasolCells_tau"]>>_retinaParameters.IplMagno.parasolCells_tau;
- currFn["parasolCells_k"]>>_retinaParameters.IplMagno.parasolCells_k;
- currFn["amacrinCellsTemporalCutFrequency"]>>_retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency;
- currFn["V0CompressionParameter"]>>_retinaParameters.IplMagno.V0CompressionParameter;
- currFn["localAdaptintegration_tau"]>>_retinaParameters.IplMagno.localAdaptintegration_tau;
- currFn["localAdaptintegration_k"]>>_retinaParameters.IplMagno.localAdaptintegration_k;
-
- setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency,_retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k);
-
- }catch(Exception &e)
- {
- std::cout<<"Retina::setup: resetting retina with default parameters"<<std::endl;
- if (applyDefaultSetupOnFailure)
- {
- setupOPLandIPLParvoChannel();
- setupIPLMagnoChannel();
- }
- std::cout<<"Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>"<<e.what()<<std::endl;
- std::cout<<"=> keeping current parameters"<<std::endl;
- }
-
- // report current configuration
- std::cout<<printSetup()<<std::endl;
+ cv::FileNode rootFn = fs.root(), currFn=rootFn["OPLandIPLparvo"];
+ currFn["colorMode"]>>_retinaParameters.OPLandIplParvo.colorMode;
+ currFn["normaliseOutput"]>>_retinaParameters.OPLandIplParvo.normaliseOutput;
+ currFn["photoreceptorsLocalAdaptationSensitivity"]>>_retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity;
+ currFn["photoreceptorsTemporalConstant"]>>_retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant;
+ currFn["photoreceptorsSpatialConstant"]>>_retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant;
+ currFn["horizontalCellsGain"]>>_retinaParameters.OPLandIplParvo.horizontalCellsGain;
+ currFn["hcellsTemporalConstant"]>>_retinaParameters.OPLandIplParvo.hcellsTemporalConstant;
+ currFn["hcellsSpatialConstant"]>>_retinaParameters.OPLandIplParvo.hcellsSpatialConstant;
+ currFn["ganglionCellsSensitivity"]>>_retinaParameters.OPLandIplParvo.ganglionCellsSensitivity;
+ setupOPLandIPLParvoChannel(_retinaParameters.OPLandIplParvo.colorMode, _retinaParameters.OPLandIplParvo.normaliseOutput, _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity, _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant, _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant, _retinaParameters.OPLandIplParvo.horizontalCellsGain, _retinaParameters.OPLandIplParvo.hcellsTemporalConstant, _retinaParameters.OPLandIplParvo.hcellsSpatialConstant, _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity);
+
+ // init retina IPL magno setup... update at the same time the parameters structure and the retina core
+ currFn=rootFn["IPLmagno"];
+ currFn["normaliseOutput"]>>_retinaParameters.IplMagno.normaliseOutput;
+ currFn["parasolCells_beta"]>>_retinaParameters.IplMagno.parasolCells_beta;
+ currFn["parasolCells_tau"]>>_retinaParameters.IplMagno.parasolCells_tau;
+ currFn["parasolCells_k"]>>_retinaParameters.IplMagno.parasolCells_k;
+ currFn["amacrinCellsTemporalCutFrequency"]>>_retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency;
+ currFn["V0CompressionParameter"]>>_retinaParameters.IplMagno.V0CompressionParameter;
+ currFn["localAdaptintegration_tau"]>>_retinaParameters.IplMagno.localAdaptintegration_tau;
+ currFn["localAdaptintegration_k"]>>_retinaParameters.IplMagno.localAdaptintegration_k;
+
+ setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency,_retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k);
+
+ }catch(Exception &e)
+ {
+ std::cout<<"Retina::setup: resetting retina with default parameters"<<std::endl;
+ if (applyDefaultSetupOnFailure)
+ {
+ setupOPLandIPLParvoChannel();
+ setupIPLMagnoChannel();
+ }
+ std::cout<<"Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>"<<e.what()<<std::endl;
+ std::cout<<"=> keeping current parameters"<<std::endl;
+ }
+
+ // report current configuration
+ std::cout<<printSetup()<<std::endl;
}
void Retina::setup(cv::Retina::RetinaParameters newConfiguration)
const std::string Retina::printSetup()
{
- std::stringstream outmessage;
-
- // displaying OPL and IPL parvo setup
- outmessage<<"Current Retina instance setup :"
- <<"\nOPLandIPLparvo"<<"{"
- << "\n==> colorMode : " << _retinaParameters.OPLandIplParvo.colorMode
- << "\n==> normalizeParvoOutput :" << _retinaParameters.OPLandIplParvo.normaliseOutput
- << "\n==> photoreceptorsLocalAdaptationSensitivity : " << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity
- << "\n==> photoreceptorsTemporalConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant
- << "\n==> photoreceptorsSpatialConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant
- << "\n==> horizontalCellsGain : " << _retinaParameters.OPLandIplParvo.horizontalCellsGain
- << "\n==> hcellsTemporalConstant : " << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant
- << "\n==> hcellsSpatialConstant : " << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant
- << "\n==> parvoGanglionCellsSensitivity : " << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity
- <<"}\n";
-
- // displaying IPL magno setup
- outmessage<<"Current Retina instance setup :"
- <<"\nIPLmagno"<<"{"
- << "\n==> normaliseOutput : " << _retinaParameters.IplMagno.normaliseOutput
- << "\n==> parasolCells_beta : " << _retinaParameters.IplMagno.parasolCells_beta
- << "\n==> parasolCells_tau : " << _retinaParameters.IplMagno.parasolCells_tau
- << "\n==> parasolCells_k : " << _retinaParameters.IplMagno.parasolCells_k
- << "\n==> amacrinCellsTemporalCutFrequency : " << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency
- << "\n==> V0CompressionParameter : " << _retinaParameters.IplMagno.V0CompressionParameter
- << "\n==> localAdaptintegration_tau : " << _retinaParameters.IplMagno.localAdaptintegration_tau
- << "\n==> localAdaptintegration_k : " << _retinaParameters.IplMagno.localAdaptintegration_k
- <<"}";
- return outmessage.str();
+ std::stringstream outmessage;
+
+ // displaying OPL and IPL parvo setup
+ outmessage<<"Current Retina instance setup :"
+ <<"\nOPLandIPLparvo"<<"{"
+ << "\n==> colorMode : " << _retinaParameters.OPLandIplParvo.colorMode
+ << "\n==> normalizeParvoOutput :" << _retinaParameters.OPLandIplParvo.normaliseOutput
+ << "\n==> photoreceptorsLocalAdaptationSensitivity : " << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity
+ << "\n==> photoreceptorsTemporalConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant
+ << "\n==> photoreceptorsSpatialConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant
+ << "\n==> horizontalCellsGain : " << _retinaParameters.OPLandIplParvo.horizontalCellsGain
+ << "\n==> hcellsTemporalConstant : " << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant
+ << "\n==> hcellsSpatialConstant : " << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant
+ << "\n==> parvoGanglionCellsSensitivity : " << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity
+ <<"}\n";
+
+ // displaying IPL magno setup
+ outmessage<<"Current Retina instance setup :"
+ <<"\nIPLmagno"<<"{"
+ << "\n==> normaliseOutput : " << _retinaParameters.IplMagno.normaliseOutput
+ << "\n==> parasolCells_beta : " << _retinaParameters.IplMagno.parasolCells_beta
+ << "\n==> parasolCells_tau : " << _retinaParameters.IplMagno.parasolCells_tau
+ << "\n==> parasolCells_k : " << _retinaParameters.IplMagno.parasolCells_k
+ << "\n==> amacrinCellsTemporalCutFrequency : " << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency
+ << "\n==> V0CompressionParameter : " << _retinaParameters.IplMagno.V0CompressionParameter
+ << "\n==> localAdaptintegration_tau : " << _retinaParameters.IplMagno.localAdaptintegration_tau
+ << "\n==> localAdaptintegration_k : " << _retinaParameters.IplMagno.localAdaptintegration_k
+ <<"}";
+ return outmessage.str();
}
void Retina::write( std::string fs ) const
void Retina::write( FileStorage& fs ) const
{
- if (!fs.isOpened())
- return; // basic error case
- fs<<"OPLandIPLparvo"<<"{";
- fs << "colorMode" << _retinaParameters.OPLandIplParvo.colorMode;
- fs << "normaliseOutput" << _retinaParameters.OPLandIplParvo.normaliseOutput;
- fs << "photoreceptorsLocalAdaptationSensitivity" << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity;
- fs << "photoreceptorsTemporalConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant;
- fs << "photoreceptorsSpatialConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant;
- fs << "horizontalCellsGain" << _retinaParameters.OPLandIplParvo.horizontalCellsGain;
- fs << "hcellsTemporalConstant" << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant;
- fs << "hcellsSpatialConstant" << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant;
- fs << "ganglionCellsSensitivity" << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity;
- fs << "}";
- fs<<"IPLmagno"<<"{";
- fs << "normaliseOutput" << _retinaParameters.IplMagno.normaliseOutput;
- fs << "parasolCells_beta" << _retinaParameters.IplMagno.parasolCells_beta;
- fs << "parasolCells_tau" << _retinaParameters.IplMagno.parasolCells_tau;
- fs << "parasolCells_k" << _retinaParameters.IplMagno.parasolCells_k;
- fs << "amacrinCellsTemporalCutFrequency" << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency;
- fs << "V0CompressionParameter" << _retinaParameters.IplMagno.V0CompressionParameter;
- fs << "localAdaptintegration_tau" << _retinaParameters.IplMagno.localAdaptintegration_tau;
- fs << "localAdaptintegration_k" << _retinaParameters.IplMagno.localAdaptintegration_k;
- fs<<"}";
+ if (!fs.isOpened())
+ return; // basic error case
+ fs<<"OPLandIPLparvo"<<"{";
+ fs << "colorMode" << _retinaParameters.OPLandIplParvo.colorMode;
+ fs << "normaliseOutput" << _retinaParameters.OPLandIplParvo.normaliseOutput;
+ fs << "photoreceptorsLocalAdaptationSensitivity" << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity;
+ fs << "photoreceptorsTemporalConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant;
+ fs << "photoreceptorsSpatialConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant;
+ fs << "horizontalCellsGain" << _retinaParameters.OPLandIplParvo.horizontalCellsGain;
+ fs << "hcellsTemporalConstant" << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant;
+ fs << "hcellsSpatialConstant" << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant;
+ fs << "ganglionCellsSensitivity" << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity;
+ fs << "}";
+ fs<<"IPLmagno"<<"{";
+ fs << "normaliseOutput" << _retinaParameters.IplMagno.normaliseOutput;
+ fs << "parasolCells_beta" << _retinaParameters.IplMagno.parasolCells_beta;
+ fs << "parasolCells_tau" << _retinaParameters.IplMagno.parasolCells_tau;
+ fs << "parasolCells_k" << _retinaParameters.IplMagno.parasolCells_k;
+ fs << "amacrinCellsTemporalCutFrequency" << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency;
+ fs << "V0CompressionParameter" << _retinaParameters.IplMagno.V0CompressionParameter;
+ fs << "localAdaptintegration_tau" << _retinaParameters.IplMagno.localAdaptintegration_tau;
+ fs << "localAdaptintegration_k" << _retinaParameters.IplMagno.localAdaptintegration_k;
+ fs<<"}";
}
void Retina::setupOPLandIPLParvoChannel(const bool colorMode, const bool normaliseOutput, const float photoreceptorsLocalAdaptationSensitivity, const float photoreceptorsTemporalConstant, const float photoreceptorsSpatialConstant, const float horizontalCellsGain, const float HcellsTemporalConstant, const float HcellsSpatialConstant, const float ganglionCellsSensitivity)
{
- // retina core parameters setup
- _retinaFilter->setColorMode(colorMode);
- _retinaFilter->setPhotoreceptorsLocalAdaptationSensitivity(photoreceptorsLocalAdaptationSensitivity);
- _retinaFilter->setOPLandParvoParameters(0, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, HcellsTemporalConstant, HcellsSpatialConstant, ganglionCellsSensitivity);
- _retinaFilter->setParvoGanglionCellsLocalAdaptationSensitivity(ganglionCellsSensitivity);
- _retinaFilter->activateNormalizeParvoOutput_0_maxOutputValue(normaliseOutput);
-
+ // retina core parameters setup
+ _retinaFilter->setColorMode(colorMode);
+ _retinaFilter->setPhotoreceptorsLocalAdaptationSensitivity(photoreceptorsLocalAdaptationSensitivity);
+ _retinaFilter->setOPLandParvoParameters(0, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, HcellsTemporalConstant, HcellsSpatialConstant, ganglionCellsSensitivity);
+ _retinaFilter->setParvoGanglionCellsLocalAdaptationSensitivity(ganglionCellsSensitivity);
+ _retinaFilter->activateNormalizeParvoOutput_0_maxOutputValue(normaliseOutput);
+
// update the parameters structure
- _retinaParameters.OPLandIplParvo.colorMode = colorMode;
- _retinaParameters.OPLandIplParvo.normaliseOutput = normaliseOutput;
- _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity = photoreceptorsLocalAdaptationSensitivity;
- _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant = photoreceptorsTemporalConstant;
- _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant = photoreceptorsSpatialConstant;
- _retinaParameters.OPLandIplParvo.horizontalCellsGain = horizontalCellsGain;
- _retinaParameters.OPLandIplParvo.hcellsTemporalConstant = HcellsTemporalConstant;
- _retinaParameters.OPLandIplParvo.hcellsSpatialConstant = HcellsSpatialConstant;
- _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity = ganglionCellsSensitivity;
+ _retinaParameters.OPLandIplParvo.colorMode = colorMode;
+ _retinaParameters.OPLandIplParvo.normaliseOutput = normaliseOutput;
+ _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity = photoreceptorsLocalAdaptationSensitivity;
+ _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant = photoreceptorsTemporalConstant;
+ _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant = photoreceptorsSpatialConstant;
+ _retinaParameters.OPLandIplParvo.horizontalCellsGain = horizontalCellsGain;
+ _retinaParameters.OPLandIplParvo.hcellsTemporalConstant = HcellsTemporalConstant;
+ _retinaParameters.OPLandIplParvo.hcellsSpatialConstant = HcellsSpatialConstant;
+ _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity = ganglionCellsSensitivity;
}
void Retina::setupIPLMagnoChannel(const bool normaliseOutput, const float parasolCells_beta, const float parasolCells_tau, const float parasolCells_k, const float amacrinCellsTemporalCutFrequency, const float V0CompressionParameter, const float localAdaptintegration_tau, const float localAdaptintegration_k)
{
- _retinaFilter->setMagnoCoefficientsTable(parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k);
- _retinaFilter->activateNormalizeMagnoOutput_0_maxOutputValue(normaliseOutput);
+ _retinaFilter->setMagnoCoefficientsTable(parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k);
+ _retinaFilter->activateNormalizeMagnoOutput_0_maxOutputValue(normaliseOutput);
// update the parameters structure
- _retinaParameters.IplMagno.normaliseOutput = normaliseOutput;
- _retinaParameters.IplMagno.parasolCells_beta = parasolCells_beta;
- _retinaParameters.IplMagno.parasolCells_tau = parasolCells_tau;
- _retinaParameters.IplMagno.parasolCells_k = parasolCells_k;
- _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency = amacrinCellsTemporalCutFrequency;
- _retinaParameters.IplMagno.V0CompressionParameter = V0CompressionParameter;
- _retinaParameters.IplMagno.localAdaptintegration_tau = localAdaptintegration_tau;
- _retinaParameters.IplMagno.localAdaptintegration_k = localAdaptintegration_k;
+ _retinaParameters.IplMagno.normaliseOutput = normaliseOutput;
+ _retinaParameters.IplMagno.parasolCells_beta = parasolCells_beta;
+ _retinaParameters.IplMagno.parasolCells_tau = parasolCells_tau;
+ _retinaParameters.IplMagno.parasolCells_k = parasolCells_k;
+ _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency = amacrinCellsTemporalCutFrequency;
+ _retinaParameters.IplMagno.V0CompressionParameter = V0CompressionParameter;
+ _retinaParameters.IplMagno.localAdaptintegration_tau = localAdaptintegration_tau;
+ _retinaParameters.IplMagno.localAdaptintegration_k = localAdaptintegration_k;
}
void Retina::run(const cv::Mat &inputMatToConvert)
{
- // first convert input image to the compatible format : std::valarray<float>
- const bool colorMode = _convertCvMat2ValarrayBuffer(inputMatToConvert, _inputBuffer);
- // process the retina
- if (!_retinaFilter->runFilter(_inputBuffer, colorMode, false, _retinaParameters.OPLandIplParvo.colorMode && colorMode, false))
- throw cv::Exception(-1, "Retina cannot be applied, wrong input buffer size", "Retina::run", "Retina.h", 0);
+ // first convert input image to the compatible format : std::valarray<float>
+ const bool colorMode = _convertCvMat2ValarrayBuffer(inputMatToConvert, _inputBuffer);
+ // process the retina
+ if (!_retinaFilter->runFilter(_inputBuffer, colorMode, false, _retinaParameters.OPLandIplParvo.colorMode && colorMode, false))
+ throw cv::Exception(-1, "Retina cannot be applied, wrong input buffer size", "Retina::run", "Retina.h", 0);
}
void Retina::getParvo(cv::Mat &retinaOutput_parvo)
{
- if (_retinaFilter->getColorMode())
- {
- // reallocate output buffer (if necessary)
- _convertValarrayBuffer2cvMat(_retinaFilter->getColorOutput(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), true, retinaOutput_parvo);
- }else
- {
- // reallocate output buffer (if necessary)
- _convertValarrayBuffer2cvMat(_retinaFilter->getContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_parvo);
- }
- //retinaOutput_parvo/=255.0;
+ if (_retinaFilter->getColorMode())
+ {
+ // reallocate output buffer (if necessary)
+ _convertValarrayBuffer2cvMat(_retinaFilter->getColorOutput(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), true, retinaOutput_parvo);
+ }else
+ {
+ // reallocate output buffer (if necessary)
+ _convertValarrayBuffer2cvMat(_retinaFilter->getContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_parvo);
+ }
+ //retinaOutput_parvo/=255.0;
}
void Retina::getMagno(cv::Mat &retinaOutput_magno)
{
- // reallocate output buffer (if necessary)
- _convertValarrayBuffer2cvMat(_retinaFilter->getMovingContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_magno);
- //retinaOutput_magno/=255.0;
+ // reallocate output buffer (if necessary)
+ _convertValarrayBuffer2cvMat(_retinaFilter->getMovingContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_magno);
+ //retinaOutput_magno/=255.0;
}
// original API level data accessors : copy buffers if size matches
const std::valarray<float> & Retina::getParvo() const {if (_retinaFilter->getColorMode())return _retinaFilter->getColorOutput(); /* implicite else */return _retinaFilter->getContours();}
// private method called by the constructors
-void Retina::_init(const cv::Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght)
+void Retina::_init(const cv::Size inputSz, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght)
{
- // basic error check
- if (inputSize.height*inputSize.width <= 0)
- throw cv::Exception(-1, "Bad retina size setup : size height and with must be superior to zero", "Retina::setup", "Retina.h", 0);
+ // basic error check
+ if (inputSz.height*inputSz.width <= 0)
+ throw cv::Exception(-1, "Bad retina size setup : size height and with must be superior to zero", "Retina::setup", "Retina.h", 0);
- unsigned int nbPixels=inputSize.height*inputSize.width;
- // resize buffers if size does not match
- _inputBuffer.resize(nbPixels*3); // buffer supports gray images but also 3 channels color buffers... (larger is better...)
+ unsigned int nbPixels=inputSz.height*inputSz.width;
+ // resize buffers if size does not match
+ _inputBuffer.resize(nbPixels*3); // buffer supports gray images but also 3 channels color buffers... (larger is better...)
- // allocate the retina model
+ // allocate the retina model
if (_retinaFilter)
delete _retinaFilter;
- _retinaFilter = new RetinaFilter(inputSize.height, inputSize.width, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
+ _retinaFilter = new RetinaFilter(inputSz.height, inputSz.width, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
- // prepare the default parameter XML file with default setup
+ // prepare the default parameter XML file with default setup
setup(_retinaParameters);
- // init retina
- _retinaFilter->clearAllBuffers();
+ // init retina
+ _retinaFilter->clearAllBuffers();
- // report current configuration
- std::cout<<printSetup()<<std::endl;
+ // report current configuration
+ std::cout<<printSetup()<<std::endl;
}
void Retina::_convertValarrayBuffer2cvMat(const std::valarray<float> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, cv::Mat &outBuffer)
{
- // fill output buffer with the valarray buffer
- const float *valarrayPTR=get_data(grayMatrixToConvert);
- if (!colorMode)
- {
- outBuffer.create(cv::Size(nbColumns, nbRows), CV_8U);
- for (unsigned int i=0;i<nbRows;++i)
- {
- for (unsigned int j=0;j<nbColumns;++j)
- {
- cv::Point2d pixel(j,i);
- outBuffer.at<unsigned char>(pixel)=(unsigned char)*(valarrayPTR++);
- }
- }
- }else
- {
- const unsigned int doubleNBpixels=_retinaFilter->getOutputNBpixels()*2;
- outBuffer.create(cv::Size(nbColumns, nbRows), CV_8UC3);
- for (unsigned int i=0;i<nbRows;++i)
- {
- for (unsigned int j=0;j<nbColumns;++j,++valarrayPTR)
- {
- cv::Point2d pixel(j,i);
- cv::Vec3b pixelValues;
- pixelValues[2]=(unsigned char)*(valarrayPTR);
- pixelValues[1]=(unsigned char)*(valarrayPTR+_retinaFilter->getOutputNBpixels());
- pixelValues[0]=(unsigned char)*(valarrayPTR+doubleNBpixels);
-
- outBuffer.at<cv::Vec3b>(pixel)=pixelValues;
- }
- }
- }
+ // fill output buffer with the valarray buffer
+ const float *valarrayPTR=get_data(grayMatrixToConvert);
+ if (!colorMode)
+ {
+ outBuffer.create(cv::Size(nbColumns, nbRows), CV_8U);
+ for (unsigned int i=0;i<nbRows;++i)
+ {
+ for (unsigned int j=0;j<nbColumns;++j)
+ {
+ cv::Point2d pixel(j,i);
+ outBuffer.at<unsigned char>(pixel)=(unsigned char)*(valarrayPTR++);
+ }
+ }
+ }else
+ {
+ const unsigned int doubleNBpixels=_retinaFilter->getOutputNBpixels()*2;
+ outBuffer.create(cv::Size(nbColumns, nbRows), CV_8UC3);
+ for (unsigned int i=0;i<nbRows;++i)
+ {
+ for (unsigned int j=0;j<nbColumns;++j,++valarrayPTR)
+ {
+ cv::Point2d pixel(j,i);
+ cv::Vec3b pixelValues;
+ pixelValues[2]=(unsigned char)*(valarrayPTR);
+ pixelValues[1]=(unsigned char)*(valarrayPTR+_retinaFilter->getOutputNBpixels());
+ pixelValues[0]=(unsigned char)*(valarrayPTR+doubleNBpixels);
+
+ outBuffer.at<cv::Vec3b>(pixel)=pixelValues;
+ }
+ }
+ }
}
bool Retina::_convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::valarray<float> &outputValarrayMatrix)
{
- // first check input consistency
- if (inputMatToConvert.empty())
- throw cv::Exception(-1, "Retina cannot be applied, input buffer is empty", "Retina::run", "Retina.h", 0);
+ // first check input consistency
+ if (inputMatToConvert.empty())
+ throw cv::Exception(-1, "Retina cannot be applied, input buffer is empty", "Retina::run", "Retina.h", 0);
+
+ // retrieve color mode from image input
+ int imageNumberOfChannels = inputMatToConvert.channels();
- // retreive color mode from image input
- int imageNumberOfChannels = inputMatToConvert.channels();
-
// convert to float AND fill the valarray buffer
- typedef float T; // define here the target pixel format, here, float
+ typedef float T; // target pixel format used for the conversion (float)
const int dsttype = DataType<T>::depth; // output buffer is float format
- if(imageNumberOfChannels==4)
+ if(imageNumberOfChannels==4)
+ {
+ // create a cv::Mat table (for RGBA planes)
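+ // the input is BGR(A), so plane 0 (blue) writes to the last third of the valarray and plane 2 (red) to the first third; the buffer thus stores R, G and B planes back to back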
+ cv::Mat planes[4] =
{
- // create a cv::Mat table (for RGBA planes)
- cv::Mat planes[] =
- {
- cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()*2]),
- cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()]),
- cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]),
- cv::Mat(inputMatToConvert.size(), dsttype) // last channel (alpha) does not point on the valarray (not usefull in our case)
- };
- // split color cv::Mat in 4 planes... it fills valarray directely
- cv::split(cv::Mat_<Vec<T, 4> >(inputMatToConvert), planes);
- }else if (imageNumberOfChannels==3)
- {
- // create a cv::Mat table (for RGB planes)
- cv::Mat planes[] =
- {
- cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()*2]),
- cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()]),
- cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0])
- };
- // split color cv::Mat in 3 planes... it fills valarray directely
- cv::split(cv::Mat_<Vec<T, 3> >(inputMatToConvert), planes);
- }else if(imageNumberOfChannels==1)
- {
- // create a cv::Mat header for the valarray
- cv::Mat dst(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]);
- inputMatToConvert.convertTo(dst, dsttype);
- }
+ cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()*2]),
+ cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()]),
+ cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0])
+ };
+ planes[3] = cv::Mat(inputMatToConvert.size(), dsttype); // last channel (alpha) does not point into the valarray (not useful in our case)
+ // split the color cv::Mat into 4 planes... this fills the valarray directly
+ cv::split(cv::Mat_<Vec<T, 4> >(inputMatToConvert), planes);
+ }
+ else if (imageNumberOfChannels==3)
+ {
+ // create a cv::Mat table (for RGB planes)
+ cv::Mat planes[] =
+ {
+ cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()*2]),
+ cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()]),
+ cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0])
+ };
+ // split the color cv::Mat into 3 planes... this fills the valarray directly
+ cv::split(cv::Mat_<Vec<T, 3> >(inputMatToConvert), planes);
+ }
+ else if(imageNumberOfChannels==1)
+ {
+ // create a cv::Mat header for the valarray
+ cv::Mat dst(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]);
+ inputMatToConvert.convertTo(dst, dsttype);
+ }
else
CV_Error(CV_StsUnsupportedFormat, "input image must be single channel (gray levels), bgr format (color) or bgra (color with transparency which won't be considered");
-
+
return imageNumberOfChannels>1; // return bool : false for gray level image processing, true for color mode
}
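For reference, here is a minimal standalone sketch of the plane-splitting trick used above (it is not the library code itself; the helper name and the std::valarray buffer type are assumptions). Three cv::Mat headers are laid over one contiguous float buffer so that cv::split() demultiplexes a BGR image straight into an [R plane | G plane | B plane] layout, which is how the retina code reads the buffer back.

#include <opencv2/core/core.hpp>
#include <valarray>

// Hypothetical helper, assuming OpenCV 2.x: demultiplex a CV_8UC3 BGR image
// into one contiguous float buffer laid out as [R plane | G plane | B plane].
static void demultiplexBGRToValarray(const cv::Mat& bgr, std::valarray<float>& buffer)
{
    const int nbPixels = bgr.rows * bgr.cols;
    buffer.resize(3 * nbPixels);
    cv::Mat planes[] =
    {
        cv::Mat(bgr.size(), CV_32F, &buffer[2 * nbPixels]), // channel 0 (B) -> last third
        cv::Mat(bgr.size(), CV_32F, &buffer[nbPixels]),     // channel 1 (G) -> middle third
        cv::Mat(bgr.size(), CV_32F, &buffer[0])             // channel 2 (R) -> first third
    };
    // cv::Mat_<cv::Vec3f>(bgr) converts the 8-bit input to float; cv::split()
    // then writes each channel straight into the valarray through the headers,
    // because the plane Mats are already allocated with the right size and type.
    cv::split(cv::Mat_<cv::Vec3f>(bgr), planes);
}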
}else
{
- register const float *multiplexedColorFramePTR= get_data(multiplexedColorFrame);
- for (unsigned int indexc=0; indexc<_filterOutput.getNBpixels() ; ++indexc, ++chrominancePTR, ++colorLocalDensityPTR, ++luminance, ++multiplexedColorFramePTR)
+ register const float *multiplexedColorFramePTR1= get_data(multiplexedColorFrame);
+ for (unsigned int indexc=0; indexc<_filterOutput.getNBpixels() ; ++indexc, ++chrominancePTR, ++colorLocalDensityPTR, ++luminance, ++multiplexedColorFramePTR1)
{
// normalize by photoreceptors density
float Cr=*(chrominancePTR)*_colorLocalDensity[indexc];
float Cg=*(chrominancePTR+_filterOutput.getNBpixels())*_colorLocalDensity[indexc+_filterOutput.getNBpixels()];
float Cb=*(chrominancePTR+_filterOutput.getDoubleNBpixels())*_colorLocalDensity[indexc+_filterOutput.getDoubleNBpixels()];
*luminance=(Cr+Cg+Cb)*_pG;
- _demultiplexedTempBuffer[_colorSampling[indexc]] = *multiplexedColorFramePTR - *luminance;
+ _demultiplexedTempBuffer[_colorSampling[indexc]] = *multiplexedColorFramePTR1 - *luminance;
}
using std::min;
using std::sqrt;
}
-namespace
+namespace
{
- const static Scalar colors[] =
+ const static Scalar colors[] =
{
CV_RGB(255, 0, 0),
CV_RGB( 0, 255, 0),
template<class FwIt, class T> void iota(FwIt first, FwIt last, T value) { while(first != last) *first++ = value++; }
-void computeNormals( const Octree& Octree, const vector<Point3f>& centers, vector<Point3f>& normals,
+void computeNormals( const Octree& Octree, const vector<Point3f>& centers, vector<Point3f>& normals,
vector<uchar>& mask, float normalRadius, int minNeighbors = 20)
-{
+{
size_t normals_size = centers.size();
normals.resize(normals_size);
-
+
if (mask.size() != normals_size)
{
- size_t m = mask.size();
+ size_t m = mask.size();
mask.resize(normals_size);
if (normals_size > m)
for(; m < normals_size; ++m)
mask[m] = 1;
}
-
+
vector<Point3f> buffer;
buffer.reserve(128);
SVD svd;
mean.x /= buf_size;
mean.y /= buf_size;
mean.z /= buf_size;
-
+
double pxpx = 0;
double pypy = 0;
double pzpz = 0;
/*normals[n] = Point3f( (float)((double*)svd.vt.data)[6],
(float)((double*)svd.vt.data)[7],
- (float)((double*)svd.vt.data)[8] );*/
- normals[n] = reinterpret_cast<Point3d*>(svd.vt.data)[2];
- mask[n] = 1;
+ (float)((double*)svd.vt.data)[8] );*/
+ normals[n] = reinterpret_cast<Point3d*>(svd.vt.data)[2];
+ mask[n] = 1;
}
}
}
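As a side note, the normal estimation above boils down to a PCA of each point's neighborhood: the right singular vector of the 3x3 covariance matrix associated with the smallest singular value is taken as the surface normal. Below is a hedged, self-contained sketch of that step (the helper name and the use of cv::Matx are illustrative assumptions, not the code above).

#include <opencv2/core/core.hpp>
#include <vector>

// Illustrative only: estimate a normal for one point from its spherical
// neighborhood by taking the singular vector of the centered covariance
// matrix that corresponds to the smallest singular value.
static cv::Point3f estimateNormal(const std::vector<cv::Point3f>& neighbors)
{
    float mx = 0.f, my = 0.f, mz = 0.f;
    for (size_t i = 0; i < neighbors.size(); ++i)
    {
        mx += neighbors[i].x; my += neighbors[i].y; mz += neighbors[i].z;
    }
    const float n = (float)neighbors.size();
    mx /= n; my /= n; mz /= n;

    cv::Matx33d cov = cv::Matx33d::zeros();
    for (size_t i = 0; i < neighbors.size(); ++i)
    {
        double px = neighbors[i].x - mx, py = neighbors[i].y - my, pz = neighbors[i].z - mz;
        cov(0,0) += px*px; cov(0,1) += px*py; cov(0,2) += px*pz;
        cov(1,1) += py*py; cov(1,2) += py*pz; cov(2,2) += pz*pz;
    }
    cov(1,0) = cov(0,1); cov(2,0) = cov(0,2); cov(2,1) = cov(1,2);

    cv::SVD svd(cv::Mat(cov)); // rows of svd.vt are the right singular vectors
    // last row corresponds to the smallest singular value -> normal direction
    return cv::Point3f((float)svd.vt.at<double>(2,0),
                       (float)svd.vt.at<double>(2,1),
                       (float)svd.vt.at<double>(2,2));
}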
inline __m128i _mm_mullo_epi32_emul(const __m128i& a, __m128i& b)
-{
+{
__m128i pack = _mm_packs_epi32(a, a);
- return _mm_unpacklo_epi16(_mm_mullo_epi16(pack, b), _mm_mulhi_epi16(pack, b));
+ return _mm_unpacklo_epi16(_mm_mullo_epi16(pack, b), _mm_mulhi_epi16(pack, b));
}
#endif
-void computeSpinImages( const Octree& Octree, const vector<Point3f>& points, const vector<Point3f>& normals,
+void computeSpinImages( const Octree& Octree, const vector<Point3f>& points, const vector<Point3f>& normals,
vector<uchar>& mask, Mat& spinImages, int imageWidth, float binSize)
-{
+{
float pixelsPerMeter = 1.f / binSize;
- float support = imageWidth * binSize;
-
+ float support = imageWidth * binSize;
+
assert(normals.size() == points.size());
assert(mask.size() == points.size());
-
+
size_t points_size = points.size();
mask.resize(points_size);
int t = cvGetThreadNum();
vector<Point3f>& pointsInSphere = pointsInSpherePool[t];
-
+
const Point3f& center = points[i];
Octree.getPointsWithinSphere(center, searchRad, pointsInSphere);
}
const Point3f& normal = normals[i];
-
+
float rotmat[9];
initRotationMat(normal, rotmat);
Point3f new_center;
{
__m128 rotmatSSE[3];
convertTransformMatrix(rotmat, (float*)rotmatSSE);
-
+
__m128 center_x4 = _mm_set1_ps(new_center.x);
__m128 center_y4 = _mm_set1_ps(new_center.y);
__m128 center_z4 = _mm_set1_ps(new_center.z + halfSuppport);
__m128 z0 = _mm_unpackhi_ps(pt0, pt1); // z0 z1 . .
__m128 z1 = _mm_unpackhi_ps(pt2, pt3); // z2 z3 . .
__m128 beta4 = _mm_sub_ps(center_z4, _mm_movelh_ps(z0, z1)); // b0 b1 b2 b3
-
+
__m128 xy0 = _mm_unpacklo_ps(pt0, pt1); // x0 x1 y0 y1
__m128 xy1 = _mm_unpacklo_ps(pt2, pt3); // x2 x3 y2 y3
__m128 x4 = _mm_movelh_ps(xy0, xy1); // x0 x1 x2 x3
x4 = _mm_sub_ps(x4, center_x4);
y4 = _mm_sub_ps(y4, center_y4);
__m128 alpha4 = _mm_sqrt_ps(_mm_add_ps(_mm_mul_ps(x4,x4),_mm_mul_ps(y4,y4)));
-
+
__m128 n1f4 = _mm_mul_ps( beta4, ppm4); /* beta4 float */
__m128 n2f4 = _mm_mul_ps(alpha4, ppm4); /* alpha4 float */
__m128 f1 = _mm_sub_ps( n1f4, _mm_cvtepi32_ps(n1) ); /* { beta4 } */
__m128 f2 = _mm_sub_ps( n2f4, _mm_cvtepi32_ps(n2) ); /* { alpha4 } */
- __m128 f1f2 = _mm_mul_ps(f1, f2); // f1 * f2
+ __m128 f1f2 = _mm_mul_ps(f1, f2); // f1 * f2
__m128 omf1omf2 = _mm_add_ps(_mm_sub_ps(_mm_sub_ps(one4f, f2), f1), f1f2); // (1-f1) * (1-f2)
-
- __m128i mask = _mm_and_si128(
+
+ __m128i _mask = _mm_and_si128(
_mm_andnot_si128(_mm_cmpgt_epi32(zero4, n1), _mm_cmpgt_epi32(height4m1, n1)),
_mm_andnot_si128(_mm_cmpgt_epi32(zero4, n2), _mm_cmpgt_epi32(width4m1, n2)));
- __m128 maskf = _mm_cmpneq_ps(_mm_cvtepi32_ps(mask), zero4f);
-
+ __m128 maskf = _mm_cmpneq_ps(_mm_cvtepi32_ps(_mask), zero4f);
+
__m128 v00 = _mm_and_ps( omf1omf2 , maskf); // a00 b00 c00 d00
__m128 v01 = _mm_and_ps( _mm_sub_ps( f2, f1f2 ), maskf); // a01 b01 c01 d01
__m128 v10 = _mm_and_ps( _mm_sub_ps( f1, f1f2 ), maskf); // a10 b10 c10 d10
__m128 v11 = _mm_and_ps( f1f2 , maskf); // a11 b11 c11 d11
- __m128i ofs4 = _mm_and_si128(_mm_add_epi32(_mm_mullo_epi32_emul(n1, step4), n2), mask);
+ __m128i ofs4 = _mm_and_si128(_mm_add_epi32(_mm_mullo_epi32_emul(n1, step4), n2), _mask);
_mm_store_si128((__m128i*)o, ofs4);
__m128 t0 = _mm_unpacklo_ps(v00, v01); // a00 a01 b00 b01
if (beta >= support || beta < 0)
continue;
- alpha = sqrt( (new_center.x - pt.x) * (new_center.x - pt.x) +
- (new_center.y - pt.y) * (new_center.y - pt.y) );
-
+ alpha = sqrt( (new_center.x - pt.x) * (new_center.x - pt.x) +
+ (new_center.y - pt.y) * (new_center.y - pt.y) );
+
float n1f = beta * pixelsPerMeter;
float n2f = alpha * pixelsPerMeter;
float f1 = n1f - n1;
float f2 = n2f - n2;
- if ((unsigned)n1 >= (unsigned)(spinImage.rows-1) ||
+ if ((unsigned)n1 >= (unsigned)(spinImage.rows-1) ||
(unsigned)n2 >= (unsigned)(spinImage.cols-1))
continue;
vector<double> dist(tryNum * neighbors);
vector<int> inds(tryNum * neighbors);
- vector<Point3f> query;
+ vector<Point3f> query;
- RNG& rng = theRNG();
+ RNG& rng = theRNG();
for(int i = 0; i < tryNum; ++i)
query.push_back(vtx[rng.next() % vtx.size()]);
-
+
CvMat cvinds = cvMat( (int)tryNum, neighbors, CV_32S, &inds[0] );
- CvMat cvdist = cvMat( (int)tryNum, neighbors, CV_64F, &dist[0] );
+ CvMat cvdist = cvMat( (int)tryNum, neighbors, CV_64F, &dist[0] );
CvMat cvquery = cvMat( (int)tryNum, 3, CV_32F, &query[0] );
- cvFindFeatures(tr, &cvquery, &cvinds, &cvdist, neighbors, 50);
+ cvFindFeatures(tr, &cvquery, &cvinds, &cvdist, neighbors, 50);
cvReleaseFeatureTree(tr);
- const int invalid_dist = -2;
+ const int invalid_dist = -2;
for(int i = 0; i < tryNum; ++i)
if (inds[i] == -1)
dist[i] = invalid_dist;
dist.resize(remove(dist.begin(), dist.end(), invalid_dist) - dist.begin());
-
+
sort(dist, less<double>());
-
+
return resolution = (float)dist[ dist.size() / 2 ];
#else
CV_Error(CV_StsNotImplemented, "");
{
buildOctree();
vector<uchar> mask(vtx.size(), 0);
- for(size_t i = 0; i < subset.size(); ++i)
+ for(size_t i = 0; i < subset.size(); ++i)
mask[subset[i]] = 1;
::computeNormals(octree, vtx, normals, mask, normalRadius, minNeighbors);
}
ofstream ofs(file.c_str());
ofs << "#VRML V2.0 utf8" << endl;
- ofs << "Shape" << std::endl << "{" << endl;
- ofs << "geometry PointSet" << endl << "{" << endl;
- ofs << "coord Coordinate" << endl << "{" << endl;
- ofs << "point[" << endl;
+ ofs << "Shape" << std::endl << "{" << endl;
+ ofs << "geometry PointSet" << endl << "{" << endl;
+ ofs << "coord Coordinate" << endl << "{" << endl;
+ ofs << "point[" << endl;
for(size_t i = 0; i < vtx.size(); ++i)
ofs << vtx[i].x << " " << vtx[i].y << " " << vtx[i].z << endl;
-
- ofs << "]" << endl; //point[
- ofs << "}" << endl; //Coordinate{
+
+ ofs << "]" << endl; //point[
+ ofs << "}" << endl; //Coordinate{
if (vtx.size() == _colors.size())
{
ofs << "color Color" << endl << "{" << endl;
ofs << "color[" << endl;
-
+
for(size_t i = 0; i < _colors.size(); ++i)
ofs << (float)_colors[i][2] << " " << (float)_colors[i][1] << " " << (float)_colors[i][0] << endl;
-
+
ofs << "]" << endl; //color[
- ofs << "}" << endl; //color Color{
+ ofs << "}" << endl; //color Color{
}
- ofs << "}" << endl; //PointSet{
- ofs << "}" << endl; //Shape{
+ ofs << "}" << endl; //PointSet{
+ ofs << "}" << endl; //Shape{
}
bool cv::SpinImageModel::spinCorrelation(const Mat& spin1, const Mat& spin2, float lambda, float& result)
{
struct Math { static double atanh(double x) { return 0.5 * std::log( (1 + x) / (1 - x) ); } };
-
+
const float* s1 = spin1.ptr<float>();
const float* s2 = spin2.ptr<float>();
- int spin_sz = spin1.cols * spin1.rows;
+ int spin_sz = spin1.cols * spin1.rows;
double sum1 = 0.0, sum2 = 0.0, sum12 = 0.0, sum11 = 0.0, sum22 = 0.0;
int N = 0;
int i = 0;
#if CV_SSE2//____________TEMPORARY_DISABLED_____________
- float CV_DECL_ALIGNED(16) su1[4], su2[4], su11[4], su22[4], su12[4], n[4];
-
+ float CV_DECL_ALIGNED(16) su1[4], su2[4], su11[4], su22[4], su12[4], n[4];
+
__m128 zerof4 = _mm_setzero_ps();
__m128 onef4 = _mm_set1_ps(1.f);
- __m128 Nf4 = zerof4;
+ __m128 Nf4 = zerof4;
__m128 sum1f4 = zerof4;
__m128 sum2f4 = zerof4;
__m128 sum11f4 = zerof4;
__m128 sum22f4 = zerof4;
- __m128 sum12f4 = zerof4;
+ __m128 sum12f4 = zerof4;
for(; i < spin_sz - 5; i += 4)
{
- __m128 v1f4 = _mm_loadu_ps(s1 + i);
- __m128 v2f4 = _mm_loadu_ps(s2 + i);
+ __m128 v1f4 = _mm_loadu_ps(s1 + i);
+ __m128 v2f4 = _mm_loadu_ps(s2 + i);
__m128 mskf4 = _mm_and_ps(_mm_cmpneq_ps(v1f4, zerof4), _mm_cmpneq_ps(v2f4, zerof4));
- if( !_mm_movemask_ps(mskf4) )
+ if( !_mm_movemask_ps(mskf4) )
continue;
-
+
Nf4 = _mm_add_ps(Nf4, _mm_and_ps(onef4, mskf4));
v1f4 = _mm_and_ps(v1f4, mskf4);
v2f4 = _mm_and_ps(v2f4, mskf4);
-
+
sum1f4 = _mm_add_ps(sum1f4, v1f4);
sum2f4 = _mm_add_ps(sum2f4, v2f4);
sum11f4 = _mm_add_ps(sum11f4, _mm_mul_ps(v1f4, v1f4));
sum22f4 = _mm_add_ps(sum22f4, _mm_mul_ps(v2f4, v2f4));
- sum12f4 = _mm_add_ps(sum12f4, _mm_mul_ps(v1f4, v2f4));
+ sum12f4 = _mm_add_ps(sum12f4, _mm_mul_ps(v1f4, v2f4));
}
_mm_store_ps( su1, sum1f4 );
_mm_store_ps( su2, sum2f4 );
if( !v1 || !v2 )
continue;
N++;
-
- sum1 += v1;
- sum2 += v2;
- sum11 += v1 * v1;
- sum22 += v2 * v2;
+
+ sum1 += v1;
+ sum2 += v2;
+ sum11 += v1 * v1;
+ sum22 += v2 * v2;
sum12 += v1 * v2;
}
if( N < 4 )
double corr = (Nsum12 - sum1 * sum2) / sqrt( (Nsum11 - sum1sum1) * (Nsum22 - sum2sum2) );
double atanh = Math::atanh(corr);
result = (float)( atanh * atanh - lambda * ( 1.0 / (N - 3) ) );
- return true;
+ return true;
}
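A plain scalar sketch of the similarity measure computed above may help when reading the SSE path: Pearson correlation over the bins that are non-zero in both spin images, Fisher z-transformed and squared, with a lambda / (N - 3) penalty for small overlaps. The helper name is hypothetical; it mirrors the scalar tail of the function rather than introducing a new API.

#include <cmath>

// Illustrative scalar version (assumption: s1/s2 point to n float bins each).
static bool spinSimilarity(const float* s1, const float* s2, int n, float lambda, float& result)
{
    double sum1 = 0, sum2 = 0, sum11 = 0, sum22 = 0, sum12 = 0;
    int N = 0;
    for (int i = 0; i < n; ++i)
    {
        double v1 = s1[i], v2 = s2[i];
        if (v1 == 0 || v2 == 0) // only bins filled in both images contribute
            continue;
        ++N;
        sum1 += v1;  sum2 += v2;
        sum11 += v1 * v1;  sum22 += v2 * v2;  sum12 += v1 * v2;
    }
    if (N < 4)
        return false; // too little overlap to correlate reliably
    double corr = (N * sum12 - sum1 * sum2) /
                  std::sqrt((N * sum11 - sum1 * sum1) * (N * sum22 - sum2 * sum2));
    double z = 0.5 * std::log((1 + corr) / (1 - corr)); // atanh(corr)
    result = (float)(z * z - lambda / (N - 3));
    return true;
}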
inline Point2f cv::SpinImageModel::calcSpinMapCoo(const Point3f& p, const Point3f& v, const Point3f& n)
-{
- /*Point3f PmV(p.x - v.x, p.y - v.y, p.z - v.z);
- float normalNorm = (float)norm(n);
+{
+ /*Point3f PmV(p.x - v.x, p.y - v.y, p.z - v.z);
+ float normalNorm = (float)norm(n);
float beta = PmV.dot(n) / normalNorm;
float pmcNorm = (float)norm(PmV);
float alpha = sqrt( pmcNorm * pmcNorm - beta * beta);
float pmv_x = p.x - v.x, pmv_y = p.y - v.y, pmv_z = p.z - v.z;
float beta = (pmv_x * n.x + pmv_y * n.y + pmv_z * n.z) / sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
- float alpha = sqrt( pmv_x * pmv_x + pmv_y * pmv_y + pmv_z * pmv_z - beta * beta);
+ float alpha = sqrt( pmv_x * pmv_x + pmv_y * pmv_y + pmv_z * pmv_z - beta * beta);
return Point2f(alpha, beta);
}
inline float cv::SpinImageModel::geometricConsistency(const Point3f& pointScene1, const Point3f& normalScene1,
const Point3f& pointModel1, const Point3f& normalModel1,
- const Point3f& pointScene2, const Point3f& normalScene2,
+ const Point3f& pointScene2, const Point3f& normalScene2,
const Point3f& pointModel2, const Point3f& normalModel2)
-{
+{
Point2f Sm2_to_m1, Ss2_to_s1;
Point2f Sm1_to_m2, Ss1_to_s2;
double n_Sm2_to_m1 = norm(Sm2_to_m1 = calcSpinMapCoo(pointModel2, pointModel1, normalModel1));
- double n_Ss2_to_s1 = norm(Ss2_to_s1 = calcSpinMapCoo(pointScene2, pointScene1, normalScene1));
+ double n_Ss2_to_s1 = norm(Ss2_to_s1 = calcSpinMapCoo(pointScene2, pointScene1, normalScene1));
double gc21 = 2 * norm(Sm2_to_m1 - Ss2_to_s1) / (n_Sm2_to_m1 + n_Ss2_to_s1 ) ;
-
+
double n_Sm1_to_m2 = norm(Sm1_to_m2 = calcSpinMapCoo(pointModel1, pointModel2, normalModel2));
double n_Ss1_to_s2 = norm(Ss1_to_s2 = calcSpinMapCoo(pointScene1, pointScene2, normalScene2));
inline float cv::SpinImageModel::groupingCreteria(const Point3f& pointScene1, const Point3f& normalScene1,
const Point3f& pointModel1, const Point3f& normalModel1,
- const Point3f& pointScene2, const Point3f& normalScene2,
- const Point3f& pointModel2, const Point3f& normalModel2,
+ const Point3f& pointScene2, const Point3f& normalScene2,
+ const Point3f& pointModel2, const Point3f& normalModel2,
float gamma)
-{
+{
Point2f Sm2_to_m1, Ss2_to_s1;
Point2f Sm1_to_m2, Ss1_to_s2;
double gc21 = 2 * norm(Sm2_to_m1 - Ss2_to_s1) / (n_Sm2_to_m1 + n_Ss2_to_s1 );
double wgc21 = gc21 / (1 - exp( -(n_Sm2_to_m1 + n_Ss2_to_s1) * gamma05_inv ) );
-
+
double n_Sm1_to_m2 = norm(Sm1_to_m2 = calcSpinMapCoo(pointModel1, pointModel2, normalModel2));
double n_Ss1_to_s2 = norm(Ss1_to_s2 = calcSpinMapCoo(pointScene1, pointScene2, normalScene2));
cv::SpinImageModel::SpinImageModel(const Mesh3D& _mesh) : mesh(_mesh) , out(0)
-{
+{
if (mesh.vtx.empty())
throw Mesh3D::EmptyMeshException();
- defaultParams();
+ defaultParams();
}
cv::SpinImageModel::SpinImageModel() : out(0) { defaultParams(); }
cv::SpinImageModel::~SpinImageModel() {}
minNeighbors = 20;
binSize = 0.f; /* autodetect according to mesh resolution */
- imageWidth = 32;
-
+ imageWidth = 32;
+
lambda = 0.f; /* autodetect according to medan non zero images bin */
gamma = 0.f; /* autodetect according to mesh resolution */
if (num == 0)
return Mat();
- RNG& rng = theRNG();
+ RNG& rng = theRNG();
vector<Mat> spins;
for(int i = 0; i < num; ++i)
- spins.push_back(getSpinImage( rng.next() % spinNum ).reshape(1, imageWidth));
-
+ spins.push_back(getSpinImage( rng.next() % spinNum ).reshape(1, imageWidth));
+
if (separateScale)
for(int i = 0; i < num; ++i)
{
double max;
Mat spin8u;
- minMaxLoc(spins[i], 0, &max);
+ minMaxLoc(spins[i], 0, &max);
spins[i].convertTo(spin8u, CV_8U, -255.0/max, 255.0);
spins[i] = spin8u;
}
else
- {
+ {
double totalMax = 0;
for(int i = 0; i < num; ++i)
{
double m;
- minMaxLoc(spins[i], 0, &m);
+ minMaxLoc(spins[i], 0, &m);
totalMax = max(m, totalMax);
}
int sz = spins.front().cols;
- Mat result((int)(yCount * sz + (yCount - 1)), (int)(xCount * sz + (xCount - 1)), CV_8UC3);
+ Mat result((int)(yCount * sz + (yCount - 1)), (int)(xCount * sz + (xCount - 1)), CV_8UC3);
result = colors[(static_cast<int64>(cvGetTickCount()/cvGetTickFrequency())/1000) % colors_mum];
int pos = 0;
for(int y = 0; y < (int)yCount; ++y)
- for(int x = 0; x < (int)xCount; ++x)
+ for(int x = 0; x < (int)xCount; ++x)
if (pos < num)
{
int starty = (y + 0) * sz + y;
cvtColor(spins[pos++], color, CV_GRAY2BGR);
Mat roi = result(Range(starty, endy), Range(startx, endx));
color.copyTo(roi);
- }
+ }
return result;
}
subset.resize(setSize);
for(size_t i = 0; i < setSize; ++i)
{
- int pos = rnd.next() % left.size();
+ int pos = rnd.next() % (int)left.size();
subset[i] = (int)left[pos];
- left[pos] = left.back();
- left.resize(left.size() - 1);
+ left[pos] = left.back();
+ left.resize(left.size() - 1);
}
sort(subset, less<int>());
}
subset = ss;
}
-void cv::SpinImageModel::repackSpinImages(const vector<uchar>& mask, Mat& spinImages, bool reAlloc) const
-{
+void cv::SpinImageModel::repackSpinImages(const vector<uchar>& mask, Mat& _spinImages, bool reAlloc) const
+{
if (reAlloc)
{
size_t spinCount = mask.size() - count(mask.begin(), mask.end(), (uchar)0);
- Mat newImgs((int)spinCount, spinImages.cols, spinImages.type());
+ Mat newImgs((int)spinCount, _spinImages.cols, _spinImages.type());
int pos = 0;
for(size_t t = 0; t < mask.size(); ++t)
if (mask[t])
{
Mat row = newImgs.row(pos++);
- spinImages.row((int)t).copyTo(row);
+ _spinImages.row((int)t).copyTo(row);
}
- spinImages = newImgs;
+ _spinImages = newImgs;
}
else
{
int first = dest + 1;
for (; first != last; ++first)
- if (mask[first] != 0)
+ if (mask[first] != 0)
{
- Mat row = spinImages.row(dest);
- spinImages.row(first).copyTo(row);
+ Mat row = _spinImages.row(dest);
+ _spinImages.row(first).copyTo(row);
++dest;
}
- spinImages = spinImages.rowRange(0, dest);
+ _spinImages = _spinImages.rowRange(0, dest);
}
}
if (binSize == 0.f)
{
if (mesh.resolution == -1.f)
- mesh.estimateResolution();
+ mesh.estimateResolution();
binSize = mesh.resolution;
}
- /* estimate normalRadius */
- normalRadius = normalRadius != 0.f ? normalRadius : binSize * imageWidth / 2;
+ /* estimate normalRadius */
+ normalRadius = normalRadius != 0.f ? normalRadius : binSize * imageWidth / 2;
- mesh.buildOctree();
+ mesh.buildOctree();
if (subset.empty())
{
mesh.computeNormals(normalRadius, minNeighbors);
else
mesh.computeNormals(subset, normalRadius, minNeighbors);
- vector<uchar> mask(mesh.vtx.size(), 0);
+ vector<uchar> mask(mesh.vtx.size(), 0);
for(size_t i = 0; i < subset.size(); ++i)
- if (mesh.normals[subset[i]] == Mesh3D::allzero)
- subset[i] = -1;
+ if (mesh.normals[subset[i]] == Mesh3D::allzero)
+ subset[i] = -1;
else
mask[subset[i]] = 1;
subset.resize( remove(subset.begin(), subset.end(), -1) - subset.begin() );
-
+
vector<Point3f> vtx;
- vector<Point3f> normals;
+ vector<Point3f> normals;
for(size_t i = 0; i < mask.size(); ++i)
if(mask[i])
{
for(size_t i = 0; i < mask.size(); ++i)
if(mask[i])
if (spinMask[mask_pos++] == 0)
- subset.resize( remove(subset.begin(), subset.end(), (int)i) - subset.begin() );
+ subset.resize( remove(subset.begin(), subset.end(), (int)i) - subset.begin() );
}
void cv::SpinImageModel::matchSpinToModel(const Mat& spin, vector<int>& indeces, vector<float>& corrCoeffs, bool useExtremeOutliers) const
vector<uchar> masks(model.spinImages.rows);
vector<float> cleanCorrs;
cleanCorrs.reserve(model.spinImages.rows);
-
+
for(int i = 0; i < model.spinImages.rows; ++i)
{
- masks[i] = spinCorrelation(spin, model.spinImages.row(i), model.lambda, corrs[i]);
+ masks[i] = spinCorrelation(spin, model.spinImages.row(i), model.lambda, corrs[i]);
if (masks[i])
cleanCorrs.push_back(corrs[i]);
}
-
+
/* Filtering by measure histogram */
size_t total = cleanCorrs.size();
if(total < 5)
return;
sort(cleanCorrs, less<float>());
-
+
float lower_fourth = cleanCorrs[(1 * total) / 4 - 1];
float upper_fourth = cleanCorrs[(3 * total) / 4 - 0];
float fourth_spread = upper_fourth - lower_fourth;
//extreme or moderate?
- float coef = useExtremeOutliers ? 3.0f : 1.5f;
+ float coef = useExtremeOutliers ? 3.0f : 1.5f;
+
+ float histThresHi = upper_fourth + coef * fourth_spread;
+ //float histThresLo = lower_fourth - coef * fourth_spread;
- float histThresHi = upper_fourth + coef * fourth_spread;
- //float histThresLo = lower_fourth - coef * fourth_spread;
-
for(size_t i = 0; i < corrs.size(); ++i)
if (masks[i])
if (/* corrs[i] < histThresLo || */ corrs[i] > histThresHi)
{
indeces.push_back((int)i);
- corrCoeffs.push_back(corrs[i]);
+ corrCoeffs.push_back(corrs[i]);
}
-}
+}
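The quartile filtering right above is an upper Tukey-fence outlier test on the correlation coefficients. A small standalone sketch of that threshold (function name is an assumption):

#include <vector>

// Illustrative: upper Tukey fence Q3 + k * IQR, with k = 1.5 for "moderate"
// and k = 3.0 for "extreme" outliers; `sortedCorrs` must be sorted ascending
// and contain at least 5 elements, as the code above requires.
static float upperTukeyFence(const std::vector<float>& sortedCorrs, bool useExtremeOutliers)
{
    const size_t total = sortedCorrs.size();
    const float lowerFourth = sortedCorrs[(1 * total) / 4 - 1];
    const float upperFourth = sortedCorrs[(3 * total) / 4];
    const float fourthSpread = upperFourth - lowerFourth;
    const float coef = useExtremeOutliers ? 3.0f : 1.5f;
    return upperFourth + coef * fourthSpread;
}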
-namespace
+namespace
{
struct Match
{
- int sceneInd;
+ int sceneInd;
int modelInd;
float measure;
{
const float* wgcLine = mat.ptr<float>((int)corespInd);
float maximum = numeric_limits<float>::min();
-
+
for(citer pos = group.begin(); pos != group.end(); ++pos)
maximum = max(wgcLine[*pos], maximum);
}
void cv::SpinImageModel::match(const SpinImageModel& scene, vector< vector<Vec2i> >& result)
-{
+{
if (mesh.vtx.empty())
throw Mesh3D::EmptyMeshException();
SpinImageModel& model = *this;
const float infinity = numeric_limits<float>::infinity();
const float float_max = numeric_limits<float>::max();
-
+
/* estimate gamma */
if (model.gamma == 0.f)
{
if (model.mesh.resolution == -1.f)
- model.mesh.estimateResolution();
+ model.mesh.estimateResolution();
model.gamma = 4 * model.mesh.resolution;
}
/* estimate lambda */
if (model.lambda == 0.f)
{
- vector<int> nonzero(model.spinImages.rows);
+ vector<int> nonzero(model.spinImages.rows);
for(int i = 0; i < model.spinImages.rows; ++i)
nonzero[i] = countNonZero(model.spinImages.row(i));
sort(nonzero, less<int>());
model.lambda = static_cast<float>( nonzero[ nonzero.size()/2 ] ) / 2;
- }
-
+ }
+
TickMeter corr_timer;
corr_timer.start();
vector<Match> allMatches;
{
vector<int> indeces;
vector<float> coeffs;
- matchSpinToModel(scene.spinImages.row(i), indeces, coeffs);
+ matchSpinToModel(scene.spinImages.row(i), indeces, coeffs);
for(size_t t = 0; t < indeces.size(); ++t)
- allMatches.push_back(Match(i, indeces[t], coeffs[t]));
+ allMatches.push_back(Match(i, indeces[t], coeffs[t]));
- if (out) if (i % 100 == 0) *out << "Comparing scene spinimage " << i << " of " << scene.spinImages.rows << endl;
+ if (out) if (i % 100 == 0) *out << "Comparing scene spinimage " << i << " of " << scene.spinImages.rows << endl;
}
corr_timer.stop();
if (out) *out << "Spin correlation time = " << corr_timer << endl;
if (out) *out << "Matches number = " << allMatches.size() << endl;
- if(allMatches.empty())
+ if(allMatches.empty())
return;
-
+
/* filtering by similarity measure */
const float fraction = 0.5f;
- float maxMeasure = max_element(allMatches.begin(), allMatches.end(), less<float>())->measure;
+ float maxMeasure = max_element(allMatches.begin(), allMatches.end(), less<float>())->measure;
allMatches.erase(
- remove_if(allMatches.begin(), allMatches.end(), bind2nd(less<float>(), maxMeasure * fraction)),
+ remove_if(allMatches.begin(), allMatches.end(), bind2nd(less<float>(), maxMeasure * fraction)),
allMatches.end());
if (out) *out << "Matches number [filtered by similarity measure] = " << allMatches.size() << endl;
int matchesSize = (int)allMatches.size();
if(matchesSize == 0)
return;
-
- /* filtering by geometric consistency */
+
+ /* filtering by geometric consistency */
for(int i = 0; i < matchesSize; ++i)
{
int consistNum = 1;
float gc = float_max;
-
+
for(int j = 0; j < matchesSize; ++j)
if (i != j)
{
{
const Point3f& pointSceneI = scene.getSpinVertex(mi.sceneInd);
const Point3f& normalSceneI = scene.getSpinNormal(mi.sceneInd);
-
+
const Point3f& pointModelI = model.getSpinVertex(mi.modelInd);
const Point3f& normalModelI = model.getSpinNormal(mi.modelInd);
-
+
const Point3f& pointSceneJ = scene.getSpinVertex(mj.sceneInd);
const Point3f& normalSceneJ = scene.getSpinNormal(mj.sceneInd);
-
+
const Point3f& pointModelJ = model.getSpinVertex(mj.modelInd);
const Point3f& normalModelJ = model.getSpinNormal(mj.modelInd);
-
+
gc = geometricConsistency(pointSceneI, normalSceneI, pointModelI, normalModelI,
- pointSceneJ, normalSceneJ, pointModelJ, normalModelJ);
+ pointSceneJ, normalSceneJ, pointModelJ, normalModelJ);
}
if (gc < model.T_GeometriccConsistency)
++consistNum;
}
-
-
+
+
if (consistNum < matchesSize / 4) /* failed consistensy test */
- allMatches[i].measure = infinity;
+ allMatches[i].measure = infinity;
}
allMatches.erase(
- remove_if(allMatches.begin(), allMatches.end(), bind2nd(equal_to<float>(), infinity)),
- allMatches.end());
+ remove_if(allMatches.begin(), allMatches.end(), bind2nd(equal_to<float>(), infinity)),
+ allMatches.end());
if (out) *out << "Matches number [filtered by geometric consistency] = " << allMatches.size() << endl;
if (out) *out << "grouping ..." << endl;
Mat groupingMat((int)matchesSize, (int)matchesSize, CV_32F);
- groupingMat = Scalar(0);
-
+ groupingMat = Scalar(0);
+
/* grouping */
for(int j = 0; j < matchesSize; ++j)
- for(int i = j + 1; i < matchesSize; ++i)
+ for(int i = j + 1; i < matchesSize; ++i)
{
const Match& mi = allMatches[i];
const Match& mj = allMatches[j];
const Point3f& pointSceneI = scene.getSpinVertex(mi.sceneInd);
const Point3f& normalSceneI = scene.getSpinNormal(mi.sceneInd);
-
+
const Point3f& pointModelI = model.getSpinVertex(mi.modelInd);
const Point3f& normalModelI = model.getSpinNormal(mi.modelInd);
-
+
const Point3f& pointSceneJ = scene.getSpinVertex(mj.sceneInd);
const Point3f& normalSceneJ = scene.getSpinNormal(mj.sceneInd);
-
+
const Point3f& pointModelJ = model.getSpinVertex(mj.modelInd);
const Point3f& normalModelJ = model.getSpinNormal(mj.modelInd);
float wgc = groupingCreteria(pointSceneI, normalSceneI, pointModelI, normalModelI,
pointSceneJ, normalSceneJ, pointModelJ, normalModelJ,
- model.gamma);
-
+ model.gamma);
+
groupingMat.ptr<float>(i)[j] = wgc;
groupingMat.ptr<float>(j)[i] = wgc;
}
group_t allMatchesInds;
for(int i = 0; i < matchesSize; ++i)
allMatchesInds.insert(i);
-
+
vector<float> buf(matchesSize);
float *buf_beg = &buf[0];
vector<group_t> groups;
-
+
for(int g = 0; g < matchesSize; ++g)
- {
+ {
if (out) if (g % 100 == 0) *out << "G = " << g << endl;
group_t left = allMatchesInds;
group_t group;
-
+
left.erase(g);
group.insert(g);
-
+
for(;;)
{
size_t left_size = left.size();
if (left_size == 0)
break;
-
+
std::transform(left.begin(), left.end(), buf_beg, WgcHelper(group, groupingMat));
size_t minInd = min_element(buf_beg, buf_beg + left_size) - buf_beg;
-
+
if (buf[minInd] < model.T_GroupingCorespondances) /* can add corespondance to group */
{
iter pos = left.begin();
advance(pos, minInd);
-
+
group.insert(*pos);
left.erase(pos);
}
{
const Match& m = allMatches[*pos];
outgrp.push_back(Vec2i(subset[m.modelInd], scene.subset[m.sceneInd]));
- }
+ }
result.push_back(outgrp);
- }
+ }
}
cv::TickMeter::TickMeter() { reset(); }
int64 cv::TickMeter::getTimeTicks() const { return sumTime; }
double cv::TickMeter::getTimeMicro() const { return (double)getTimeTicks()/cvGetTickFrequency(); }
double cv::TickMeter::getTimeMilli() const { return getTimeMicro()*1e-3; }
-double cv::TickMeter::getTimeSec() const { return getTimeMilli()*1e-3; }
+double cv::TickMeter::getTimeSec() const { return getTimeMilli()*1e-3; }
int64 cv::TickMeter::getCounter() const { return counter; }
void cv::TickMeter::reset() {startTime = 0; sumTime = 0; counter = 0; }
Proceedings of the 5th International Symposium on Visual Computing, Vegas, USA
This code is written by Sergey G. Kosov for "Visir PX" application as part of Project X (www.project-10.de)
- */
+ */
#include "precomp.hpp"
#include <limits.h>
-namespace cv
+namespace cv
{
-StereoVar::StereoVar() : levels(3), pyrScale(0.5), nIt(5), minDisp(0), maxDisp(16), poly_n(3), poly_sigma(0), fi(25.0f), lambda(0.03f), penalization(PENALIZATION_TICHONOV), cycle(CYCLE_V), flags(USE_SMART_ID | USE_AUTO_PARAMS)
+StereoVar::StereoVar() : levels(3), pyrScale(0.5), nIt(5), minDisp(0), maxDisp(16), poly_n(3), poly_sigma(0), fi(25.0f), lambda(0.03f), penalization(PENALIZATION_TICHONOV), cycle(CYCLE_V), flags(USE_SMART_ID | USE_AUTO_PARAMS)
{
}
static Mat diffX(Mat &src)
{
- register int x, y, cols = src.cols - 1;
- Mat dst(src.size(), src.type());
- for(y = 0; y < src.rows; y++){
+ register int x, y, cols = src.cols - 1;
+ Mat dst(src.size(), src.type());
+ for(y = 0; y < src.rows; y++){
const float* pSrc = src.ptr<float>(y);
float* pDst = dst.ptr<float>(y);
#if CV_SSE2
static Mat getGradient(Mat &src)
{
- register int x, y;
- Mat dst(src.size(), src.type());
- dst.setTo(0);
- for (y = 0; y < src.rows - 1; y++) {
- float *pSrc = src.ptr<float>(y);
- float *pSrcF = src.ptr<float>(y + 1);
- float *pDst = dst.ptr<float>(y);
- for (x = 0; x < src.cols - 1; x++)
- pDst[x] = fabs(pSrc[x + 1] - pSrc[x]) + fabs(pSrcF[x] - pSrc[x]);
- }
- return dst;
+ register int x, y;
+ Mat dst(src.size(), src.type());
+ dst.setTo(0);
+ for (y = 0; y < src.rows - 1; y++) {
+ float *pSrc = src.ptr<float>(y);
+ float *pSrcF = src.ptr<float>(y + 1);
+ float *pDst = dst.ptr<float>(y);
+ for (x = 0; x < src.cols - 1; x++)
+ pDst[x] = fabs(pSrc[x + 1] - pSrc[x]) + fabs(pSrcF[x] - pSrc[x]);
+ }
+ return dst;
}
static Mat getG_c(Mat &src, float l)
{
- Mat dst(src.size(), src.type());
- for (register int y = 0; y < src.rows; y++) {
- float *pSrc = src.ptr<float>(y);
- float *pDst = dst.ptr<float>(y);
- for (register int x = 0; x < src.cols; x++)
- pDst[x] = 0.5f*l / sqrtf(l*l + pSrc[x]*pSrc[x]);
- }
- return dst;
+ Mat dst(src.size(), src.type());
+ for (register int y = 0; y < src.rows; y++) {
+ float *pSrc = src.ptr<float>(y);
+ float *pDst = dst.ptr<float>(y);
+ for (register int x = 0; x < src.cols; x++)
+ pDst[x] = 0.5f*l / sqrtf(l*l + pSrc[x]*pSrc[x]);
+ }
+ return dst;
}
static Mat getG_p(Mat &src, float l)
{
- Mat dst(src.size(), src.type());
- for (register int y = 0; y < src.rows; y++) {
- float *pSrc = src.ptr<float>(y);
- float *pDst = dst.ptr<float>(y);
- for (register int x = 0; x < src.cols; x++)
- pDst[x] = 0.5f*l*l / (l*l + pSrc[x]*pSrc[x]);
- }
- return dst;
+ Mat dst(src.size(), src.type());
+ for (register int y = 0; y < src.rows; y++) {
+ float *pSrc = src.ptr<float>(y);
+ float *pDst = dst.ptr<float>(y);
+ for (register int x = 0; x < src.cols; x++)
+ pDst[x] = 0.5f*l*l / (l*l + pSrc[x]*pSrc[x]);
+ }
+ return dst;
}
void StereoVar::VariationalSolver(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)
{
- register int n, x, y;
- float gl = 1, gr = 1, gu = 1, gd = 1, gc = 4;
- Mat g_c, g_p;
- Mat U;
- u.copyTo(U);
-
- int N = nIt;
- float l = lambda;
- float Fi = fi;
-
-
- if (flags & USE_SMART_ID) {
- double scale = pow(pyrScale, (double) level) * (1 + pyrScale);
- N = (int) (N / scale);
- }
-
- double scale = pow(pyrScale, (double) level);
- Fi /= (float) scale;
- l *= (float) scale;
-
- int width = u.cols - 1;
- int height = u.rows - 1;
- for (n = 0; n < N; n++) {
- if (penalization != PENALIZATION_TICHONOV) {
- Mat gradient = getGradient(U);
- switch (penalization) {
- case PENALIZATION_CHARBONNIER: g_c = getG_c(gradient, l); break;
- case PENALIZATION_PERONA_MALIK: g_p = getG_p(gradient, l); break;
- }
- gradient.release();
- }
- for (y = 1 ; y < height; y++) {
- float *pU = U.ptr<float>(y);
- float *pUu = U.ptr<float>(y + 1);
- float *pUd = U.ptr<float>(y - 1);
- float *pu = u.ptr<float>(y);
- float *pI1 = I1.ptr<float>(y);
- float *pI2 = I2.ptr<float>(y);
- float *pI2x = I2x.ptr<float>(y);
- float *pG_c = NULL, *pG_cu = NULL, *pG_cd = NULL;
- float *pG_p = NULL, *pG_pu = NULL, *pG_pd = NULL;
- switch (penalization) {
- case PENALIZATION_CHARBONNIER:
- pG_c = g_c.ptr<float>(y);
- pG_cu = g_c.ptr<float>(y + 1);
- pG_cd = g_c.ptr<float>(y - 1);
- break;
- case PENALIZATION_PERONA_MALIK:
- pG_p = g_p.ptr<float>(y);
- pG_pu = g_p.ptr<float>(y + 1);
- pG_pd = g_p.ptr<float>(y - 1);
- break;
- }
- for (x = 1; x < width; x++) {
- switch (penalization) {
- case PENALIZATION_CHARBONNIER:
- gc = pG_c[x];
- gl = gc + pG_c[x - 1];
- gr = gc + pG_c[x + 1];
- gu = gc + pG_cu[x];
- gd = gc + pG_cd[x];
- gc = gl + gr + gu + gd;
- break;
- case PENALIZATION_PERONA_MALIK:
- gc = pG_p[x];
- gl = gc + pG_p[x - 1];
- gr = gc + pG_p[x + 1];
- gu = gc + pG_pu[x];
- gd = gc + pG_pd[x];
- gc = gl + gr + gu + gd;
- break;
- }
-
- float fi = Fi;
- if (maxDisp > minDisp) {
- if (pU[x] > maxDisp * scale) {fi *= 1000; pU[x] = static_cast<float>(maxDisp * scale);}
- if (pU[x] < minDisp * scale) {fi *= 1000; pU[x] = static_cast<float>(minDisp * scale);}
- }
-
- int A = static_cast<int>(pU[x]);
- int neg = 0; if (pU[x] <= 0) neg = -1;
-
- if (x + A > width)
- pu[x] = pU[width - A];
- else if (x + A + neg < 0)
- pu[x] = pU[- A + 2];
- else {
- pu[x] = A + (pI2x[x + A + neg] * (pI1[x] - pI2[x + A])
- + fi * (gr * pU[x + 1] + gl * pU[x - 1] + gu * pUu[x] + gd * pUd[x] - gc * A))
- / (pI2x[x + A + neg] * pI2x[x + A + neg] + gc * fi) ;
- }
- }// x
- pu[0] = pu[1];
- pu[width] = pu[width - 1];
- }// y
- for (x = 0; x <= width; x++) {
- u.at<float>(0, x) = u.at<float>(1, x);
- u.at<float>(height, x) = u.at<float>(height - 1, x);
- }
- u.copyTo(U);
- if (!g_c.empty()) g_c.release();
- if (!g_p.empty()) g_p.release();
- }//n
+ register int n, x, y;
+ float gl = 1, gr = 1, gu = 1, gd = 1, gc = 4;
+ Mat g_c, g_p;
+ Mat U;
+ u.copyTo(U);
+
+ int N = nIt;
+ float l = lambda;
+ float Fi = fi;
+
+
+ if (flags & USE_SMART_ID) {
+ double scale = pow(pyrScale, (double) level) * (1 + pyrScale);
+ N = (int) (N / scale);
+ }
+
+ double scale = pow(pyrScale, (double) level);
+ Fi /= (float) scale;
+ l *= (float) scale;
+
+ int width = u.cols - 1;
+ int height = u.rows - 1;
+ for (n = 0; n < N; n++) {
+ if (penalization != PENALIZATION_TICHONOV) {
+ Mat gradient = getGradient(U);
+ switch (penalization) {
+ case PENALIZATION_CHARBONNIER: g_c = getG_c(gradient, l); break;
+ case PENALIZATION_PERONA_MALIK: g_p = getG_p(gradient, l); break;
+ }
+ gradient.release();
+ }
+ for (y = 1 ; y < height; y++) {
+ float *pU = U.ptr<float>(y);
+ float *pUu = U.ptr<float>(y + 1);
+ float *pUd = U.ptr<float>(y - 1);
+ float *pu = u.ptr<float>(y);
+ float *pI1 = I1.ptr<float>(y);
+ float *pI2 = I2.ptr<float>(y);
+ float *pI2x = I2x.ptr<float>(y);
+ float *pG_c = NULL, *pG_cu = NULL, *pG_cd = NULL;
+ float *pG_p = NULL, *pG_pu = NULL, *pG_pd = NULL;
+ switch (penalization) {
+ case PENALIZATION_CHARBONNIER:
+ pG_c = g_c.ptr<float>(y);
+ pG_cu = g_c.ptr<float>(y + 1);
+ pG_cd = g_c.ptr<float>(y - 1);
+ break;
+ case PENALIZATION_PERONA_MALIK:
+ pG_p = g_p.ptr<float>(y);
+ pG_pu = g_p.ptr<float>(y + 1);
+ pG_pd = g_p.ptr<float>(y - 1);
+ break;
+ }
+ for (x = 1; x < width; x++) {
+ switch (penalization) {
+ case PENALIZATION_CHARBONNIER:
+ gc = pG_c[x];
+ gl = gc + pG_c[x - 1];
+ gr = gc + pG_c[x + 1];
+ gu = gc + pG_cu[x];
+ gd = gc + pG_cd[x];
+ gc = gl + gr + gu + gd;
+ break;
+ case PENALIZATION_PERONA_MALIK:
+ gc = pG_p[x];
+ gl = gc + pG_p[x - 1];
+ gr = gc + pG_p[x + 1];
+ gu = gc + pG_pu[x];
+ gd = gc + pG_pd[x];
+ gc = gl + gr + gu + gd;
+ break;
+ }
+
+ float _fi = Fi;
+ if (maxDisp > minDisp) {
+ if (pU[x] > maxDisp * scale) {_fi *= 1000; pU[x] = static_cast<float>(maxDisp * scale);}
+ if (pU[x] < minDisp * scale) {_fi *= 1000; pU[x] = static_cast<float>(minDisp * scale);}
+ }
+
+ int A = static_cast<int>(pU[x]);
+ int neg = 0; if (pU[x] <= 0) neg = -1;
+
+ if (x + A > width)
+ pu[x] = pU[width - A];
+ else if (x + A + neg < 0)
+ pu[x] = pU[- A + 2];
+ else {
+ pu[x] = A + (pI2x[x + A + neg] * (pI1[x] - pI2[x + A])
+ + _fi * (gr * pU[x + 1] + gl * pU[x - 1] + gu * pUu[x] + gd * pUd[x] - gc * A))
+ / (pI2x[x + A + neg] * pI2x[x + A + neg] + gc * _fi) ;
+ }
+ }// x
+ pu[0] = pu[1];
+ pu[width] = pu[width - 1];
+ }// y
+ for (x = 0; x <= width; x++) {
+ u.at<float>(0, x) = u.at<float>(1, x);
+ u.at<float>(height, x) = u.at<float>(height - 1, x);
+ }
+ u.copyTo(U);
+ if (!g_c.empty()) g_c.release();
+ if (!g_p.empty()) g_p.release();
+ }//n
}
void StereoVar::VCycle_MyFAS(Mat &I1, Mat &I2, Mat &I2x, Mat &_u, int level)
{
- CvSize imgSize = _u.size();
- CvSize frmSize = cvSize((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5));
- Mat I1_h, I2_h, I2x_h, u_h, U, U_h;
+ CvSize imgSize = _u.size();
+ CvSize frmSize = cvSize((int) (imgSize.width * pyrScale + 0.5), (int) (imgSize.height * pyrScale + 0.5));
+ Mat I1_h, I2_h, I2x_h, u_h, U, U_h;
- //PRE relaxation
- VariationalSolver(I1, I2, I2x, _u, level);
+ //PRE relaxation
+ VariationalSolver(I1, I2, I2x, _u, level);
- if (level >= levels - 1) return;
- level ++;
+ if (level >= levels - 1) return;
+ level ++;
- //scaling DOWN
- resize(I1, I1_h, frmSize, 0, 0, INTER_AREA);
- resize(I2, I2_h, frmSize, 0, 0, INTER_AREA);
- resize(_u, u_h, frmSize, 0, 0, INTER_AREA);
- u_h.convertTo(u_h, u_h.type(), pyrScale);
- I2x_h = diffX(I2_h);
+ //scaling DOWN
+ resize(I1, I1_h, frmSize, 0, 0, INTER_AREA);
+ resize(I2, I2_h, frmSize, 0, 0, INTER_AREA);
+ resize(_u, u_h, frmSize, 0, 0, INTER_AREA);
+ u_h.convertTo(u_h, u_h.type(), pyrScale);
+ I2x_h = diffX(I2_h);
- //Next level
- U_h = u_h.clone();
- VCycle_MyFAS(I1_h, I2_h, I2x_h, U_h, level);
+ //Next level
+ U_h = u_h.clone();
+ VCycle_MyFAS(I1_h, I2_h, I2x_h, U_h, level);
- subtract(U_h, u_h, U_h);
- U_h.convertTo(U_h, U_h.type(), 1.0 / pyrScale);
+ subtract(U_h, u_h, U_h);
+ U_h.convertTo(U_h, U_h.type(), 1.0 / pyrScale);
- //scaling UP
- resize(U_h, U, imgSize);
+ //scaling UP
+ resize(U_h, U, imgSize);
- //correcting the solution
- add(_u, U, _u);
+ //correcting the solution
+ add(_u, U, _u);
- //POST relaxation
- VariationalSolver(I1, I2, I2x, _u, level - 1);
+ //POST relaxation
+ VariationalSolver(I1, I2, I2x, _u, level - 1);
- if (flags & USE_MEDIAN_FILTERING) medianBlur(_u, _u, 3);
+ if (flags & USE_MEDIAN_FILTERING) medianBlur(_u, _u, 3);
- I1_h.release();
- I2_h.release();
- I2x_h.release();
- u_h.release();
- U.release();
- U_h.release();
+ I1_h.release();
+ I2_h.release();
+ I2x_h.release();
+ u_h.release();
+ U.release();
+ U_h.release();
}
void StereoVar::FMG(Mat &I1, Mat &I2, Mat &I2x, Mat &u, int level)
{
- double scale = pow(pyrScale, (double) level);
- CvSize frmSize = cvSize((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5));
- Mat I1_h, I2_h, I2x_h, u_h;
-
- //scaling DOWN
- resize(I1, I1_h, frmSize, 0, 0, INTER_AREA);
- resize(I2, I2_h, frmSize, 0, 0, INTER_AREA);
- resize(u, u_h, frmSize, 0, 0, INTER_AREA);
- u_h.convertTo(u_h, u_h.type(), scale);
- I2x_h = diffX(I2_h);
-
- switch (cycle) {
- case CYCLE_O:
- VariationalSolver(I1_h, I2_h, I2x_h, u_h, level);
- break;
- case CYCLE_V:
- VCycle_MyFAS(I1_h, I2_h, I2x_h, u_h, level);
- break;
- }
-
- u_h.convertTo(u_h, u_h.type(), 1.0 / scale);
-
- //scaling UP
- resize(u_h, u, u.size(), 0, 0, INTER_CUBIC);
-
- I1_h.release();
- I2_h.release();
- I2x_h.release();
- u_h.release();
-
- level--;
- if ((flags & USE_AUTO_PARAMS) && (level < levels / 3)) {
- penalization = PENALIZATION_PERONA_MALIK;
- fi *= 100;
- flags -= USE_AUTO_PARAMS;
- autoParams();
- }
- if (flags & USE_MEDIAN_FILTERING) medianBlur(u, u, 3);
- if (level >= 0) FMG(I1, I2, I2x, u, level);
+ double scale = pow(pyrScale, (double) level);
+ CvSize frmSize = cvSize((int) (u.cols * scale + 0.5), (int) (u.rows * scale + 0.5));
+ Mat I1_h, I2_h, I2x_h, u_h;
+
+ //scaling DOWN
+ resize(I1, I1_h, frmSize, 0, 0, INTER_AREA);
+ resize(I2, I2_h, frmSize, 0, 0, INTER_AREA);
+ resize(u, u_h, frmSize, 0, 0, INTER_AREA);
+ u_h.convertTo(u_h, u_h.type(), scale);
+ I2x_h = diffX(I2_h);
+
+ switch (cycle) {
+ case CYCLE_O:
+ VariationalSolver(I1_h, I2_h, I2x_h, u_h, level);
+ break;
+ case CYCLE_V:
+ VCycle_MyFAS(I1_h, I2_h, I2x_h, u_h, level);
+ break;
+ }
+
+ u_h.convertTo(u_h, u_h.type(), 1.0 / scale);
+
+ //scaling UP
+ resize(u_h, u, u.size(), 0, 0, INTER_CUBIC);
+
+ I1_h.release();
+ I2_h.release();
+ I2x_h.release();
+ u_h.release();
+
+ level--;
+ if ((flags & USE_AUTO_PARAMS) && (level < levels / 3)) {
+ penalization = PENALIZATION_PERONA_MALIK;
+ fi *= 100;
+ flags -= USE_AUTO_PARAMS;
+ autoParams();
+ }
+ if (flags & USE_MEDIAN_FILTERING) medianBlur(u, u, 3);
+ if (level >= 0) FMG(I1, I2, I2x, u, level);
}
void StereoVar::autoParams()
-{
- int maxD = MAX(labs(maxDisp), labs(minDisp));
-
- if (!maxD) pyrScale = 0.85;
- else if (maxD < 8) pyrScale = 0.5;
- else if (maxD < 64) pyrScale = 0.5 + static_cast<double>(maxD - 8) * 0.00625;
- else pyrScale = 0.85;
-
- if (maxD) {
- levels = 0;
- while ( pow(pyrScale, levels) * maxD > 1.5) levels ++;
- levels++;
- }
-
- switch(penalization) {
- case PENALIZATION_TICHONOV: cycle = CYCLE_V; break;
- case PENALIZATION_CHARBONNIER: cycle = CYCLE_O; break;
- case PENALIZATION_PERONA_MALIK: cycle = CYCLE_O; break;
- }
+{
+ int maxD = MAX(labs(maxDisp), labs(minDisp));
+
+ if (!maxD) pyrScale = 0.85;
+ else if (maxD < 8) pyrScale = 0.5;
+ else if (maxD < 64) pyrScale = 0.5 + static_cast<double>(maxD - 8) * 0.00625;
+ else pyrScale = 0.85;
+
+ if (maxD) {
+ levels = 0;
+ while ( pow(pyrScale, levels) * maxD > 1.5) levels ++;
+ levels++;
+ }
+
+ switch(penalization) {
+ case PENALIZATION_TICHONOV: cycle = CYCLE_V; break;
+ case PENALIZATION_CHARBONNIER: cycle = CYCLE_O; break;
+ case PENALIZATION_PERONA_MALIK: cycle = CYCLE_O; break;
+ }
}
void StereoVar::operator ()( const Mat& left, const Mat& right, Mat& disp )
{
- CV_Assert(left.size() == right.size() && left.type() == right.type());
- CvSize imgSize = left.size();
- int MaxD = MAX(labs(minDisp), labs(maxDisp));
- int SignD = 1; if (MIN(minDisp, maxDisp) < 0) SignD = -1;
- if (minDisp >= maxDisp) {MaxD = 256; SignD = 1;}
-
- Mat u;
- if ((flags & USE_INITIAL_DISPARITY) && (!disp.empty())) {
- CV_Assert(disp.size() == left.size() && disp.type() == CV_8UC1);
- disp.convertTo(u, CV_32FC1, static_cast<double>(SignD * MaxD) / 256);
- } else {
- u.create(imgSize, CV_32FC1);
- u.setTo(0);
- }
-
- // Preprocessing
- Mat leftgray, rightgray;
- if (left.type() != CV_8UC1) {
- cvtColor(left, leftgray, CV_BGR2GRAY);
- cvtColor(right, rightgray, CV_BGR2GRAY);
- } else {
- left.copyTo(leftgray);
- right.copyTo(rightgray);
- }
- if (flags & USE_EQUALIZE_HIST) {
- equalizeHist(leftgray, leftgray);
- equalizeHist(rightgray, rightgray);
- }
- if (poly_sigma > 0.0001) {
- GaussianBlur(leftgray, leftgray, cvSize(poly_n, poly_n), poly_sigma);
- GaussianBlur(rightgray, rightgray, cvSize(poly_n, poly_n), poly_sigma);
- }
-
- if (flags & USE_AUTO_PARAMS) {
- penalization = PENALIZATION_TICHONOV;
- autoParams();
- }
-
- Mat I1, I2;
- leftgray.convertTo(I1, CV_32FC1);
- rightgray.convertTo(I2, CV_32FC1);
- leftgray.release();
- rightgray.release();
-
- Mat I2x = diffX(I2);
-
- FMG(I1, I2, I2x, u, levels - 1);
-
- I1.release();
- I2.release();
- I2x.release();
-
-
- disp.create( left.size(), CV_8UC1 );
- u = abs(u);
- u.convertTo(disp, disp.type(), 256 / MaxD, 0);
-
- u.release();
+ CV_Assert(left.size() == right.size() && left.type() == right.type());
+ CvSize imgSize = left.size();
+ int MaxD = MAX(labs(minDisp), labs(maxDisp));
+ int SignD = 1; if (MIN(minDisp, maxDisp) < 0) SignD = -1;
+ if (minDisp >= maxDisp) {MaxD = 256; SignD = 1;}
+
+ Mat u;
+ if ((flags & USE_INITIAL_DISPARITY) && (!disp.empty())) {
+ CV_Assert(disp.size() == left.size() && disp.type() == CV_8UC1);
+ disp.convertTo(u, CV_32FC1, static_cast<double>(SignD * MaxD) / 256);
+ } else {
+ u.create(imgSize, CV_32FC1);
+ u.setTo(0);
+ }
+
+ // Preprocessing
+ Mat leftgray, rightgray;
+ if (left.type() != CV_8UC1) {
+ cvtColor(left, leftgray, CV_BGR2GRAY);
+ cvtColor(right, rightgray, CV_BGR2GRAY);
+ } else {
+ left.copyTo(leftgray);
+ right.copyTo(rightgray);
+ }
+ if (flags & USE_EQUALIZE_HIST) {
+ equalizeHist(leftgray, leftgray);
+ equalizeHist(rightgray, rightgray);
+ }
+ if (poly_sigma > 0.0001) {
+ GaussianBlur(leftgray, leftgray, cvSize(poly_n, poly_n), poly_sigma);
+ GaussianBlur(rightgray, rightgray, cvSize(poly_n, poly_n), poly_sigma);
+ }
+
+ if (flags & USE_AUTO_PARAMS) {
+ penalization = PENALIZATION_TICHONOV;
+ autoParams();
+ }
+
+ Mat I1, I2;
+ leftgray.convertTo(I1, CV_32FC1);
+ rightgray.convertTo(I2, CV_32FC1);
+ leftgray.release();
+ rightgray.release();
+
+ Mat I2x = diffX(I2);
+
+ FMG(I1, I2, I2x, u, levels - 1);
+
+ I1.release();
+ I2.release();
+ I2x.release();
+
+
+ disp.create( left.size(), CV_8UC1 );
+ u = abs(u);
+ u.convertTo(disp, disp.type(), 256 / MaxD, 0);
+
+ u.release();
}
} // namespace
\ No newline at end of file
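For orientation, a hedged usage sketch of the StereoVar matcher whose implementation is patched above (OpenCV 2.4-era contrib API; file names and parameter values are purely illustrative, not recommendations from the patch):

#include <opencv2/contrib/contrib.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    // Hypothetical input file names.
    cv::Mat left  = cv::imread("left.png");
    cv::Mat right = cv::imread("right.png");

    cv::StereoVar stereo;                             // defaults come from the constructor above
    stereo.maxDisp = 48;                              // illustrative disparity search range
    stereo.flags  |= cv::StereoVar::USE_MEDIAN_FILTERING;

    cv::Mat disp;
    stereo(left, right, disp);                        // 8-bit disparity map, scaled by 256 / MaxD
    cv::imwrite("disparity.png", disp);
    return 0;
}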
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
if(HAVE_CUDA)
file(GLOB lib_cuda "src/cuda/*.cu")
source_group("Cuda" FILES "${lib_cuda}")
-
- ocv_include_directories(${CUDA_INCLUDE_DIRS} "${OpenCV_SOURCE_DIR}/modules/gpu/src" "${OpenCV_SOURCE_DIR}/modules/gpu/src/cuda")
- OCV_CUDA_COMPILE(cuda_objs ${lib_cuda})
-
+
+ ocv_include_directories("${OpenCV_SOURCE_DIR}/modules/gpu/src" "${OpenCV_SOURCE_DIR}/modules/gpu/src/cuda" ${CUDA_INCLUDE_DIRS})
+ ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef)
+ ocv_cuda_compile(cuda_objs ${lib_cuda})
+
set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
else()
set(lib_cuda "")
GPU_MAT = 9 << KIND_SHIFT
};
_InputArray();
+
_InputArray(const Mat& m);
_InputArray(const MatExpr& expr);
template<typename _Tp> _InputArray(const _Tp* vec, int n);
virtual int channels(int i=-1) const;
virtual bool empty() const;
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+ virtual ~_InputArray();
+#endif
+
int flags;
void* obj;
Size sz;
virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void release() const;
virtual void clear() const;
+
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+ virtual ~_OutputArray();
+#endif
};
typedef const _InputArray& InputArray;
CV_WRAP virtual bool isOpened() const;
//! closes the file and releases all the memory buffers
CV_WRAP virtual void release();
- //! closes the file, releases all the memory buffers and returns the text string
+ //! closes the file, releases all the memory buffers and returns the text string
CV_WRAP string releaseAndGetString();
//! returns the first element of the top-level mapping
/* Finds all real and complex roots of a polynomial equation */
CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2,
- int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100));
+ int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100));
/****************************************************************************************\
* Matrix operations *
/* Returns a set element by index. If the element doesn't belong to the set,
NULL is returned */
-CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int index )
+CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int idx )
{
- CvSetElem* elem = (CvSetElem*)cvGetSeqElem( (CvSeq*)set_header, index );
+ CvSetElem* elem = (CvSetElem*)cvGetSeqElem( (CvSeq*)set_header, idx );
return elem && CV_IS_SET_ELEM( elem ) ? elem : 0;
}
CvScalar color, int thickness CV_DEFAULT(1),
int line_type CV_DEFAULT(8),
int shift CV_DEFAULT(0));
-
-
+
+
/* Draws a circle with specified center and radius.
Thickness works in the same way as with cvRectangle */
CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius,
/* Font structure */
typedef struct CvFont
{
- const char* nameFont; //Qt:nameFont
- CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component])
- int font_face; //Qt: bool italic /* =CV_FONT_* */
- const int* ascii; /* font data and metrics */
+ const char* nameFont; //Qt:nameFont
+ CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component])
+ int font_face; //Qt: bool italic /* =CV_FONT_* */
+ const int* ascii; /* font data and metrics */
const int* greek;
const int* cyrillic;
float hscale, vscale;
- float shear; /* slope coefficient: 0 - normal, >0 - italic */
- int thickness; //Qt: weight /* letters thickness */
- float dx; /* horizontal interval between letters */
- int line_type; //Qt: PointSize
+ float shear; /* slope coefficient: 0 - normal, >0 - italic */
+ int thickness; //Qt: weight /* letters thickness */
+ float dx; /* horizontal interval between letters */
+ int line_type; //Qt: PointSize
}
CvFont;
/*********************************** CPU capabilities ***********************************/
-#define CV_CPU_NONE 0
+#define CV_CPU_NONE 0
#define CV_CPU_MMX 1
#define CV_CPU_SSE 2
#define CV_CPU_SSE2 3
/* get index of the thread being executed */
CVAPI(int) cvGetThreadNum( void );
-
+
/********************************** Error Handling **************************************/
-
+
/* Get current OpenCV error status */
CVAPI(int) cvGetErrStatus( void );
const char* file_name, int line, void* userdata );
CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg,
- const char* file_name, int line, void* userdata );
-
+ const char* file_name, int line, void* userdata );
+
#define OPENCV_ERROR(status,func,context) \
cvError((status),(func),(context),__FILE__,__LINE__)
-
+
#define OPENCV_ERRCHK(func,context) \
{if (cvGetErrStatus() >= 0) \
{OPENCV_ERROR(CV_StsBackTrace,(func),(context));}}
-
+
#define OPENCV_ASSERT(expr,func,context) \
{if (! (expr)) \
{OPENCV_ERROR(CV_StsInternal,(func),(context));}}
-
+
#define OPENCV_RSTERR() (cvSetErrStatus(CV_StsOk))
-
+
#define OPENCV_CALL( Func ) \
{ \
Func; \
-}
-
-
+}
+
+
/* CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */
#ifdef CV_NO_FUNC_NAMES
#define CV_FUNCNAME( Name )
#define cvFuncName ""
-#else
+#else
#define CV_FUNCNAME( Name ) \
static char cvFuncName[] = Name
#endif
-
-
+
+
/*
CV_ERROR macro unconditionally raises error with passed code and message.
After raising error, control will be transferred to the exit label.
cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \
__CV_EXIT__; \
}
-
+
/* Simplified form of CV_ERROR */
#define CV_ERROR_FROM_CODE( code ) \
CV_ERROR( code, "" )
-
+
/*
CV_CHECK macro checks error status after CV (or IPL)
function call. If error detected, control will be transferred to the exit
if( cvGetErrStatus() < 0 ) \
CV_ERROR( CV_StsBackTrace, "Inner function failed." ); \
}
-
-
+
+
/*
CV_CALL macro calls CV (or IPL) function, checks error status and
signals a error if the function failed. Useful in "parent node"
Func; \
CV_CHECK(); \
}
-
-
+
+
/* Runtime assertion macro */
#define CV_ASSERT( Condition ) \
{ \
if( !(Condition) ) \
CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \
}
-
+
#define __CV_BEGIN__ {
#define __CV_END__ goto exit; exit: ; }
-#define __CV_EXIT__ goto exit
-
+#define __CV_EXIT__ goto exit
+
#ifdef __cplusplus
}
#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
+#if defined _MSC_VER && _MSC_VER >= 1200
+#pragma warning( disable: 4714 ) //__forceinline is not inlined
+#pragma warning( disable: 4127 ) //conditional expression is constant
+#endif
+
namespace cv
{
return m;\r
}\r
\r
- inline void GpuMat::assignTo(GpuMat& m, int type) const\r
+ inline void GpuMat::assignTo(GpuMat& m, int _type) const\r
{\r
- if (type < 0)\r
+ if (_type < 0)\r
m = *this;\r
else\r
- convertTo(m, type);\r
+ convertTo(m, _type);\r
}\r
\r
inline size_t GpuMat::step1() const\r
create(size_.height, size_.width, type_);\r
}\r
\r
- inline GpuMat GpuMat::operator()(Range rowRange, Range colRange) const\r
+ inline GpuMat GpuMat::operator()(Range _rowRange, Range _colRange) const\r
{\r
- return GpuMat(*this, rowRange, colRange);\r
+ return GpuMat(*this, _rowRange, _colRange);\r
}\r
\r
inline GpuMat GpuMat::operator()(Rect roi) const\r
#endif
#if defined WIN32 || defined WINCE
-#ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
-#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
-#endif
-#include <windows.h>
-#undef small
-#undef min
-#undef max
+# ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
+# define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
+# endif
+# include <windows.h>
+# undef small
+# undef min
+# undef max
#else
-#include <pthread.h>
+# include <pthread.h>
#endif
#ifdef __BORLANDC__
-#ifndef WIN32
- #define WIN32
-#endif
-#ifndef _WIN32
- #define _WIN32
-#endif
- #define CV_DLL
- #undef _CV_ALWAYS_PROFILE_
- #define _CV_ALWAYS_NO_PROFILE_
+# ifndef WIN32
+# define WIN32
+# endif
+# ifndef _WIN32
+# define _WIN32
+# endif
+# define CV_DLL
+# undef _CV_ALWAYS_PROFILE_
+# define _CV_ALWAYS_NO_PROFILE_
#endif
#ifndef FALSE
-#define FALSE 0
+# define FALSE 0
#endif
#ifndef TRUE
-#define TRUE 1
+# define TRUE 1
#endif
#define __BEGIN__ __CV_BEGIN__
#define EXIT __CV_EXIT__
#ifdef HAVE_IPP
-#include "ipp.h"
+# include "ipp.h"
CV_INLINE IppiSize ippiSize(int width, int height)
{
}
#endif
-#if defined __SSE2__ || _MSC_VER >= 1300
-#include "emmintrin.h"
-#define CV_SSE 1
-#define CV_SSE2 1
-#if defined __SSE3__ || _MSC_VER >= 1500
-#include "pmmintrin.h"
-#define CV_SSE3 1
-#endif
-#if defined __SSSE3__
-#include "tmmintrin.h"
-#define CV_SSSE3 1
-#endif
+#if defined __SSE2__ || (defined _MSC_VER && _MSC_VER >= 1300)
+# include "emmintrin.h"
+# define CV_SSE 1
+# define CV_SSE2 1
+# if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
+# include "pmmintrin.h"
+# define CV_SSE3 1
+# else
+# define CV_SSE3 0
+# endif
+# if defined __SSSE3__
+# include "tmmintrin.h"
+# define CV_SSSE3 1
+# else
+# define CV_SSSE3 0
+# endif
#else
-#define CV_SSE 0
-#define CV_SSE2 0
-#define CV_SSE3 0
-#define CV_SSSE3 0
+# define CV_SSE 0
+# define CV_SSE2 0
+# define CV_SSE3 0
+# define CV_SSSE3 0
#endif
-#if defined ANDROID && defined __ARM_NEON__ && defined __GNUC__
-#include "arm_neon.h"
-#define CV_NEON 1
+#if defined ANDROID && defined __ARM_NEON__
+# include "arm_neon.h"
+# define CV_NEON 1
-#define CPU_HAS_NEON_FEATURE (true)
+# define CPU_HAS_NEON_FEATURE (true)
//TODO: make real check using stuff from "cpu-features.h"
//((bool)android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON)
#else
-#define CV_NEON 0
-#define CPU_HAS_NEON_FEATURE (false)
-#endif
-
-#ifdef CV_ICC
-#define CV_ENABLE_UNROLLED 0
-#else
-#define CV_ENABLE_UNROLLED 1
+# define CV_NEON 0
+# define CPU_HAS_NEON_FEATURE (false)
#endif
#ifndef IPPI_CALL
-#define IPPI_CALL(func) CV_Assert((func) >= 0)
+# define IPPI_CALL(func) CV_Assert((func) >= 0)
#endif
#ifdef HAVE_TBB
- #include "tbb/tbb_stddef.h"
- #if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
- #include "tbb/tbb.h"
- #include "tbb/task.h"
- #undef min
- #undef max
- #else
- #undef HAVE_TBB
- #endif
+# include "tbb/tbb_stddef.h"
+# if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
+# include "tbb/tbb.h"
+# include "tbb/task.h"
+# undef min
+# undef max
+# else
+# undef HAVE_TBB
+# endif
#endif
#ifdef HAVE_EIGEN
- #include <Eigen/Core>
- #include "opencv2/core/eigen.hpp"
+# include <Eigen/Core>
+# include "opencv2/core/eigen.hpp"
#endif
#ifdef __cplusplus
+namespace cv
+{
#ifdef HAVE_TBB
- namespace cv
+
+ typedef tbb::blocked_range<int> BlockedRange;
+
+ template<typename Body> static inline
+ void parallel_for( const BlockedRange& range, const Body& body )
{
- typedef tbb::blocked_range<int> BlockedRange;
-
- template<typename Body> static inline
- void parallel_for( const BlockedRange& range, const Body& body )
- {
- tbb::parallel_for(range, body);
- }
-
- template<typename Iterator, typename Body> static inline
- void parallel_do( Iterator first, Iterator last, const Body& body )
- {
- tbb::parallel_do(first, last, body);
- }
-
- typedef tbb::split Split;
-
- template<typename Body> static inline
- void parallel_reduce( const BlockedRange& range, Body& body )
- {
- tbb::parallel_reduce(range, body);
- }
-
- typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;
- typedef tbb::concurrent_vector<double> ConcurrentDoubleVector;
+ tbb::parallel_for(range, body);
}
+
+ template<typename Iterator, typename Body> static inline
+ void parallel_do( Iterator first, Iterator last, const Body& body )
+ {
+ tbb::parallel_do(first, last, body);
+ }
+
+ typedef tbb::split Split;
+
+ template<typename Body> static inline
+ void parallel_reduce( const BlockedRange& range, Body& body )
+ {
+ tbb::parallel_reduce(range, body);
+ }
+
+ typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;
+ typedef tbb::concurrent_vector<double> ConcurrentDoubleVector;
#else
- namespace cv
+ class BlockedRange
+ {
+ public:
+ BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
+ BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}
+ int begin() const { return _begin; }
+ int end() const { return _end; }
+ int grainsize() const { return _grainsize; }
+
+ protected:
+ int _begin, _end, _grainsize;
+ };
+
+ template<typename Body> static inline
+ void parallel_for( const BlockedRange& range, const Body& body )
{
- class BlockedRange
- {
- public:
- BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
- BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}
- int begin() const { return _begin; }
- int end() const { return _end; }
- int grainsize() const { return _grainsize; }
-
- protected:
- int _begin, _end, _grainsize;
- };
-
- template<typename Body> static inline
- void parallel_for( const BlockedRange& range, const Body& body )
- {
- body(range);
- }
- typedef std::vector<Rect> ConcurrentRectVector;
- typedef std::vector<double> ConcurrentDoubleVector;
-
- template<typename Iterator, typename Body> static inline
- void parallel_do( Iterator first, Iterator last, const Body& body )
- {
- for( ; first != last; ++first )
- body(*first);
- }
-
- class Split {};
-
- template<typename Body> static inline
- void parallel_reduce( const BlockedRange& range, Body& body )
- {
- body(range);
- }
-
+ body(range);
+ }
+ typedef std::vector<Rect> ConcurrentRectVector;
+ typedef std::vector<double> ConcurrentDoubleVector;
+
+ template<typename Iterator, typename Body> static inline
+ void parallel_do( Iterator first, Iterator last, const Body& body )
+ {
+ for( ; first != last; ++first )
+ body(*first);
+ }
+
+ class Split {};
+
+ template<typename Body> static inline
+ void parallel_reduce( const BlockedRange& range, Body& body )
+ {
+ body(range);
}
#endif
+} //namespace cv
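// Illustrative sketch, not part of the patch: a body functor for the
// cv::parallel_for wrapper defined above. It works with both branches,
// since tbb::parallel_for and the serial fallback both invoke body(range).
// SquareBody and its data member are hypothetical.
struct SquareBody
{
    float* data;
    void operator()( const cv::BlockedRange& r ) const
    {
        for( int i = r.begin(); i < r.end(); i++ )
            data[i] *= data[i];
    }
};
// usage: SquareBody body = { buf }; cv::parallel_for(cv::BlockedRange(0, n), body);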
- #define CV_INIT_ALGORITHM(classname, algname, memberinit) \
+#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
static Algorithm* create##classname() \
{ \
return new classname; \
return &classname##_info(); \
}
-#endif
+#endif //__cplusplus
/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */
#define CV_MAX_INLINE_MAT_OP_SIZE 10
#define CV_MAX_STRLEN 1024
#if 0 /*def CV_CHECK_FOR_NANS*/
- #define CV_CHECK_NANS( arr ) cvCheckArray((arr))
+# define CV_CHECK_NANS( arr ) cvCheckArray((arr))
#else
- #define CV_CHECK_NANS( arr )
+# define CV_CHECK_NANS( arr )
#endif
/****************************************************************************************\
/* get alloca declaration */
#ifdef __GNUC__
- #undef alloca
- #define alloca __builtin_alloca
- #define CV_HAVE_ALLOCA 1
+# undef alloca
+# define alloca __builtin_alloca
+# define CV_HAVE_ALLOCA 1
#elif defined WIN32 || defined _WIN32 || \
defined WINCE || defined _MSC_VER || defined __BORLANDC__
- #include <malloc.h>
- #define CV_HAVE_ALLOCA 1
+# include <malloc.h>
+# define CV_HAVE_ALLOCA 1
#elif defined HAVE_ALLOCA_H
- #include <alloca.h>
- #define CV_HAVE_ALLOCA 1
+# include <alloca.h>
+# define CV_HAVE_ALLOCA 1
#elif defined HAVE_ALLOCA
- #include <stdlib.h>
- #define CV_HAVE_ALLOCA 1
+# include <stdlib.h>
+# define CV_HAVE_ALLOCA 1
#else
- #undef CV_HAVE_ALLOCA
+# undef CV_HAVE_ALLOCA
#endif
#ifdef __GNUC__
-#define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
+# define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
#elif defined _MSC_VER
-#define CV_DECL_ALIGNED(x) __declspec(align(x))
+# define CV_DECL_ALIGNED(x) __declspec(align(x))
#else
-#define CV_DECL_ALIGNED(x)
+# define CV_DECL_ALIGNED(x)
#endif
#if CV_HAVE_ALLOCA
/* ! DO NOT make it an inline function */
-#define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN )
+# define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN )
#endif
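// Illustrative sketch, not part of the patch: consuming CV_DECL_ALIGNED and
// cvStackAlloc as defined above. useScratch is hypothetical; cvStackAlloc
// relies on CV_MALLOC_ALIGN, which is defined elsewhere in this header.
static void useScratch( int n )
{
    CV_DECL_ALIGNED(16) float acc[4] = { 0, 0, 0, 0 }; // 16-byte aligned automatic array
#if CV_HAVE_ALLOCA
    float* scratch = (float*)cvStackAlloc( n*sizeof(float) ); // freed automatically on return
    scratch[0] = acc[0];
#else
    (void)n;
#endif
    (void)acc;
}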
#ifndef CV_IMPL
-#define CV_IMPL CV_EXTERN_C
+# define CV_IMPL CV_EXTERN_C
#endif
#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; }
CV_UNSUPPORTED_DEPTH_ERR = -101,
CV_UNSUPPORTED_FORMAT_ERR = -100,
- CV_BADARG_ERR = -49, //ipp comp
- CV_NOTDEFINED_ERR = -48, //ipp comp
-
- CV_BADCHANNELS_ERR = -47, //ipp comp
- CV_BADRANGE_ERR = -44, //ipp comp
- CV_BADSTEP_ERR = -29, //ipp comp
-
- CV_BADFLAG_ERR = -12,
- CV_DIV_BY_ZERO_ERR = -11, //ipp comp
- CV_BADCOEF_ERR = -10,
-
- CV_BADFACTOR_ERR = -7,
- CV_BADPOINT_ERR = -6,
- CV_BADSCALE_ERR = -4,
- CV_OUTOFMEM_ERR = -3,
- CV_NULLPTR_ERR = -2,
- CV_BADSIZE_ERR = -1,
- CV_NO_ERR = 0,
- CV_OK = CV_NO_ERR
+ CV_BADARG_ERR = -49, //ipp comp
+ CV_NOTDEFINED_ERR = -48, //ipp comp
+
+ CV_BADCHANNELS_ERR = -47, //ipp comp
+ CV_BADRANGE_ERR = -44, //ipp comp
+ CV_BADSTEP_ERR = -29, //ipp comp
+
+ CV_BADFLAG_ERR = -12,
+ CV_DIV_BY_ZERO_ERR = -11, //ipp comp
+ CV_BADCOEF_ERR = -10,
+
+ CV_BADFACTOR_ERR = -7,
+ CV_BADPOINT_ERR = -6,
+ CV_BADSCALE_ERR = -4,
+ CV_OUTOFMEM_ERR = -3,
+ CV_NULLPTR_ERR = -2,
+ CV_BADSIZE_ERR = -1,
+ CV_NO_ERR = 0,
+ CV_OK = CV_NO_ERR
}
CvStatus;
typedef struct CvBigFuncTable
{
void* fn_2d[CV_DEPTH_MAX*4];
-}
-CvBigFuncTable;
+} CvBigFuncTable;
#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \
(tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \
(tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \
(tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG
+#ifdef __cplusplus
//! OpenGL extension table
class CV_EXPORTS CvOpenGlFuncTab
{
public:
virtual ~CvOpenGlFuncTab();
- virtual void genBuffers(int n, unsigned int* buffers) const = 0;
+ virtual void genBuffers(int n, unsigned int* buffers) const = 0;
virtual void deleteBuffers(int n, const unsigned int* buffers) const = 0;
virtual void bufferData(unsigned int target, ptrdiff_t size, const void* data, unsigned int usage) const = 0;
#define CV_CheckGlError() CV_DbgAssert( (::icvCheckGlError(__FILE__, __LINE__)) )
#endif
-#endif
+#endif //__cplusplus
+
+#endif // __OPENCV_CORE_INTERNAL_HPP__
refcount = 0;
allocator = 0;
}
-
+
inline Mat::Mat() : size(&rows)
{
initEmpty();
initEmpty();
create( _sz.height, _sz.width, _type );
}
-
+
inline Mat::Mat(Size _sz, int _type, const Scalar& _s) : size(&rows)
{
initEmpty();
create(_sz.height, _sz.width, _type);
*this = _s;
}
-
+
inline Mat::Mat(int _dims, const int* _sz, int _type) : size(&rows)
{
initEmpty();
initEmpty();
create(_dims, _sz, _type);
*this = _s;
-}
+}
inline Mat::Mat(const Mat& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
else
Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this);
}
-
-
+
+
template<typename _Tp, int n> inline Mat::Mat(const Vec<_Tp, n>& vec, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
dims(2), rows(n), cols(1), data(0), refcount(0),
datalimit = dataend = datastart + rows*step[0];
}
else
- Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this);
+ Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this);
}
-
+
template<typename _Tp> inline Mat::Mat(const Point_<_Tp>& pt, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
dims(2), rows(2), cols(1), data(0), refcount(0),
((_Tp*)data)[1] = pt.y;
}
}
-
+
template<typename _Tp> inline Mat::Mat(const Point3_<_Tp>& pt, bool copyData)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
}
}
-
+
template<typename _Tp> inline Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer)
: flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG),
dims(0), rows(0), cols(0), data(0), refcount(0),
{
*this = *commaInitializer;
}
-
+
inline Mat::~Mat()
{
release();
}
return *this;
}
-
+
inline Mat Mat::row(int y) const { return Mat(*this, Range(y, y+1), Range::all()); }
inline Mat Mat::col(int x) const { return Mat(*this, Range::all(), Range(x, x+1)); }
inline Mat Mat::rowRange(int startrow, int endrow) const
return m;
}
-inline void Mat::assignTo( Mat& m, int type ) const
+inline void Mat::assignTo( Mat& m, int _type ) const
{
- if( type < 0 )
+ if( _type < 0 )
m = *this;
else
- convertTo(m, type);
+ convertTo(m, _type);
}
inline void Mat::create(int _rows, int _cols, int _type)
refcount = 0;
}
-inline Mat Mat::operator()( Range rowRange, Range colRange ) const
+inline Mat Mat::operator()( Range _rowRange, Range _colRange ) const
{
- return Mat(*this, rowRange, colRange);
+ return Mat(*this, _rowRange, _colRange);
}
-
+
inline Mat Mat::operator()( const Rect& roi ) const
{ return Mat(*this, roi); }
inline Mat Mat::operator()(const Range* ranges) const
{
return Mat(*this, ranges);
-}
-
+}
+
inline Mat::operator CvMat() const
{
CV_DbgAssert(dims <= 2);
return (const _Tp*)(data + step.p[0]*y);
}
-
+
inline uchar* Mat::ptr(int i0, int i1)
{
CV_DbgAssert( dims >= 2 && data &&
}
inline uchar* Mat::ptr(const int* idx)
-{
+{
int i, d = dims;
uchar* p = data;
CV_DbgAssert( d >= 1 && p );
p += idx[i]*step.p[i];
}
return p;
-}
-
+}
+
template<typename _Tp> inline _Tp& Mat::at(int i0, int i1)
{
CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
return ((const _Tp*)(data + step.p[0]*i0))[i1];
}
-
+
template<typename _Tp> inline _Tp& Mat::at(Point pt)
{
CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] &&
int i = i0/cols, j = i0 - i*cols;
return ((_Tp*)(data + step.p[0]*i))[j];
}
-
+
template<typename _Tp> inline const _Tp& Mat::at(int i0) const
{
CV_DbgAssert( dims <= 2 && data &&
int i = i0/cols, j = i0 - i*cols;
return ((const _Tp*)(data + step.p[0]*i))[j];
}
-
+
template<typename _Tp> inline _Tp& Mat::at(int i0, int i1, int i2)
{
CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
return *(const _Tp*)ptr(idx.val);
}
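// Illustrative sketch, not part of the patch: typical use of the Mat::at<>()
// accessors implemented above. atExample is a hypothetical helper.
static void atExample()
{
    cv::Mat img(3, 3, CV_32FC1, cv::Scalar(0));
    img.at<float>(1, 2) = 5.f;                    // row 1, column 2
    float v = img.at<float>(cv::Point(2, 1));     // Point(x, y) addresses the same element
    CV_DbgAssert( v == 5.f );
    (void)v;
}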
-
-
+
+
template<typename _Tp> inline MatConstIterator_<_Tp> Mat::begin() const
{
CV_DbgAssert( elemSize() == sizeof(_Tp) );
{
CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) &&
rows + cols - 1 == n && channels() == 1 );
-
+
if( isContinuous() && type() == DataType<_Tp>::type )
return Vec<_Tp, n>((_Tp*)data);
Vec<_Tp, n> v; Mat tmp(rows, cols, DataType<_Tp>::type, v.val);
convertTo(tmp, tmp.type());
return v;
}
-
+
template<typename _Tp, int m, int n> inline Mat::operator Matx<_Tp, m, n>() const
{
CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 );
-
+
if( isContinuous() && type() == DataType<_Tp>::type )
return Matx<_Tp, m, n>((_Tp*)data);
Matx<_Tp, m, n> mtx; Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val);
template<typename _Tp> inline void Mat::push_back(const _Tp& elem)
{
if( !data )
- {
- *this = Mat(1, 1, DataType<_Tp>::type, (void*)&elem).clone();
- return;
- }
- CV_Assert(DataType<_Tp>::type == type() && cols == 1
+ {
+ *this = Mat(1, 1, DataType<_Tp>::type, (void*)&elem).clone();
+ return;
+ }
+ CV_Assert(DataType<_Tp>::type == type() && cols == 1
/* && dims == 2 (cols == 1 implies dims == 2) */);
uchar* tmp = dataend + step[0];
if( !isSubmatrix() && isContinuous() && tmp <= datalimit )
else
push_back_(&elem);
}
-
+
template<typename _Tp> inline void Mat::push_back(const Mat_<_Tp>& m)
{
push_back((const Mat&)m);
-}
-
+}
+
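// Illustrative sketch, not part of the patch: Mat::push_back as implemented
// above grows a single-column matrix one element at a time.
// pushBackExample is a hypothetical helper.
static void pushBackExample()
{
    cv::Mat_<float> samples;
    for( int i = 0; i < 10; i++ )
        samples.push_back( (float)i );   // samples ends up as a 10x1 CV_32F matrix
}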
inline Mat::MSize::MSize(int* _p) : p(_p) {}
inline Size Mat::MSize::operator()() const
{
- CV_DbgAssert(p[-1] <= 2);
+ CV_DbgAssert(p[-1] <= 2);
return Size(p[1], p[0]);
}
inline const int& Mat::MSize::operator[](int i) const { return p[i]; }
return false;
if( d == 2 )
return p[0] == sz.p[0] && p[1] == sz.p[1];
-
+
for( int i = 0; i < d; i++ )
if( p[i] != sz.p[i] )
return false;
return true;
-}
+}
inline bool Mat::MSize::operator != (const MSize& sz) const
{
return !(*this == sz);
}
-
+
inline Mat::MStep::MStep() { p = buf; p[0] = p[1] = 0; }
inline Mat::MStep::MStep(size_t s) { p = buf; p[0] = s; p[1] = 0; }
inline const size_t& Mat::MStep::operator[](int i) const { return p[i]; }
buf[0] = s;
return *this;
}
-
+
static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0)
{
return cvarrToMat(arr, copyData, true, coiMode);
SVD::compute(_a, _w, _u, _vt);
CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]);
}
-
+
template<typename _Tp, int m, int n, int nm> inline void
SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w )
{
SVD::compute(_a, _w);
CV_Assert(_w.data == (uchar*)&w.val[0]);
}
-
+
template<typename _Tp, int m, int n, int nm, int nb> inline void
SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u,
const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs,
SVD::backSubst(_w, _u, _vt, _rhs, _dst);
CV_Assert(_dst.data == (uchar*)&dst.val[0]);
}
-
+
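// Illustrative sketch, not part of the patch: the Matx overloads of
// SVD::compute defined above; svdMatxExample is hypothetical.
static void svdMatxExample()
{
    cv::Matx33f A = cv::Matx33f::eye();
    cv::Matx31f w;                  // one singular value per row, nm == min(m, n)
    cv::SVD::compute( A, w );       // for the identity all singular values are 1
    (void)w;
}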
///////////////////////////////// Mat_<_Tp> ////////////////////////////////////
template<typename _Tp> inline Mat_<_Tp>::Mat_()
: Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; }
-
+
template<typename _Tp> inline Mat_<_Tp>::Mat_(int _rows, int _cols)
: Mat(_rows, _cols, DataType<_Tp>::type) {}
template<typename _Tp> inline Mat_<_Tp>::Mat_(Size _sz)
: Mat(_sz.height, _sz.width, DataType<_Tp>::type) {}
-
+
template<typename _Tp> inline Mat_<_Tp>::Mat_(Size _sz, const _Tp& value)
: Mat(_sz.height, _sz.width, DataType<_Tp>::type) { *this = value; }
-
+
template<typename _Tp> inline Mat_<_Tp>::Mat_(int _dims, const int* _sz)
: Mat(_dims, _sz, DataType<_Tp>::type) {}
-
+
template<typename _Tp> inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s)
: Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s)) {}
-
+
template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges)
: Mat(m, ranges) {}
-
+
template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat& m)
: Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; *this = m; }
template<typename _Tp> inline Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps)
: Mat(_rows, _cols, DataType<_Tp>::type, _data, steps) {}
-template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& rowRange, const Range& colRange)
- : Mat(m, rowRange, colRange) {}
+template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& _rowRange, const Range& _colRange)
+ : Mat(m, _rowRange, _colRange) {}
template<typename _Tp> inline Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi)
: Mat(m, roi) {}
if( copyData )
*this = clone();
}
-
+
template<typename _Tp> inline Mat_<_Tp>::Mat_(const Point_<typename DataType<_Tp>::channel_type>& pt, bool copyData)
: Mat(2/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt)
{
template<typename _Tp> inline Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer)
: Mat(commaInitializer) {}
-
+
template<typename _Tp> inline Mat_<_Tp>::Mat_(const vector<_Tp>& vec, bool copyData)
: Mat(vec, copyData) {}
template<typename _Tp> inline void Mat_<_Tp>::create(int _dims, const int* _sz)
{
Mat::create(_dims, _sz, DataType<_Tp>::type);
-}
-
-
+}
+
+
template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const
{ return Mat_<_Tp>(Mat::cross(m)); }
template<typename _Tp> inline Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright )
{ return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); }
-template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& rowRange, const Range& colRange ) const
-{ return Mat_<_Tp>(*this, rowRange, colRange); }
+template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& _rowRange, const Range& _colRange ) const
+{ return Mat_<_Tp>(*this, _rowRange, _colRange); }
template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const
{ return Mat_<_Tp>(*this, roi); }
template<typename _Tp> inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const
-{ return Mat_<_Tp>(*this, ranges); }
-
+{ return Mat_<_Tp>(*this, ranges); }
+
template<typename _Tp> inline _Tp* Mat_<_Tp>::operator [](int y)
{ return (_Tp*)ptr(y); }
template<typename _Tp> inline const _Tp* Mat_<_Tp>::operator [](int y) const
template<typename _Tp> template<int n> inline const _Tp& Mat_<_Tp>::operator ()(const Vec<int, n>& idx) const
{
return Mat::at<_Tp>(idx);
-}
-
+}
+
template<typename _Tp> inline _Tp& Mat_<_Tp>::operator ()(int i0)
{
return this->at<_Tp>(i0);
template<typename _Tp> inline const _Tp& Mat_<_Tp>::operator ()(int i0) const
{
return this->at<_Tp>(i0);
-}
+}
template<typename _Tp> inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2)
{
template<typename _Tp> inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const
{
return this->at<_Tp>(i0, i1, i2);
-}
-
-
+}
+
+
template<typename _Tp> inline Mat_<_Tp>::operator vector<_Tp>() const
{
vector<_Tp> v;
{
CV_Assert(n % DataType<_Tp>::channels == 0);
return this->Mat::operator Matx<typename DataType<_Tp>::channel_type, m, n>();
-}
+}
template<typename T1, typename T2, typename Op> inline void
process( const Mat_<T1>& m1, Mat_<T2>& m2, Op op )
}
}
-
+
/////////////////////////////// Input/Output Arrays /////////////////////////////////
-
+
template<typename _Tp> inline _InputArray::_InputArray(const vector<_Tp>& vec)
: flags(FIXED_TYPE + STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {}
: flags(FIXED_TYPE + STD_VECTOR_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {}
template<typename _Tp> inline _InputArray::_InputArray(const vector<Mat_<_Tp> >& vec)
- : flags(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type), obj((void*)&vec) {}
-
+ : flags(FIXED_TYPE + STD_VECTOR_MAT + DataType<_Tp>::type), obj((void*)&vec) {}
+
template<typename _Tp, int m, int n> inline _InputArray::_InputArray(const Matx<_Tp, m, n>& mtx)
: flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m) {}
-
+
template<typename _Tp> inline _InputArray::_InputArray(const _Tp* vec, int n)
: flags(FIXED_TYPE + FIXED_SIZE + MATX + DataType<_Tp>::type), obj((void*)vec), sz(n, 1) {}
template<typename _Tp> inline _InputArray::_InputArray(const Mat_<_Tp>& m)
: flags(FIXED_TYPE + MAT + DataType<_Tp>::type), obj((void*)&m) {}
-
+
template<typename _Tp> inline _OutputArray::_OutputArray(vector<_Tp>& vec)
: _InputArray(vec) {}
template<typename _Tp> inline _OutputArray::_OutputArray(vector<vector<_Tp> >& vec)
: _InputArray(vec) {flags |= FIXED_SIZE;}
template<typename _Tp> inline _OutputArray::_OutputArray(const vector<Mat_<_Tp> >& vec)
: _InputArray(vec) {flags |= FIXED_SIZE;}
-
+
template<typename _Tp> inline _OutputArray::_OutputArray(const Mat_<_Tp>& m)
: _InputArray(m) {flags |= FIXED_SIZE;}
template<typename _Tp, int m, int n> inline _OutputArray::_OutputArray(const Matx<_Tp, m, n>& mtx)
: _InputArray(mtx) {}
template<typename _Tp> inline _OutputArray::_OutputArray(const _Tp* vec, int n)
: _InputArray(vec, n) {}
-
+
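// Illustrative sketch, not part of the patch: thanks to the implicit
// _InputArray constructors above, a std::vector can be passed wherever
// InputArray is expected. sumOfArray is a hypothetical helper.
static double sumOfArray( cv::InputArray arr )
{
    cv::Mat m = arr.getMat();       // lightweight view over the caller's data
    return cv::sum(m)[0];
}
// usage: std::vector<float> v(10, 1.f); double s = sumOfArray(v); // s == 10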
//////////////////////////////////// Matrix Expressions /////////////////////////////////////////
class CV_EXPORTS MatOp
-{
+{
public:
MatOp() {};
virtual ~MatOp() {};
-
+
virtual bool elementWise(const MatExpr& expr) const;
virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0;
virtual void roi(const MatExpr& expr, const Range& rowRange,
virtual void augAssignAnd(const MatExpr& expr, Mat& m) const;
virtual void augAssignOr(const MatExpr& expr, Mat& m) const;
virtual void augAssignXor(const MatExpr& expr, Mat& m) const;
-
+
virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;
virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const;
-
+
virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;
virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const;
-
+
virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const;
virtual void multiply(const MatExpr& expr1, double s, MatExpr& res) const;
-
+
virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const;
virtual void divide(double s, const MatExpr& expr, MatExpr& res) const;
-
+
virtual void abs(const MatExpr& expr, MatExpr& res) const;
-
+
virtual void transpose(const MatExpr& expr, MatExpr& res) const;
virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;
virtual void invert(const MatExpr& expr, int method, MatExpr& res) const;
-
+
virtual Size size(const MatExpr& expr) const;
virtual int type(const MatExpr& expr) const;
};
-
+
class CV_EXPORTS MatExpr
{
public:
op->assign(*this, m);
return m;
}
-
+
template<typename _Tp> operator Mat_<_Tp>() const
{
Mat_<_Tp> m;
op->assign(*this, m, DataType<_Tp>::type);
return m;
}
-
+
MatExpr row(int y) const;
MatExpr col(int x) const;
MatExpr diag(int d=0) const;
MatExpr operator()( const Range& rowRange, const Range& colRange ) const;
MatExpr operator()( const Rect& roi ) const;
-
+
Mat cross(const Mat& m) const;
double dot(const Mat& m) const;
-
+
MatExpr t() const;
MatExpr inv(int method = DECOMP_LU) const;
MatExpr mul(const MatExpr& e, double scale=1) const;
MatExpr mul(const Mat& m, double scale=1) const;
-
+
Size size() const;
int type() const;
-
+
const MatOp* op;
int flags;
-
+
Mat a, b, c;
double alpha, beta;
Scalar s;
};
-
+
CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b);
CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s);
CV_EXPORTS MatExpr operator * (const MatExpr& e, double s);
CV_EXPORTS MatExpr operator * (double s, const MatExpr& e);
CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2);
-
+
CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b);
CV_EXPORTS MatExpr operator / (const Mat& a, double s);
CV_EXPORTS MatExpr operator / (double s, const Mat& a);
CV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e);
CV_EXPORTS MatExpr operator / (const MatExpr& e, double s);
CV_EXPORTS MatExpr operator / (double s, const MatExpr& e);
-CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2);
+CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2);
CV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b);
CV_EXPORTS MatExpr operator < (const Mat& a, double s);
CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b);
CV_EXPORTS MatExpr operator > (const Mat& a, double s);
-CV_EXPORTS MatExpr operator > (double s, const Mat& a);
-
+CV_EXPORTS MatExpr operator > (double s, const Mat& a);
+
CV_EXPORTS MatExpr min(const Mat& a, const Mat& b);
CV_EXPORTS MatExpr min(const Mat& a, double s);
CV_EXPORTS MatExpr min(double s, const Mat& a);
template<typename _Tp> static inline MatExpr min(double s, const Mat_<_Tp>& a)
{
return cv::min((const Mat&)a, s);
-}
+}
template<typename _Tp> static inline MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b)
{
template<typename _Tp> static inline MatExpr max(double s, const Mat_<_Tp>& a)
{
return cv::max((const Mat&)a, s);
-}
+}
template<typename _Tp> static inline void min(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c)
{
cv::max((const Mat&)a, s, (Mat&)c);
}
-
+
CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b);
CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s);
CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a);
CV_EXPORTS MatExpr operator ^ (const Scalar& s, const Mat& a);
CV_EXPORTS MatExpr operator ~(const Mat& m);
-
+
CV_EXPORTS MatExpr abs(const Mat& m);
CV_EXPORTS MatExpr abs(const MatExpr& e);
-
+
template<typename _Tp> static inline MatExpr abs(const Mat_<_Tp>& m)
{
return cv::abs((const Mat&)m);
}
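// Illustrative sketch, not part of the patch: the operators and min/max/abs
// helpers above build lazy MatExpr objects that are evaluated on assignment.
// matExprExample is a hypothetical helper.
static void matExprExample()
{
    cv::Mat A = cv::Mat::eye(3, 3, CV_32F);
    cv::Mat B = cv::Mat::ones(3, 3, CV_32F);
    cv::Mat C = A*2.0 + B;          // evaluated once, directly into C
    cv::Mat D = cv::abs(C - B);     // abs() of a MatExpr stays lazy until assigned
    cv::Mat M = cv::min(C, 1.5);    // element-wise minimum with a scalar
    (void)D; (void)M;
}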
////////////////////////////// Augmenting algebraic operations //////////////////////////////////
-
+
inline Mat& Mat::operator = (const MatExpr& e)
{
e.op->assign(e, *this);
return *this;
-}
+}
template<typename _Tp> inline Mat_<_Tp>::Mat_(const MatExpr& e)
{
{
add(a, s, (Mat&)a);
return (Mat&)a;
-}
+}
template<typename _Tp> static inline
Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
{
add(a, s, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
+}
static inline Mat& operator += (const Mat& a, const MatExpr& b)
{
- b.op->augAssignAdd(b, (Mat&)a);
+ b.op->augAssignAdd(b, (Mat&)a);
return (Mat&)a;
}
b.op->augAssignAdd(b, (Mat&)a);
return (Mat_<_Tp>&)a;
}
-
+
static inline Mat& operator -= (const Mat& a, const Mat& b)
{
subtract(a, b, (Mat&)a);
{
subtract(a, s, (Mat&)a);
return (Mat&)a;
-}
+}
template<typename _Tp> static inline
Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
{
subtract(a, s, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
+}
static inline Mat& operator -= (const Mat& a, const MatExpr& b)
{
- b.op->augAssignSubtract(b, (Mat&)a);
+ b.op->augAssignSubtract(b, (Mat&)a);
return (Mat&)a;
}
{
b.op->augAssignSubtract(b, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
+}
static inline Mat& operator *= (const Mat& a, const Mat& b)
{
{
a.convertTo((Mat&)a, -1, s);
return (Mat&)a;
-}
+}
template<typename _Tp> static inline
Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
{
a.convertTo((Mat&)a, -1, s);
return (Mat_<_Tp>&)a;
-}
+}
static inline Mat& operator *= (const Mat& a, const MatExpr& b)
{
- b.op->augAssignMultiply(b, (Mat&)a);
+ b.op->augAssignMultiply(b, (Mat&)a);
return (Mat&)a;
}
{
b.op->augAssignMultiply(b, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
-
+}
+
static inline Mat& operator /= (const Mat& a, const Mat& b)
{
divide(a, b, (Mat&)a);
{
a.convertTo((Mat&)a, -1, 1./s);
return (Mat&)a;
-}
+}
template<typename _Tp> static inline
Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
{
a.convertTo((Mat&)a, -1, 1./s);
return (Mat_<_Tp>&)a;
-}
+}
static inline Mat& operator /= (const Mat& a, const MatExpr& b)
{
- b.op->augAssignDivide(b, (Mat&)a);
+ b.op->augAssignDivide(b, (Mat&)a);
return (Mat&)a;
}
{
bitwise_and(a, s, (Mat&)a);
return (Mat&)a;
-}
+}
template<typename _Tp> static inline Mat_<_Tp>&
operator &= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
{
bitwise_and(a, b, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
+}
template<typename _Tp> static inline Mat_<_Tp>&
operator &= (const Mat_<_Tp>& a, const Scalar& s)
{
bitwise_and(a, s, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
-
+}
+
static inline Mat& operator |= (const Mat& a, const Mat& b)
{
bitwise_or(a, b, (Mat&)a);
{
bitwise_or(a, s, (Mat&)a);
return (Mat&)a;
-}
+}
template<typename _Tp> static inline Mat_<_Tp>&
operator |= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
{
bitwise_or(a, b, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
+}
template<typename _Tp> static inline Mat_<_Tp>&
operator |= (const Mat_<_Tp>& a, const Scalar& s)
{
bitwise_or(a, s, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
-
+}
+
static inline Mat& operator ^= (const Mat& a, const Mat& b)
{
bitwise_xor(a, b, (Mat&)a);
{
bitwise_xor(a, s, (Mat&)a);
return (Mat&)a;
-}
+}
template<typename _Tp> static inline Mat_<_Tp>&
operator ^= (const Mat_<_Tp>& a, const Mat_<_Tp>& b)
{
bitwise_xor(a, b, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
+}
template<typename _Tp> static inline Mat_<_Tp>&
operator ^= (const Mat_<_Tp>& a, const Scalar& s)
{
bitwise_xor(a, s, (Mat&)a);
return (Mat_<_Tp>&)a;
-}
+}
/////////////////////////////// Miscellaneous operations //////////////////////////////
-
+
template<typename _Tp> void split(const Mat& src, vector<Mat_<_Tp> >& mv)
{ split(src, (vector<Mat>&)mv ); }
//////////////////////////////////////////////////////////////
-
+
template<typename _Tp> inline MatExpr Mat_<_Tp>::zeros(int rows, int cols)
{
return Mat::zeros(rows, cols, DataType<_Tp>::type);
}
-
+
template<typename _Tp> inline MatExpr Mat_<_Tp>::zeros(Size sz)
{
return Mat::zeros(sz, DataType<_Tp>::type);
-}
-
+}
+
template<typename _Tp> inline MatExpr Mat_<_Tp>::ones(int rows, int cols)
{
return Mat::ones(rows, cols, DataType<_Tp>::type);
template<typename _Tp> inline MatExpr Mat_<_Tp>::ones(Size sz)
{
return Mat::ones(sz, DataType<_Tp>::type);
-}
-
+}
+
template<typename _Tp> inline MatExpr Mat_<_Tp>::eye(int rows, int cols)
{
return Mat::eye(rows, cols, DataType<_Tp>::type);
template<typename _Tp> inline MatExpr Mat_<_Tp>::eye(Size sz)
{
return Mat::eye(sz, DataType<_Tp>::type);
-}
-
+}
+
//////////////////////////////// Iterators & Comma initializers //////////////////////////////////
inline MatConstIterator::MatConstIterator()
int idx[]={_pt.y, _pt.x};
seek(idx);
}
-
+
inline MatConstIterator::MatConstIterator(const MatConstIterator& it)
: m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd)
{}
}
inline uchar* MatConstIterator::operator *() const { return ptr; }
-
+
inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs)
{
if( !m || ofs == 0 )
if( m && (ptr -= elemSize) < sliceStart )
{
ptr += elemSize;
- seek(-1, true);
+ seek(-1, true);
}
return *this;
}
if( m && (ptr += elemSize) >= sliceEnd )
{
ptr -= elemSize;
- seek(1, true);
+ seek(1, true);
}
return *this;
}
template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, Point _pt)
: MatConstIterator_<_Tp>(_m, _pt) {}
-
+
template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, const int* _idx)
: MatConstIterator_<_Tp>(_m, _idx) {}
-
+
template<typename _Tp> inline MatIterator_<_Tp>::MatIterator_(const MatIterator_& it)
: MatConstIterator_<_Tp>(it) {}
template<typename _Tp> static inline bool
operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b)
-{ return a.m != b.m || a.ptr != b.ptr; }
-
+{ return a.m != b.m || a.ptr != b.ptr; }
+
static inline bool
operator < (const MatConstIterator& a, const MatConstIterator& b)
{ return a.ptr < b.ptr; }
static inline bool
operator > (const MatConstIterator& a, const MatConstIterator& b)
{ return a.ptr > b.ptr; }
-
+
static inline bool
operator <= (const MatConstIterator& a, const MatConstIterator& b)
{ return a.ptr <= b.ptr; }
static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs)
{ MatConstIterator b = a; return b += -ofs; }
-
+
template<typename _Tp> static inline MatConstIterator_<_Tp>
operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs)
{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; }
template<typename _Tp> static inline MatConstIterator_<_Tp>
operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a)
{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; }
-
+
template<typename _Tp> static inline MatConstIterator_<_Tp>
operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs)
{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; }
inline uchar* MatConstIterator::operator [](ptrdiff_t i) const
{ return *(*this + i); }
-
+
template<typename _Tp> inline _Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const
{ return *(_Tp*)MatConstIterator::operator [](i); }
template<typename _Tp> static inline MatIterator_<_Tp>
operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs)
{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; }
-
+
template<typename _Tp> inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const
{ return *(*this + i); }
{
CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() );
return Mat_<_Tp>(*this->it.m);
-}
-
+}
+
template<typename _Tp, typename T2> static inline MatCommaInitializer_<_Tp>
operator << (const Mat_<_Tp>& m, T2 val)
{
}
-inline void SparseMat::assignTo( SparseMat& m, int type ) const
+inline void SparseMat::assignTo( SparseMat& m, int _type ) const
{
- if( type < 0 )
+ if( _type < 0 )
m = *this;
else
- convertTo(m, type);
+ convertTo(m, _type);
}
inline void SparseMat::addref()
template<typename _Tp> inline _Tp& SparseMat::ref(int i0, size_t* hashval)
{ return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); }
-
+
template<typename _Tp> inline _Tp& SparseMat::ref(int i0, int i1, size_t* hashval)
{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); }
{
const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval);
return p ? *p : _Tp();
-}
-
+}
+
template<typename _Tp> inline _Tp SparseMat::value(int i0, int i1, size_t* hashval) const
{
const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval);
template<typename _Tp> inline const _Tp* SparseMat::find(int i0, size_t* hashval) const
{ return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); }
-
+
template<typename _Tp> inline const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const
{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); }
inline SparseMatIterator SparseMat::end()
{ SparseMatIterator it(this); it.seekEnd(); return it; }
-
+
inline SparseMatConstIterator SparseMat::end() const
{ SparseMatConstIterator it(this); it.seekEnd(); return it; }
-
+
template<typename _Tp> inline SparseMatIterator_<_Tp> SparseMat::begin()
{ return SparseMatIterator_<_Tp>(this); }
-
+
template<typename _Tp> inline SparseMatConstIterator_<_Tp> SparseMat::begin() const
{ return SparseMatConstIterator_<_Tp>(this); }
-
+
template<typename _Tp> inline SparseMatIterator_<_Tp> SparseMat::end()
{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; }
template<typename _Tp> inline SparseMatConstIterator_<_Tp> SparseMat::end() const
{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; }
-
-
+
+
inline SparseMatConstIterator::SparseMatConstIterator()
: m(0), hashidx(0), ptr(0)
{
return it;
}
-
+
inline void SparseMatConstIterator::seekEnd()
{
if( m && m->hdr )
ptr = 0;
}
}
-
+
inline SparseMatIterator::SparseMatIterator()
{}
template<typename _Tp> inline _Tp
SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const
-{ return SparseMat::value<_Tp>(i0, hashval); }
-
+{ return SparseMat::value<_Tp>(i0, hashval); }
+
template<typename _Tp> inline _Tp&
SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval)
{ return SparseMat::ref<_Tp>(i0, i1, hashval); }
template<typename _Tp> inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::end()
{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; }
-
+
template<typename _Tp> inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const
{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; }
SparseMatConstIterator::operator ++();
return it;
}
-
+
}
#endif
\r
#include "opencv2/core/core.hpp"\r
\r
-namespace cv \r
+namespace cv\r
{\r
- //! Smart pointer for OpenGL buffer memory with reference counting.\r
- class CV_EXPORTS GlBuffer\r
+//! Smart pointer for OpenGL buffer memory with reference counting.\r
+class CV_EXPORTS GlBuffer\r
+{\r
+public:\r
+ enum Usage\r
{\r
- public:\r
- enum Usage\r
- {\r
- ARRAY_BUFFER = 0x8892, // buffer will use for OpenGL arrays (vertices, colors, normals, etc)\r
- TEXTURE_BUFFER = 0x88EC // buffer will ise for OpenGL textures\r
- };\r
-\r
- //! create empty buffer\r
- explicit GlBuffer(Usage usage);\r
-\r
- //! create buffer\r
- GlBuffer(int rows, int cols, int type, Usage usage);\r
- GlBuffer(Size size, int type, Usage usage);\r
-\r
- //! copy from host/device memory\r
- GlBuffer(InputArray mat, Usage usage);\r
-\r
- void create(int rows, int cols, int type, Usage usage);\r
- inline void create(Size size, int type, Usage usage) { create(size.height, size.width, type, usage); }\r
- inline void create(int rows, int cols, int type) { create(rows, cols, type, usage()); }\r
- inline void create(Size size, int type) { create(size.height, size.width, type, usage()); }\r
-\r
- void release();\r
-\r
- //! copy from host/device memory\r
- void copyFrom(InputArray mat);\r
-\r
- void bind() const;\r
- void unbind() const;\r
-\r
- //! map to host memory\r
- Mat mapHost();\r
- void unmapHost();\r
-\r
- //! map to device memory\r
- gpu::GpuMat mapDevice();\r
- void unmapDevice();\r
- \r
- inline int rows() const { return rows_; }\r
- inline int cols() const { return cols_; }\r
- inline Size size() const { return Size(cols_, rows_); }\r
- inline bool empty() const { return rows_ == 0 || cols_ == 0; }\r
-\r
- inline int type() const { return type_; }\r
- inline int depth() const { return CV_MAT_DEPTH(type_); }\r
- inline int channels() const { return CV_MAT_CN(type_); }\r
- inline int elemSize() const { return CV_ELEM_SIZE(type_); }\r
- inline int elemSize1() const { return CV_ELEM_SIZE1(type_); }\r
-\r
- inline Usage usage() const { return usage_; }\r
-\r
- class Impl;\r
- private:\r
- int rows_;\r
- int cols_;\r
- int type_;\r
- Usage usage_;\r
-\r
- Ptr<Impl> impl_;\r
+ ARRAY_BUFFER = 0x8892, // buffer will be used for OpenGL arrays (vertices, colors, normals, etc)\r
+ TEXTURE_BUFFER = 0x88EC // buffer will be used for OpenGL textures\r
};\r
\r
- template <> CV_EXPORTS void Ptr<GlBuffer::Impl>::delete_obj();\r
+ //! create empty buffer\r
+ explicit GlBuffer(Usage usage);\r
+\r
+ //! create buffer\r
+ GlBuffer(int rows, int cols, int type, Usage usage);\r
+ GlBuffer(Size size, int type, Usage usage);\r
+\r
+ //! copy from host/device memory\r
+ GlBuffer(InputArray mat, Usage usage);\r
+\r
+ void create(int rows, int cols, int type, Usage usage);\r
+ void create(Size size, int type, Usage usage);\r
+ void create(int rows, int cols, int type);\r
+ void create(Size size, int type);\r
+\r
+ void release();\r
+\r
+ //! copy from host/device memory\r
+ void copyFrom(InputArray mat);\r
+\r
+ void bind() const;\r
+ void unbind() const;\r
+\r
+ //! map to host memory\r
+ Mat mapHost();\r
+ void unmapHost();\r
+\r
+ //! map to device memory\r
+ gpu::GpuMat mapDevice();\r
+ void unmapDevice();\r
+\r
+ inline int rows() const { return rows_; }\r
+ inline int cols() const { return cols_; }\r
+ inline Size size() const { return Size(cols_, rows_); }\r
+ inline bool empty() const { return rows_ == 0 || cols_ == 0; }\r
+\r
+ inline int type() const { return type_; }\r
+ inline int depth() const { return CV_MAT_DEPTH(type_); }\r
+ inline int channels() const { return CV_MAT_CN(type_); }\r
+ inline int elemSize() const { return CV_ELEM_SIZE(type_); }\r
+ inline int elemSize1() const { return CV_ELEM_SIZE1(type_); }\r
+\r
+ inline Usage usage() const { return usage_; }\r
+\r
+ class Impl;\r
+private:\r
+ int rows_;\r
+ int cols_;\r
+ int type_;\r
+ Usage usage_;\r
+\r
+ Ptr<Impl> impl_;\r
+};\r
+\r
+template <> CV_EXPORTS void Ptr<GlBuffer::Impl>::delete_obj();\r
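// Illustrative sketch, not part of the patch: filling a vertex buffer through
// the GlBuffer interface declared above. A current OpenGL context is assumed;
// uploadVertices is a hypothetical helper.
static void uploadVertices( const cv::Mat& vertices3f )
{
    cv::GlBuffer buf(vertices3f, cv::GlBuffer::ARRAY_BUFFER);  // copy from host memory
    buf.bind();
    // ... issue the draw calls here ...
    buf.unbind();
}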
+\r
+//! Smart pointer for OpenGL 2d texture memory with reference counting.\r
+class CV_EXPORTS GlTexture\r
+{\r
+public:\r
+ //! create empty texture\r
+ GlTexture();\r
+\r
+ //! create texture\r
+ GlTexture(int rows, int cols, int type);\r
+ GlTexture(Size size, int type);\r
+\r
+ //! copy from host/device memory\r
+ explicit GlTexture(InputArray mat, bool bgra = true);\r
+\r
+ void create(int rows, int cols, int type);\r
+ void create(Size size, int type);\r
+ void release();\r
+\r
+ //! copy from host/device memory\r
+ void copyFrom(InputArray mat, bool bgra = true);\r
+\r
+ void bind() const;\r
+ void unbind() const;\r
+\r
+ inline int rows() const { return rows_; }\r
+ inline int cols() const { return cols_; }\r
+ inline Size size() const { return Size(cols_, rows_); }\r
+ inline bool empty() const { return rows_ == 0 || cols_ == 0; }\r
\r
- //! Smart pointer for OpenGL 2d texture memory with reference counting.\r
- class CV_EXPORTS GlTexture\r
+ inline int type() const { return type_; }\r
+ inline int depth() const { return CV_MAT_DEPTH(type_); }\r
+ inline int channels() const { return CV_MAT_CN(type_); }\r
+ inline int elemSize() const { return CV_ELEM_SIZE(type_); }\r
+ inline int elemSize1() const { return CV_ELEM_SIZE1(type_); }\r
+\r
+ class Impl;\r
+private:\r
+ int rows_;\r
+ int cols_;\r
+ int type_;\r
+\r
+ Ptr<Impl> impl_;\r
+ GlBuffer buf_;\r
+};\r
+\r
+template <> CV_EXPORTS void Ptr<GlTexture::Impl>::delete_obj();\r
+\r
+//! OpenGL Arrays\r
+class CV_EXPORTS GlArrays\r
+{\r
+public:\r
+ inline GlArrays()\r
+ : vertex_(GlBuffer::ARRAY_BUFFER), color_(GlBuffer::ARRAY_BUFFER), bgra_(true), normal_(GlBuffer::ARRAY_BUFFER), texCoord_(GlBuffer::ARRAY_BUFFER)\r
{\r
- public:\r
- //! create empty texture\r
- GlTexture();\r
-\r
- //! create texture\r
- GlTexture(int rows, int cols, int type);\r
- GlTexture(Size size, int type);\r
-\r
- //! copy from host/device memory\r
- explicit GlTexture(InputArray mat, bool bgra = true);\r
-\r
- void create(int rows, int cols, int type);\r
- inline void create(Size size, int type) { create(size.height, size.width, type); }\r
- void release();\r
-\r
- //! copy from host/device memory\r
- void copyFrom(InputArray mat, bool bgra = true);\r
-\r
- void bind() const;\r
- void unbind() const;\r
-\r
- inline int rows() const { return rows_; }\r
- inline int cols() const { return cols_; }\r
- inline Size size() const { return Size(cols_, rows_); }\r
- inline bool empty() const { return rows_ == 0 || cols_ == 0; }\r
-\r
- inline int type() const { return type_; }\r
- inline int depth() const { return CV_MAT_DEPTH(type_); }\r
- inline int channels() const { return CV_MAT_CN(type_); }\r
- inline int elemSize() const { return CV_ELEM_SIZE(type_); }\r
- inline int elemSize1() const { return CV_ELEM_SIZE1(type_); }\r
-\r
- class Impl;\r
- private:\r
- int rows_;\r
- int cols_;\r
- int type_;\r
-\r
- Ptr<Impl> impl_;\r
- GlBuffer buf_;\r
- };\r
+ }\r
+\r
+ void setVertexArray(InputArray vertex);\r
+ inline void resetVertexArray() { vertex_.release(); }\r
+\r
+ void setColorArray(InputArray color, bool bgra = true);\r
+ inline void resetColorArray() { color_.release(); }\r
+\r
+ void setNormalArray(InputArray normal);\r
+ inline void resetNormalArray() { normal_.release(); }\r
+\r
+ void setTexCoordArray(InputArray texCoord);\r
+ inline void resetTexCoordArray() { texCoord_.release(); }\r
+\r
+ void bind() const;\r
+ void unbind() const;\r
\r
- template <> CV_EXPORTS void Ptr<GlTexture::Impl>::delete_obj();\r
+ inline int rows() const { return vertex_.rows(); }\r
+ inline int cols() const { return vertex_.cols(); }\r
+ inline Size size() const { return vertex_.size(); }\r
+ inline bool empty() const { return vertex_.empty(); }\r
\r
- //! OpenGL Arrays\r
- class CV_EXPORTS GlArrays\r
+private:\r
+ GlBuffer vertex_;\r
+ GlBuffer color_;\r
+ bool bgra_;\r
+ GlBuffer normal_;\r
+ GlBuffer texCoord_;\r
+};\r
+\r
+//! OpenGL Font\r
+class CV_EXPORTS GlFont\r
+{\r
+public:\r
+ enum Weight\r
{\r
- public:\r
- inline GlArrays() \r
- : vertex_(GlBuffer::ARRAY_BUFFER), color_(GlBuffer::ARRAY_BUFFER), bgra_(true), normal_(GlBuffer::ARRAY_BUFFER), texCoord_(GlBuffer::ARRAY_BUFFER)\r
- {\r
- }\r
-\r
- void setVertexArray(InputArray vertex);\r
- inline void resetVertexArray() { vertex_.release(); }\r
-\r
- void setColorArray(InputArray color, bool bgra = true);\r
- inline void resetColorArray() { color_.release(); }\r
- \r
- void setNormalArray(InputArray normal);\r
- inline void resetNormalArray() { normal_.release(); }\r
- \r
- void setTexCoordArray(InputArray texCoord);\r
- inline void resetTexCoordArray() { texCoord_.release(); }\r
-\r
- void bind() const;\r
- void unbind() const;\r
-\r
- inline int rows() const { return vertex_.rows(); }\r
- inline int cols() const { return vertex_.cols(); }\r
- inline Size size() const { return vertex_.size(); }\r
- inline bool empty() const { return vertex_.empty(); }\r
-\r
- private:\r
- GlBuffer vertex_;\r
- GlBuffer color_;\r
- bool bgra_;\r
- GlBuffer normal_;\r
- GlBuffer texCoord_;\r
+ WEIGHT_LIGHT = 300,\r
+ WEIGHT_NORMAL = 400,\r
+ WEIGHT_SEMIBOLD = 600,\r
+ WEIGHT_BOLD = 700,\r
+ WEIGHT_BLACK = 900\r
};\r
\r
- //! OpenGL Font\r
- class CV_EXPORTS GlFont\r
+ enum Style\r
{\r
- public:\r
- enum Weight \r
- {\r
- WEIGHT_LIGHT = 300,\r
- WEIGHT_NORMAL = 400,\r
- WEIGHT_SEMIBOLD = 600,\r
- WEIGHT_BOLD = 700,\r
- WEIGHT_BLACK = 900\r
- };\r
-\r
- enum Style \r
- { \r
- STYLE_NORMAL = 0,\r
- STYLE_ITALIC = 1,\r
- STYLE_UNDERLINE = 2\r
- };\r
-\r
- static Ptr<GlFont> get(const std::string& family, int height = 12, Weight weight = WEIGHT_NORMAL, Style style = STYLE_NORMAL);\r
-\r
- void draw(const char* str, int len) const;\r
-\r
- inline const std::string& family() const { return family_; }\r
- inline int height() const { return height_; }\r
- inline Weight weight() const { return weight_; }\r
- inline Style style() const { return style_; }\r
-\r
- private:\r
- GlFont(const std::string& family, int height, Weight weight, Style style);\r
-\r
- std::string family_;\r
- int height_;\r
- Weight weight_;\r
- Style style_;\r
-\r
- unsigned int base_;\r
-\r
- GlFont(const GlFont&);\r
- GlFont& operator =(const GlFont&);\r
+ STYLE_NORMAL = 0,\r
+ STYLE_ITALIC = 1,\r
+ STYLE_UNDERLINE = 2\r
};\r
\r
- //! render functions\r
-\r
- //! render texture rectangle in window\r
- CV_EXPORTS void render(const GlTexture& tex, \r
- Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0), \r
- Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0));\r
-\r
- //! render mode\r
- namespace RenderMode {\r
- enum {\r
- POINTS = 0x0000,\r
- LINES = 0x0001,\r
- LINE_LOOP = 0x0002,\r
- LINE_STRIP = 0x0003,\r
- TRIANGLES = 0x0004,\r
- TRIANGLE_STRIP = 0x0005,\r
- TRIANGLE_FAN = 0x0006,\r
- QUADS = 0x0007,\r
- QUAD_STRIP = 0x0008,\r
- POLYGON = 0x0009\r
- };\r
- }\r
+ static Ptr<GlFont> get(const std::string& family, int height = 12, Weight weight = WEIGHT_NORMAL, Style style = STYLE_NORMAL);\r
+\r
+ void draw(const char* str, int len) const;\r
+\r
+ inline const std::string& family() const { return family_; }\r
+ inline int height() const { return height_; }\r
+ inline Weight weight() const { return weight_; }\r
+ inline Style style() const { return style_; }\r
+\r
+private:\r
+ GlFont(const std::string& family, int height, Weight weight, Style style);\r
+\r
+ std::string family_;\r
+ int height_;\r
+ Weight weight_;\r
+ Style style_;\r
+\r
+ unsigned int base_;\r
+\r
+ GlFont(const GlFont&);\r
+ GlFont& operator =(const GlFont&);\r
+};\r
+\r
+//! render functions\r
+\r
+//! render texture rectangle in window\r
+CV_EXPORTS void render(const GlTexture& tex,\r
+ Rect_<double> wndRect = Rect_<double>(0.0, 0.0, 1.0, 1.0),\r
+ Rect_<double> texRect = Rect_<double>(0.0, 0.0, 1.0, 1.0));\r
+\r
+//! render mode\r
+namespace RenderMode {\r
+ enum {\r
+ POINTS = 0x0000,\r
+ LINES = 0x0001,\r
+ LINE_LOOP = 0x0002,\r
+ LINE_STRIP = 0x0003,\r
+ TRIANGLES = 0x0004,\r
+ TRIANGLE_STRIP = 0x0005,\r
+ TRIANGLE_FAN = 0x0006,\r
+ QUADS = 0x0007,\r
+ QUAD_STRIP = 0x0008,\r
+ POLYGON = 0x0009\r
+ };\r
+}\r
\r
- //! render OpenGL arrays\r
- CV_EXPORTS void render(const GlArrays& arr, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255));\r
+//! render OpenGL arrays\r
+CV_EXPORTS void render(const GlArrays& arr, int mode = RenderMode::POINTS, Scalar color = Scalar::all(255));\r
\r
- CV_EXPORTS void render(const std::string& str, const Ptr<GlFont>& font, Scalar color, Point2d pos);\r
+CV_EXPORTS void render(const std::string& str, const Ptr<GlFont>& font, Scalar color, Point2d pos);\r
\r
- //! OpenGL camera\r
- class CV_EXPORTS GlCamera\r
- {\r
- public:\r
- GlCamera();\r
+//! OpenGL camera\r
+class CV_EXPORTS GlCamera\r
+{\r
+public:\r
+ GlCamera();\r
\r
- void lookAt(Point3d eye, Point3d center, Point3d up);\r
- void setCameraPos(Point3d pos, double yaw, double pitch, double roll);\r
+ void lookAt(Point3d eye, Point3d center, Point3d up);\r
+ void setCameraPos(Point3d pos, double yaw, double pitch, double roll);\r
\r
- void setScale(Point3d scale);\r
+ void setScale(Point3d scale);\r
\r
- void setProjectionMatrix(const Mat& projectionMatrix, bool transpose = true);\r
- void setPerspectiveProjection(double fov, double aspect, double zNear, double zFar);\r
- void setOrthoProjection(double left, double right, double bottom, double top, double zNear, double zFar);\r
+ void setProjectionMatrix(const Mat& projectionMatrix, bool transpose = true);\r
+ void setPerspectiveProjection(double fov, double aspect, double zNear, double zFar);\r
+ void setOrthoProjection(double left, double right, double bottom, double top, double zNear, double zFar);\r
\r
- void setupProjectionMatrix() const;\r
- void setupModelViewMatrix() const;\r
+ void setupProjectionMatrix() const;\r
+ void setupModelViewMatrix() const;\r
\r
- private:\r
- Point3d eye_;\r
- Point3d center_;\r
- Point3d up_;\r
+private:\r
+ Point3d eye_;\r
+ Point3d center_;\r
+ Point3d up_;\r
\r
- Point3d pos_;\r
- double yaw_;\r
- double pitch_;\r
- double roll_;\r
+ Point3d pos_;\r
+ double yaw_;\r
+ double pitch_;\r
+ double roll_;\r
\r
- bool useLookAtParams_;\r
+ bool useLookAtParams_;\r
\r
- Point3d scale_;\r
+ Point3d scale_;\r
\r
- Mat projectionMatrix_;\r
+ Mat projectionMatrix_;\r
\r
- double fov_;\r
- double aspect_;\r
+ double fov_;\r
+ double aspect_;\r
\r
- double left_;\r
- double right_;\r
- double bottom_;\r
- double top_;\r
+ double left_;\r
+ double right_;\r
+ double bottom_;\r
+ double top_;\r
\r
- double zNear_;\r
- double zFar_;\r
+ double zNear_;\r
+ double zFar_;\r
\r
- bool perspectiveProjection_;\r
- };\r
+ bool perspectiveProjection_;\r
+};\r
\r
- namespace gpu \r
- {\r
- //! set a CUDA device to use OpenGL interoperability\r
- CV_EXPORTS void setGlDevice(int device = 0);\r
- }\r
+inline void GlBuffer::create(Size _size, int _type, Usage _usage) { create(_size.height, _size.width, _type, _usage); }\r
+inline void GlBuffer::create(int _rows, int _cols, int _type) { create(_rows, _cols, _type, usage()); }\r
+inline void GlBuffer::create(Size _size, int _type) { create(_size.height, _size.width, _type, usage()); }\r
+inline void GlTexture::create(Size _size, int _type) { create(_size.height, _size.width, _type); }\r
+\r
+namespace gpu\r
+{\r
+ //! set a CUDA device to use OpenGL interoperability\r
+ CV_EXPORTS void setGlDevice(int device = 0);\r
+}\r
} // namespace cv\r
\r
#endif // __cplusplus\r
#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32) // atomic increment on the linux version of the Intel(tm) compiler
#define CV_XADD(addr,delta) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
#elif defined __GNUC__
-
+
#if __GNUC__*10 + __GNUC_MINOR__ >= 42
#if !defined WIN32 && (defined __i486__ || defined __i586__ || \
#define CV_XADD __exchange_and_add
#endif
#endif
-
+
#elif defined WIN32 || defined _WIN32
#define WIN32_MEAN_AND_LEAN
#ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
#else
static inline int CV_XADD(int* addr, int delta)
- { int tmp = *addr; *addr += delta; return tmp; }
+ { int tmp = *addr; *addr += delta; return tmp; }
#endif
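// Illustrative sketch, not part of the patch: CV_XADD is the atomic
// fetch-and-add used for reference counting; it returns the previous value.
// releaseRef is a hypothetical helper.
static bool releaseRef( int* refcount )
{
    return CV_XADD(refcount, -1) == 1;   // true when the last reference was just dropped
}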
#include <limits>
namespace cv
{
-
+
using std::cos;
using std::sin;
using std::max;
using std::pow;
using std::sqrt;
-
+
/////////////// saturate_cast (used in image & signal processing) ///////////////////
template<typename _Tp> static inline _Tp saturate_cast(uchar v) { return _Tp(v); }
// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc.
template<> inline unsigned saturate_cast<unsigned>(float v){ return cvRound(v); }
template<> inline unsigned saturate_cast<unsigned>(double v) { return cvRound(v); }
-
+
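// Illustrative sketch, not part of the patch: saturate_cast clamps to the
// destination range instead of wrapping around; saturateExample is hypothetical.
static void saturateExample()
{
    uchar a = cv::saturate_cast<uchar>(300);     // 255, clamped at the top
    uchar b = cv::saturate_cast<uchar>(-10.5);   // 0, rounded then clamped at the bottom
    short c = cv::saturate_cast<short>(70000);   // 32767
    (void)a; (void)b; (void)c;
}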
inline int fast_abs(uchar v) { return v; }
inline int fast_abs(schar v) { return std::abs((int)v); }
inline int fast_abs(ushort v) { return v; }
for(int i = 10; i < channels; i++) val[i] = _Tp(0);
}
-
+
template<typename _Tp, int m, int n>
inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
_Tp v4, _Tp v5, _Tp v6, _Tp v7,
return s;
}
-
+
template<typename _Tp, int m, int n> inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const
{
double s = 0;
cv::randu(matM, Scalar(a), Scalar(b));
return M;
}
-
+
template<typename _Tp, int m, int n> inline
Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b)
{
cv::randn(matM, Scalar(a), Scalar(b));
return M;
}
-
+
template<typename _Tp, int m, int n> template<typename T2>
inline Matx<_Tp, m, n>::operator Matx<T2, m, n>() const
{
for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast<T2>(val[i]);
return M;
}
-
+
template<typename _Tp, int m, int n> template<int m1, int n1> inline
Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const
return Matx<_Tp, 1, n>(&val[i*n]);
}
-
+
template<typename _Tp, int m, int n> inline
Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const
{
return v;
}
-
+
template<typename _Tp, int m, int n> inline
typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const
{
return d;
}
-
+
template<typename _Tp, int m, int n> inline
const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const
{
return this->val[i*n + j];
}
-
+
template<typename _Tp, int m, int n> inline
_Tp& Matx<_Tp, m, n>::operator ()(int i, int j)
{
return val[i];
}
-
+
template<typename _Tp1, typename _Tp2, int m, int n> static inline
Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b)
{
for( int i = 0; i < m*n; i++ )
a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]);
return a;
-}
+}
+
-
template<typename _Tp1, typename _Tp2, int m, int n> static inline
Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b)
{
for( int i = 0; i < m*n; i++ )
a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]);
return a;
-}
+}
template<typename _Tp, int m, int n> inline
val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]);
}
-
+
template<typename _Tp, int m, int n> inline
Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp)
{
for( int i = 0; i < m*n; i++ )
val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]);
}
-
-
+
+
template<typename _Tp, int m, int n> template<typename _T2> inline
Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp)
{
for( int i = 0; i < m*n; i++ )
val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
}
-
-
+
+
template<typename _Tp, int m, int n> inline
Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp)
{
for( int i = 0; i < m*n; i++ )
val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]);
}
-
-
+
+
template<typename _Tp, int m, int n> template<int l> inline
Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp)
{
val[i*n + j] = s;
}
}
-
-
+
+
template<typename _Tp, int m, int n> inline
Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp)
{
val[i*n + j] = a(j, i);
}
-
+
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
{
return Matx<_Tp, m, n>(a, b, Matx_AddOp());
}
-
-
+
+
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
{
return Matx<_Tp, m, n>(a, b, Matx_SubOp());
-}
-
+}
+
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha)
for( int i = 0; i < m*n; i++ )
a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
return a;
-}
-
+}
+
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha)
{
for( int i = 0; i < m*n; i++ )
a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
return a;
-}
+}
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha)
for( int i = 0; i < m*n; i++ )
a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
return a;
-}
+}
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha)
{
return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}
+}
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha)
{
return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}
+}
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha)
{
return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}
-
+}
+
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a)
{
return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}
+}
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a)
{
return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}
+}
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a)
{
return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}
-
+}
+
template<typename _Tp, int m, int n> static inline
Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a)
{
return Matx<_Tp, m, n>(a, b, Matx_MatMulOp());
}
-
+
template<typename _Tp, int m, int n> static inline
Vec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b)
{
Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp());
return reinterpret_cast<const Vec<_Tp, m>&>(c);
}
-
-
+
+
template<typename _Tp> static inline
Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b)
{
return Point_<_Tp>(tmp.val[0], tmp.val[1]);
}
-
+
template<typename _Tp> static inline
Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b)
{
Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, b.z);
return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);
-}
+}
template<typename _Tp> static inline
{
Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, 1);
return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);
-}
+}
+
-
template<typename _Tp> static inline
Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b)
{
return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1);
-}
+}
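// Illustrative sketch, not part of the patch: the Matx-by-Point operators
// above make small fixed-size transforms read naturally; rotatePointExample
// is a hypothetical helper.
static void rotatePointExample()
{
    cv::Matx22f R( 0.f, -1.f,
                   1.f,  0.f );                       // 90-degree rotation
    cv::Point2f p = R * cv::Point2f(1.f, 0.f);        // yields (0, 1)
    (void)p;
}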
template<typename _Tp> static inline
return reinterpret_cast<const Scalar&>(c);
}
-
+
static inline
Scalar operator * (const Matx<double, 4, 4>& a, const Scalar& b)
{
return reinterpret_cast<const Scalar&>(c);
}
-
+
template<typename _Tp, int m, int n> inline
Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const
{
return Matx<_Tp, m, n>(*this, a, Matx_MulOp());
}
-
+
CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n);
CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n);
CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n);
-CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);
+CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);
template<typename _Tp, int m> struct CV_EXPORTS Matx_DetOp
return p;
}
};
-
+
template<typename _Tp> struct CV_EXPORTS Matx_DetOp<_Tp, 1>
{
a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1));
}
};
-
+
template<typename _Tp, int m> static inline
double determinant(const Matx<_Tp, m, m>& a)
{
- return Matx_DetOp<_Tp, m>()(a);
+ return Matx_DetOp<_Tp, m>()(a);
}
-
+
template<typename _Tp, int m, int n> static inline
double trace(const Matx<_Tp, m, n>& a)
{
    _Tp s = 0;
for( int i = 0; i < std::min(m, n); i++ )
s += a(i,i);
return s;
-}
+}
+
-
template<typename _Tp, int m, int n> inline
Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const
{
    return Matx<_Tp, n, m>(*this, Matx_TOp());
}

template<typename _Tp, int m> struct CV_EXPORTS Matx_FastInvOp
{
    bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const
{
Matx<_Tp, m, m> temp = a;
-
+
// assume that b is all 0's on input => make it a unity matrix
for( int i = 0; i < m; i++ )
b(i, i) = (_Tp)1;
-
+
if( method == DECOMP_CHOLESKY )
return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m);
-
+
return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0;
}
};
-
+
template<typename _Tp> struct CV_EXPORTS Matx_FastInvOp<_Tp, 2>
{
bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const
}
};
-
+
template<typename _Tp> struct CV_EXPORTS Matx_FastInvOp<_Tp, 3>
{
bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int) const
b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d;
b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d;
b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d;
-
+
b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d;
b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d;
b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d;
-
+
b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d;
b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d;
b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d;
}
};
-
+
template<typename _Tp, int m, int n> inline
Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const
{
x = b;
if( method == DECOMP_CHOLESKY )
return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n);
-
+
return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0;
}
};
}
};
-
+
template<typename _Tp> struct CV_EXPORTS Matx_FastSolveOp<_Tp, 3, 1>
{
bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b,
x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) -
a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) +
a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2)));
-
+
x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) -
b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) +
a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0)));
-
+
x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) -
a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) +
b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0)));
return true;
}
};
-
-
+
+
template<typename _Tp, int m, int n> template<int l> inline
Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const
{
return ok ? x : Matx<_Tp, n, l>::zeros();
}
-template<typename _Tp, int m, int n> inline
+template<typename _Tp, int m, int n> inline
Vec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const
{
Matx<_Tp, n, 1> x = solve(reinterpret_cast<const Matx<_Tp, m, 1>&>(rhs), method);
return reinterpret_cast<Vec<_Tp, n>&>(x);
}
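// Sketch of how Matx::solve() defined above is typically called (values are
// illustrative only): it solves A*x = rhs with LU by default, or Cholesky when
// DECOMP_CHOLESKY is requested.
#if 0
    cv::Matx22f A( 2.f, 1.f,
                   1.f, 3.f );
    cv::Vec2f   b( 3.f, 5.f );
    cv::Vec2f   x = A.solve(b, cv::DECOMP_LU);   // x ~ (0.8, 1.4)
#endif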
-
+
template<typename _Tp, typename _AccTp> static inline
_AccTp normL2Sqr(const _Tp* a, int n)
{
s = std::max(s, (_AccTp)fast_abs(a[i]));
return s;
}
-
-
+
+
template<typename _Tp, typename _AccTp> static inline
_AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n)
{
#if CV_ENABLE_UNROLLED
for(; i <= n - 4; i += 4 )
{
- _AccTp v0 = a[i] - b[i], v1 = a[i+1] - b[i+1], v2 = a[i+2] - b[i+2], v3 = a[i+3] - b[i+3];
+ _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
s += v0*v0 + v1*v1 + v2*v2 + v3*v3;
}
#endif
for( ; i < n; i++ )
{
- _AccTp v = (_AccTp)(a[i] - b[i]);
+ _AccTp v = _AccTp(a[i] - b[i]);
s += v*v;
}
return s;
CV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n);
CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n);
CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n, int cellSize);
-
+
template<> inline float normL2Sqr(const float* a, const float* b, int n)
{
if( n >= 8 )
return s;
}
-
+
template<typename _Tp, typename _AccTp> static inline
_AccTp normL1(const _Tp* a, const _Tp* b, int n)
{
#if CV_ENABLE_UNROLLED
for(; i <= n - 4; i += 4 )
{
- _AccTp v0 = a[i] - b[i], v1 = a[i+1] - b[i+1], v2 = a[i+2] - b[i+2], v3 = a[i+3] - b[i+3];
+ _AccTp v0 = _AccTp(a[i] - b[i]), v1 = _AccTp(a[i+1] - b[i+1]), v2 = _AccTp(a[i+2] - b[i+2]), v3 = _AccTp(a[i+3] - b[i+3]);
s += std::abs(v0) + std::abs(v1) + std::abs(v2) + std::abs(v3);
}
#endif
for( ; i < n; i++ )
{
- _AccTp v = (_AccTp)(a[i] - b[i]);
+ _AccTp v = _AccTp(a[i] - b[i]);
s += std::abs(v);
}
return s;
template<> inline int normL1(const uchar* a, const uchar* b, int n)
{
return normL1_(a, b, n);
-}
+}
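// Sketch of how the explicit accumulator type of these norm helpers is meant to
// be used: choosing a wider _AccTp keeps sums over small element types from
// overflowing. Values below are illustrative only.
#if 0
    uchar a[4] = { 255, 0, 255, 0 };
    uchar b[4] = { 0, 255, 0, 255 };
    int   l1 = cv::normL1<uchar, int>(a, b, 4);       // 4*255   == 1020
    float l2 = cv::normL2Sqr<uchar, float>(a, b, 4);  // 4*255^2 == 260100
#endif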
template<typename _Tp, typename _AccTp> static inline
_AccTp normInf(const _Tp* a, const _Tp* b, int n)
}
return s;
}
-
+
template<typename _Tp, int m, int n> static inline
double norm(const Matx<_Tp, m, n>& M)
{
return std::sqrt(normL2Sqr<_Tp, double>(M.val, m*n));
}
-
+
template<typename _Tp, int m, int n> static inline
double norm(const Matx<_Tp, m, n>& M, int normType)
{
normType == NORM_L1 ? (double)normL1<_Tp, DataType<_Tp>::work_type>(M.val, m*n) :
std::sqrt((double)normL2Sqr<_Tp, DataType<_Tp>::work_type>(M.val, m*n));
}
-
-
+
+
template<typename _Tp, int m, int n> static inline
bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
{
if( a.val[i] != b.val[i] ) return false;
return true;
}
-
+
template<typename _Tp, int m, int n> static inline
bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
{
{
CV_DbgAssert( idx == n*m );
return *dst;
-}
+}
/////////////////////////// short vector (Vec) /////////////////////////////
_Tp v8, _Tp v9)
: Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9)
{}
-
+
template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(const _Tp* values)
: Matx<_Tp, cn, 1>(values)
{}
-
+
template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m)
: Matx<_Tp, cn, 1>(m.val)
template<typename _Tp, int cn> template<typename _T2> inline
Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op)
: Matx<_Tp, cn, 1>(a, alpha, op)
-{}
-
+{}
+
template<typename _Tp, int cn> inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha)
{
Vec v;
template<typename _Tp> Vec<_Tp, 4> conjugate(const Vec<_Tp, 4>& v)
{
return Vec<_Tp, 4>(v[0], -v[1], -v[2], -v[3]);
-}
-
+}
+
template<> inline Vec<float, 2> Vec<float, 2>::conj() const
{
return conjugate(*this);
{
return conjugate(*this);
}
-
+
template<typename _Tp, int cn> inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>& v) const
{
CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined");
return Vec<_Tp, cn>();
}
-
+
template<typename _Tp, int cn> template<typename T2>
inline Vec<_Tp, cn>::operator Vec<T2, cn>() const
{
CV_DbgAssert( (unsigned)i < (unsigned)cn );
return this->val[i];
}
-
+
template<typename _Tp, int cn> inline _Tp& Vec<_Tp, cn>::operator [](int i)
{
CV_DbgAssert( (unsigned)i < (unsigned)cn );
{
CV_DbgAssert( (unsigned)i < (unsigned)cn );
return this->val[i];
-}
-
+}
+
template<typename _Tp1, typename _Tp2, int cn> static inline Vec<_Tp1, cn>&
operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)
{
for( int i = 0; i < cn; i++ )
a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]);
return a;
-}
+}
template<typename _Tp1, typename _Tp2, int cn> static inline Vec<_Tp1, cn>&
operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)
{
for( int i = 0; i < cn; i++ )
a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]);
return a;
-}
-
+}
+
template<typename _Tp, int cn> static inline Vec<_Tp, cn>
operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b)
{
a[i] = saturate_cast<_Tp>(a[i]*alpha);
return a;
}
-
+
template<typename _Tp, int cn> static inline
Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha)
{
a[i] = saturate_cast<_Tp>(a[i]*ialpha);
return a;
}
-
+
template<typename _Tp, int cn> static inline
Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, float alpha)
{
for( int i = 0; i < cn; i++ )
a[i] = saturate_cast<_Tp>(a[i]*ialpha);
return a;
-}
-
+}
+
template<typename _Tp, int cn> static inline Vec<_Tp, cn>
operator * (const Vec<_Tp, cn>& a, int alpha)
{
operator * (double alpha, const Vec<_Tp, cn>& a)
{
return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
-}
+}
template<typename _Tp, int cn> static inline Vec<_Tp, cn>
operator / (const Vec<_Tp, cn>& a, int alpha)
operator / (const Vec<_Tp, cn>& a, float alpha)
{
return Vec<_Tp, cn>(a, 1.f/alpha, Matx_ScaleOp());
-}
+}
template<typename _Tp, int cn> static inline Vec<_Tp, cn>
operator / (const Vec<_Tp, cn>& a, double alpha)
{
return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp());
-}
-
+}
+
template<typename _Tp, int cn> static inline Vec<_Tp, cn>
operator - (const Vec<_Tp, cn>& a)
{
saturate_cast<_Tp>(v1[0]*v2[2] - v1[1]*v2[3] + v1[2]*v2[0] + v1[3]*v2[1]),
saturate_cast<_Tp>(v1[0]*v2[3] + v1[1]*v2[2] - v1[2]*v2[1] + v1[3]*v2[0]));
}
-
+
template<typename _Tp> inline Vec<_Tp, 4>& operator *= (Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2)
{
v1 = v1 * v2;
return v1;
}
-
+
template<> inline Vec<float, 3> Vec<float, 3>::cross(const Vec<float, 3>& v) const
{
return Vec<float,3>(val[1]*v.val[2] - val[2]*v.val[1],
double nv = norm(v);
return v * (nv ? 1./nv : 0.);
}
-
+
template<typename _Tp, typename _T2, int cn> static inline
VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val)
{
VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec);
return (commaInitializer, val);
}
-
+
template<typename _Tp, int cn> inline
VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec)
: MatxCommaInitializer<_Tp, cn, 1>(_vec)
{
CV_DbgAssert( this->idx == cn );
return *this->dst;
-}
+}
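// Illustrative sketch of the comma initializer above, assuming it writes
// element by element into the target vector:
#if 0
    cv::Vec3f v;
    v << 1.f, 2.f, 3.f;   // v == (1, 2, 3)
#endif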
//////////////////////////////// Complex //////////////////////////////
template<typename _Tp> static inline
bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b)
-{ return a.re != b.re || a.im != b.im; }
-
+{ return a.re != b.re || a.im != b.im; }
+
template<typename _Tp> static inline
Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b)
{ return Complex<_Tp>( a.re + b.re, a.im + b.im ); }
template<typename _Tp> inline double Point_<_Tp>::cross(const Point_& pt) const
{ return (double)x*pt.y - (double)y*pt.x; }
-
+
template<typename _Tp> static inline Point_<_Tp>&
operator += (Point_<_Tp>& a, const Point_<_Tp>& b)
{
a.x = saturate_cast<_Tp>(a.x*b);
a.y = saturate_cast<_Tp>(a.y*b);
return a;
-}
-
+}
+
template<typename _Tp> static inline double norm(const Point_<_Tp>& pt)
{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); }
template<typename _Tp> static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b)
{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }
-
+
template<typename _Tp> static inline Point_<_Tp> operator * (const Point_<_Tp>& a, float b)
{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); }
{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); }
template<typename _Tp> static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b)
-{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }
-
+{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }
+
//////////////////////////////// 3D Point ////////////////////////////////
template<typename _Tp> inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {}
{ return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); }
template<typename _Tp> inline double Point3_<_Tp>::ddot(const Point3_& pt) const
{ return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; }
-
+
template<typename _Tp> inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const
{
return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x);
a.z = saturate_cast<_Tp>(a.z + b.z);
return a;
}
-
+
template<typename _Tp> static inline Point3_<_Tp>&
operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b)
{
a.y = saturate_cast<_Tp>(a.y - b.y);
a.z = saturate_cast<_Tp>(a.z - b.z);
return a;
-}
-
+}
+
template<typename _Tp> static inline Point3_<_Tp>&
operator *= (Point3_<_Tp>& a, int b)
{
a.y = saturate_cast<_Tp>(a.y*b);
a.z = saturate_cast<_Tp>(a.z*b);
return a;
-}
-
+}
+
template<typename _Tp> static inline double norm(const Point3_<_Tp>& pt)
{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); }
template<typename _Tp> static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b)
{ return a.x != b.x || a.y != b.y || a.z != b.z; }
-
+
template<typename _Tp> static inline Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b)
{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x),
saturate_cast<_Tp>(a.y + b.y),
{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a),
saturate_cast<_Tp>(b.y*a),
saturate_cast<_Tp>(b.z*a) ); }
-
+
//////////////////////////////// Size ////////////////////////////////
template<typename _Tp> inline Size_<_Tp>::Size_()
template<typename _Tp> static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height;
-}
-
+}
+
template<typename _Tp> static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b)
{
return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height );
CvBox2D box; box.center = center; box.size = size; box.angle = angle;
return box;
}
-
+
//////////////////////////////// Scalar_ ///////////////////////////////
template<typename _Tp> inline Scalar_<_Tp>::Scalar_()
saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3]));
}
-
+
template<typename _Tp> static inline Scalar_<_Tp>
operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
{
saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + a[3]*b[1]),
saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0]));
}
-
+
template<typename _Tp> static inline Scalar_<_Tp>&
operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
{
a = a*b;
return a;
-}
-
+}
+
template<typename _Tp> inline Scalar_<_Tp> Scalar_<_Tp>::conj() const
{
return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]),
{
return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0;
}
-
+
template<typename _Tp> static inline
Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha)
{
saturate_cast<_Tp>(a.val[1] / alpha),
saturate_cast<_Tp>(a.val[2] / alpha),
saturate_cast<_Tp>(a.val[3] / alpha));
-}
+}
template<typename _Tp> static inline
Scalar_<float> operator / (const Scalar_<float>& a, float alpha)
{
float s = 1/alpha;
return Scalar_<float>(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s);
-}
+}
template<typename _Tp> static inline
Scalar_<double> operator / (const Scalar_<double>& a, double alpha)
{
double s = 1/alpha;
return Scalar_<double>(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s);
-}
-
+}
+
template<typename _Tp> static inline
Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha)
{
a = a/alpha;
return a;
}
-
+
template<typename _Tp> static inline
Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b)
{
_Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]);
return b.conj()*s;
-}
-
+}
+
template<typename _Tp> static inline
Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
{
a = a/b;
return a;
}
-
+
//////////////////////////////// Range /////////////////////////////////
inline Range::Range() : start(0), end(0) {}
inline Range::operator CvSlice() const
{ return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; }
-
-
+
+
//////////////////////////////// Vector ////////////////////////////////
// template vector class. It is similar to STL's vector,
typedef const _Tp* const_iterator;
typedef _Tp& reference;
typedef const _Tp& const_reference;
-
+
struct CV_EXPORTS Hdr
{
Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {};
size_t size;
size_t capacity;
};
-
+
Vector() {}
Vector(size_t _size) { resize(_size); }
Vector(size_t _size, const _Tp& val)
}
Vector(_Tp* _data, size_t _size, bool _copyData=false)
{ set(_data, _size, _copyData); }
-
+
template<int n> Vector(const Vec<_Tp, n>& vec)
- { set((_Tp*)&vec.val[0], n, true); }
-
+ { set((_Tp*)&vec.val[0], n, true); }
+
Vector(const std::vector<_Tp>& vec, bool _copyData=false)
- { set(!vec.empty() ? (_Tp*)&vec[0] : 0, vec.size(), _copyData); }
-
+ { set(!vec.empty() ? (_Tp*)&vec[0] : 0, vec.size(), _copyData); }
+
Vector(const Vector& d) { *this = d; }
-
+
Vector(const Vector& d, const Range& r_)
{
Range r = r_ == Range::all() ? Range(0, d.size()) : r_;
hdr.capacity = hdr.size = r.size();
}
}
-
+
Vector<_Tp>& operator = (const Vector& d)
{
if( this != &d )
}
return *this;
}
-
+
~Vector() { release(); }
-
+
Vector<_Tp> clone() const
{ return hdr.data ? Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); }
-
+
void copyTo(Vector<_Tp>& vec) const
{
size_t i, sz = size();
for( i = 0; i < sz; i++ )
dst[i] = src[i];
}
-
+
void copyTo(std::vector<_Tp>& vec) const
{
size_t i, sz = size();
for( i = 0; i < sz; i++ )
dst[i] = src[i];
}
-
+
operator CvMat() const
{ return cvMat((int)size(), 1, type(), (void*)hdr.data); }
-
+
_Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; }
const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; }
Vector operator() (const Range& r) const { return Vector(*this, r); }
const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; }
_Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; }
const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; }
-
+
_Tp* begin() { return hdr.data; }
_Tp* end() { return hdr.data + hdr.size; }
const _Tp* begin() const { return hdr.data; }
const _Tp* end() const { return hdr.data + hdr.size; }
-
+
void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); }
void release()
{
}
hdr = Hdr();
}
-
+
void set(_Tp* _data, size_t _size, bool _copyData=false)
{
if( !_copyData )
hdr.size = _size;
}
}
-
+
void reserve(size_t newCapacity)
{
_Tp* newData;
hdr.size = oldSize;
hdr.refcount = newRefcount;
}
-
+
void resize(size_t newSize)
{
size_t i;
hdr.data[i] = _Tp();
hdr.size = newSize;
}
-
+
Vector<_Tp>& push_back(const _Tp& elem)
{
if( hdr.size == hdr.capacity )
hdr.data[hdr.size++] = elem;
return *this;
}
-
+
Vector<_Tp>& pop_back()
{
if( hdr.size > 0 )
--hdr.size;
return *this;
}
-
+
size_t size() const { return hdr.size; }
size_t capacity() const { return hdr.capacity; }
bool empty() const { return hdr.size == 0; }
void clear() { resize(0); }
int type() const { return DataType<_Tp>::type; }
-
+
protected:
Hdr hdr;
-};
+};
+
-
template<typename _Tp> inline typename DataType<_Tp>::work_type
dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2)
{
}
return s;
}
-
+
// Multiply-with-Carry RNG
inline RNG::RNG() { state = 0xffffffff; }
inline RNG::RNG(uint64 _state) { state = _state ? _state : 0xffffffff; }
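// Minimal usage sketch of the RNG wrapper (illustrative only):
#if 0
    cv::RNG rng(12345);                 // seeded multiply-with-carry generator
    int    i = rng.uniform(0, 10);      // uniform integer in [0, 10)
    double x = rng.uniform(0.0, 1.0);   // uniform double in [0, 1)
    double g = rng.gaussian(2.0);       // zero-mean Gaussian sample, sigma = 2
#endif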
p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize);
return p;
}
-
+
/////////////////////////////// AutoBuffer ////////////////////////////////////////
template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::AutoBuffer()
template<typename _Tp> inline Ptr<_Tp>::~Ptr() { release(); }
-template<typename _Tp> inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& ptr)
+template<typename _Tp> inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& _ptr)
{
- obj = ptr.obj;
- refcount = ptr.refcount;
+ obj = _ptr.obj;
+ refcount = _ptr.refcount;
addref();
}
-template<typename _Tp> inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& ptr)
+template<typename _Tp> inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& _ptr)
{
- int* _refcount = ptr.refcount;
+ int* _refcount = _ptr.refcount;
if( _refcount )
CV_XADD(_refcount, 1);
release();
- obj = ptr.obj;
+ obj = _ptr.obj;
refcount = _refcount;
return *this;
}
p.refcount = refcount;
return p;
}
-
+
template<typename _Tp> template<typename _Tp2> inline const Ptr<_Tp2> Ptr<_Tp>::ptr() const
{
Ptr<_Tp2> p;
p.refcount = refcount;
return p;
}
-
+
//// specialized implementations of Ptr::delete_obj() for classic OpenCV types
template<> CV_EXPORTS void Ptr<CvMat>::delete_obj();
template<> CV_EXPORTS void Ptr<CvSparseMat>::delete_obj();
template<> CV_EXPORTS void Ptr<CvMemStorage>::delete_obj();
template<> CV_EXPORTS void Ptr<CvFileStorage>::delete_obj();
-
+
//////////////////////////////////////// XML & YAML I/O ////////////////////////////////////
CV_EXPORTS_W void write( FileStorage& fs, const string& name, int value );
{
WriteStructContext ws(fs, name, CV_NODE_SEQ+(DataType<_Tp>::fmt != 0 ? CV_NODE_FLOW : 0));
write(fs, vec);
-}
-
+}
+
CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value );
CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value );
-
+
template<typename _Tp> static inline FileStorage& operator << (FileStorage& fs, const _Tp& value)
{
if( !fs.isOpened() )
CV_NODE_IS_INT(node.node->tag) ? node.node->data.i :
CV_NODE_IS_REAL(node.node->tag) ? cvRound(node.node->data.f) : 0x7fffffff;
}
-
+
static inline void read(const FileNode& node, bool& value, bool default_value)
{
int temp; read(node, temp, (int)default_value);
int temp; read(node, temp, (int)default_value);
value = saturate_cast<short>(temp);
}
-
+
static inline void read(const FileNode& node, float& value, float default_value)
{
value = !node.node ? default_value :
CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() );
CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() );
-
+
inline FileNode::operator int() const
{
int value;
}
FileNodeIterator* it;
};
-
+
template<typename _Tp> class CV_EXPORTS VecReaderProxy<_Tp,1>
{
public:
read( it, vec );
}
}
-
+
inline FileNodeIterator FileNode::begin() const
{
return FileNodeIterator(fs, node);
return nclasses;
}
-
+
//////////////////////////////////////////////////////////////////////////////
// bridge C++ => C Seq API
CV_EXPORTS void clearSeq( CvSeq* seq );
CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index );
CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice );
-CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );
+CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );
template<typename _Tp> inline Seq<_Tp>::Seq() : seq(0) {}
template<typename _Tp> inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq)
{ cvSeqPushMulti(seq, elem, (int)count, 0); }
template<typename _Tp> inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count)
-{ cvSeqPushMulti(seq, elem, (int)count, 1); }
-
+{ cvSeqPushMulti(seq, elem, (int)count, 1); }
+
template<typename _Tp> inline _Tp& Seq<_Tp>::back()
{ return *(_Tp*)getSeqElem(seq, -1); }
{ seqPopMulti(seq, elem, (int)count, 0); }
template<typename _Tp> inline void Seq<_Tp>::pop_front(_Tp* elem, size_t count)
-{ seqPopMulti(seq, elem, (int)count, 1); }
+{ seqPopMulti(seq, elem, (int)count, 1); }
template<typename _Tp> inline void Seq<_Tp>::insert(int idx, const _Tp& elem)
{ seqInsert(seq, idx, &elem); }
-
+
template<typename _Tp> inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count)
{
CvMat m = cvMat(1, count, DataType<_Tp>::type, elems);
seqInsertSlice(seq, idx, &m);
}
-
+
template<typename _Tp> inline void Seq<_Tp>::remove(int idx)
{ seqRemove(seq, idx); }
-
+
template<typename _Tp> inline void Seq<_Tp>::remove(const Range& r)
{ seqRemoveSlice(seq, r); }
-
+
template<typename _Tp> inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const
{
size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start;
template<typename _Tp> inline SeqIterator<_Tp>::SeqIterator()
{ memset(this, 0, sizeof(*this)); }
-template<typename _Tp> inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& seq, bool seekEnd)
+template<typename _Tp> inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& _seq, bool seekEnd)
{
- cvStartReadSeq(seq.seq, this);
- index = seekEnd ? seq.seq->total : 0;
+ cvStartReadSeq(_seq.seq, this);
+ index = seekEnd ? _seq.seq->total : 0;
}
template<typename _Tp> inline void SeqIterator<_Tp>::seek(size_t pos)
delete obj;
return 0;
}
-
+
static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList)
{
if(ptr && _fs)
((const _ClsName*)ptr)->write(fs, string(name));
}
}
-
+
static void* clone(const void* ptr)
{
if(!ptr)
}
};
-
+
class CV_EXPORTS Formatter
{
public:
vector<int> params;
};
-
+
/** Writes a point to an output stream in Matlab notation
*/
template<typename _Tp> inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p)
{
    out << "[" << p.x << ", " << p.y << "]";
    return out;
}

/** Writes a 3D point to an output stream in Matlab notation
 */
template<typename _Tp> inline std::ostream& operator<<(std::ostream& out, const Point3_<_Tp>& p)
{
    out << "[" << p.x << ", " << p.y << ", " << p.z << "]";
return out;
-}
+}
static inline Formatted format(const Mat& mtx, const char* fmt,
const vector<int>& params=vector<int>())
Mat my_mat = Mat::eye(3,3,CV_32F);
std::cout << my_mat;
@endverbatim
- */
+ */
static inline std::ostream& operator << (std::ostream& out, const Mat& mtx)
{
Formatter::get()->write(out, mtx);
Mat my_mat = Mat::eye(3,3,CV_32F);
std::cout << my_mat;
@endverbatim
- */
+ */
static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd)
{
fmtd.fmt->write(out, fmtd.mtx);
Formatter::get()->write(out, Mat(vec));
return out;
}
-
+
template<typename _Tp> inline Ptr<_Tp> Algorithm::create(const string& name)
{
return _create(name).ptr<_Tp>();
}
-
-template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::get(const string& name) const
+
+template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::get(const string& _name) const
{
typename ParamType<_Tp>::member_type value;
- info()->get(this, name.c_str(), ParamType<_Tp>::type, &value);
+ info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value);
return value;
}
-template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::get(const char* name) const
+template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const
{
typename ParamType<_Tp>::member_type value;
- info()->get(this, name, ParamType<_Tp>::type, &value);
+ info()->get(this, _name, ParamType<_Tp>::type, &value);
return value;
-}
-
+}
+
}
#endif // __cplusplus
#ifndef __OPENCV_CORE_TYPES_H__
#define __OPENCV_CORE_TYPES_H__
-#if !defined _CRT_SECURE_NO_DEPRECATE && _MSC_VER > 1300
-#define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */
+#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER
+# if _MSC_VER > 1300
+# define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */
+# endif
#endif
#ifndef SKIP_INCLUDES
- #include <assert.h>
- #include <stdlib.h>
- #include <string.h>
- #include <float.h>
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <float.h>
#if !defined _MSC_VER && !defined __BORLANDC__
- #include <stdint.h>
+# include <stdint.h>
+#endif
+
+#if defined __ICL
+# define CV_ICC __ICL
+#elif defined __ICC
+# define CV_ICC __ICC
+#elif defined __ECL
+# define CV_ICC __ECL
+#elif defined __ECC
+# define CV_ICC __ECC
+#elif defined __INTEL_COMPILER
+# define CV_ICC __INTEL_COMPILER
+#endif
+
+#if defined CV_ICC && !defined CV_ENABLE_UNROLLED
+# define CV_ENABLE_UNROLLED 0
+#else
+# define CV_ENABLE_UNROLLED 1
+#endif
+
+#if (defined _M_X64 && defined _MSC_VER && _MSC_VER >= 1400) || (__GNUC__ >= 4 && defined __x86_64__)
+# if defined WIN32
+# include <intrin.h>
+# endif
+# if __SSE2__ || !defined __GNUC__
+# include <emmintrin.h>
+# endif
+#endif
+
+#if defined __BORLANDC__
+# include <fastmath.h>
+#else
+# include <math.h>
+#endif
+
+#ifdef HAVE_IPL
+# ifndef __IPL_H__
+# if defined WIN32 || defined _WIN32
+# include <ipl.h>
+# else
+# include <ipl/ipl.h>
+# endif
+# endif
+#elif defined __IPL_H__
+# define HAVE_IPL
#endif
- #if defined __ICL
- #define CV_ICC __ICL
- #elif defined __ICC
- #define CV_ICC __ICC
- #elif defined __ECL
- #define CV_ICC __ECL
- #elif defined __ECC
- #define CV_ICC __ECC
- #elif defined __INTEL_COMPILER
- #define CV_ICC __INTEL_COMPILER
- #endif
-
- #if (_MSC_VER >= 1400 && defined _M_X64) || (__GNUC__ >= 4 && defined __x86_64__)
- #if defined WIN32
- #include <intrin.h>
- #endif
- #if __SSE2__ || !defined __GNUC__
- #include <emmintrin.h>
- #endif
- #endif
-
- #if defined __BORLANDC__
- #include <fastmath.h>
- #else
- #include <math.h>
- #endif
-
- #ifdef HAVE_IPL
- #ifndef __IPL_H__
- #if defined WIN32 || defined _WIN32
- #include <ipl.h>
- #else
- #include <ipl/ipl.h>
- #endif
- #endif
- #elif defined __IPL_H__
- #define HAVE_IPL
- #endif
#endif // SKIP_INCLUDES
#if defined WIN32 || defined _WIN32
- #define CV_CDECL __cdecl
- #define CV_STDCALL __stdcall
+# define CV_CDECL __cdecl
+# define CV_STDCALL __stdcall
#else
- #define CV_CDECL
- #define CV_STDCALL
+# define CV_CDECL
+# define CV_STDCALL
#endif
#ifndef CV_EXTERN_C
- #ifdef __cplusplus
- #define CV_EXTERN_C extern "C"
- #define CV_DEFAULT(val) = val
- #else
- #define CV_EXTERN_C
- #define CV_DEFAULT(val)
- #endif
+# ifdef __cplusplus
+# define CV_EXTERN_C extern "C"
+# define CV_DEFAULT(val) = val
+# else
+# define CV_EXTERN_C
+# define CV_DEFAULT(val)
+# endif
#endif
#ifndef CV_EXTERN_C_FUNCPTR
- #ifdef __cplusplus
- #define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; }
- #else
- #define CV_EXTERN_C_FUNCPTR(x) typedef x
- #endif
+# ifdef __cplusplus
+# define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; }
+# else
+# define CV_EXTERN_C_FUNCPTR(x) typedef x
+# endif
#endif
#ifndef CV_INLINE
-#if defined __cplusplus
- #define CV_INLINE inline
-#elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__
- #define CV_INLINE __inline
-#else
- #define CV_INLINE static
-#endif
+# if defined __cplusplus
+# define CV_INLINE inline
+# elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__
+# define CV_INLINE __inline
+# else
+# define CV_INLINE static
+# endif
#endif /* CV_INLINE */
#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS
- #define CV_EXPORTS __declspec(dllexport)
+# define CV_EXPORTS __declspec(dllexport)
#else
- #define CV_EXPORTS
+# define CV_EXPORTS
#endif
#ifndef CVAPI
- #define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL
+# define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL
#endif
#if defined _MSC_VER || defined __BORLANDC__
-typedef __int64 int64;
-typedef unsigned __int64 uint64;
-#define CV_BIG_INT(n) n##I64
-#define CV_BIG_UINT(n) n##UI64
+ typedef __int64 int64;
+ typedef unsigned __int64 uint64;
+# define CV_BIG_INT(n) n##I64
+# define CV_BIG_UINT(n) n##UI64
#else
-typedef int64_t int64;
-typedef uint64_t uint64;
-#define CV_BIG_INT(n) n##LL
-#define CV_BIG_UINT(n) n##ULL
+ typedef int64_t int64;
+ typedef uint64_t uint64;
+# define CV_BIG_INT(n) n##LL
+# define CV_BIG_UINT(n) n##ULL
#endif
#ifndef HAVE_IPL
-typedef unsigned char uchar;
-typedef unsigned short ushort;
+ typedef unsigned char uchar;
+ typedef unsigned short ushort;
#endif
typedef signed char schar;
typedef int CVStatus;
-enum {
+enum {
    CV_StsOk= 0,  /* everything is ok */
CV_StsBackTrace= -1, /* pseudo error for back trace */
CV_StsError= -2, /* unknown /unspecified error */
CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */
CV_StsObjectNotFound= -204, /* request can't be completed */
CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */
- CV_StsBadFlag= -206, /* flag is wrong or not supported */
- CV_StsBadPoint= -207, /* bad CvPoint */
+ CV_StsBadFlag= -206, /* flag is wrong or not supported */
+ CV_StsBadPoint= -207, /* bad CvPoint */
CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/
CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */
CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/
CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */
CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */
CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */
- CV_StsAssert= -215, /* assertion failed */
- CV_GpuNotSupported= -216,
+ CV_StsAssert= -215, /* assertion failed */
+ CV_GpuNotSupported= -216,
CV_GpuApiCallError= -217,
CV_OpenGlNotSupported= -218,
CV_OpenGlApiCallError= -219
\****************************************************************************************/
#ifdef HAVE_TEGRA_OPTIMIZATION
-# include "tegra_round.hpp"
+# include "tegra_round.hpp"
#endif
#define CV_PI 3.1415926535897932384626433832795
#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t))
#ifndef MIN
-#define MIN(a,b) ((a) > (b) ? (b) : (a))
+# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
-#define MAX(a,b) ((a) < (b) ? (b) : (a))
+# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
/* min & max without jumps */
/* absolute value without jumps */
#ifndef __cplusplus
-#define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))
+# define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))
#else
-#define CV_IABS(a) abs(a)
+# define CV_IABS(a) abs(a)
#endif
#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b)))
#define CV_SIGN(a) CV_CMP((a),0)
}
return t;
#elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__
-# ifdef HAVE_TEGRA_OPTIMIZATION
+# ifdef HAVE_TEGRA_OPTIMIZATION
TEGRA_ROUND(value);
-# else
+# else
return (int)lrint(value);
-# endif
+# endif
#else
// while this is not IEEE754-compliant rounding, it's usually a good enough approximation
return (int)(value + (value >= 0 ? 0.5 : -0.5));
}
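// Illustrative note on the two cvRound() branches above: the lrint() path
// follows the current FPU rounding mode (round-half-to-even by default), while
// the portable fallback rounds halfway cases away from zero.
#if 0
    int r1 = cvRound(2.5);    // typically 2 via lrint(), 3 via the fallback
    int r2 = cvRound(-2.5);   // typically -2 via lrint(), -3 via the fallback
#endif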
#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
-#include "emmintrin.h"
+# include "emmintrin.h"
#endif
CV_INLINE int cvFloor( double value )
}
CvModuleInfo;
-#endif /*_CXCORE_TYPES_H_*/
+#endif /*__OPENCV_CORE_TYPES_H__*/
/* End of file. */
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts/ts.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
Mat src(sz, type);
Scalar s;
-
+
declare.in(src, WARMUP_RNG).out(s);
-
+
TEST_CYCLE() s = mean(src);
-
+
SANITY_CHECK(s, 1e-6);
}
Mat src(sz, type);
Mat mask = Mat::ones(src.size(), CV_8U);
Scalar s;
-
+
declare.in(src, WARMUP_RNG).in(mask).out(s);
-
+
TEST_CYCLE() s = mean(src, mask);
-
+
SANITY_CHECK(s, 1e-6);
}
declare.in(src, WARMUP_RNG).out(mean, dev);
TEST_CYCLE() meanStdDev(src, mean, dev);
-
+
SANITY_CHECK(mean, 1e-6);
SANITY_CHECK(dev, 1e-6);
}
Scalar dev;
declare.in(src, WARMUP_RNG).in(mask).out(mean, dev);
-
+
TEST_CYCLE() meanStdDev(src, mean, dev, mask);
-
+
SANITY_CHECK(mean, 1e-6);
SANITY_CHECK(dev, 1e-6);
}
int cnt = 0;
declare.in(src, WARMUP_RNG);
-
+
TEST_CYCLE() cnt = countNonZero(src);
-
+
SANITY_CHECK(cnt);
}
{
using std::pair;
-
+
template<typename _KeyTp, typename _ValueTp> struct sorted_vector
{
sorted_vector() {}
size_t size() const { return vec.size(); }
_ValueTp& operator [](size_t idx) { return vec[idx]; }
const _ValueTp& operator [](size_t idx) const { return vec[idx]; }
-
+
void add(const _KeyTp& k, const _ValueTp& val)
{
pair<_KeyTp, _ValueTp> p(k, val);
std::swap(vec[i-1], vec[i]);
CV_Assert( i == 0 || vec[i].first != vec[i-1].first );
}
-
+
bool find(const _KeyTp& key, _ValueTp& value) const
{
size_t a = 0, b = vec.size();
else
b = c;
}
-
+
if( a < vec.size() && vec[a].first == key )
{
value = vec[a].second;
}
return false;
}
-
+
void get_keys(vector<_KeyTp>& keys) const
{
size_t i = 0, n = vec.size();
keys.resize(n);
-
+
for( i = 0; i < n; i++ )
keys[i] = vec[i].first;
}
-
+
vector<pair<_KeyTp, _ValueTp> > vec;
};
-
+
template<typename _ValueTp> inline const _ValueTp* findstr(const sorted_vector<string, _ValueTp>& vec,
const char* key)
{
if( !key )
return 0;
-
+
size_t a = 0, b = vec.vec.size();
while( b > a )
{
else
b = c;
}
-
+
if( strcmp(vec.vec[a].first.c_str(), key) == 0 )
return &vec.vec[a].second;
return 0;
}
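// Illustrative sketch of the file-local sorted_vector helper defined above: it
// keeps (key, value) pairs ordered by key so find()/findstr() can binary-search.
#if 0
    sorted_vector<string, int> sv;
    sv.add("beta", 2);
    sv.add("alpha", 1);
    int v = 0;
    bool found = sv.find("alpha", v);   // found == true, v == 1
#endif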
-
+
Param::Param()
{
type = 0;
setter = 0;
}
-
+
Param::Param(int _type, bool _readonly, int _offset,
Algorithm::Getter _getter, Algorithm::Setter _setter,
const string& _help)
string _name;
};
-
+
static sorted_vector<string, Algorithm::Constructor>& alglist()
{
static sorted_vector<string, Algorithm::Constructor> alglist_var;
Algorithm::Algorithm()
{
}
-
+
Algorithm::~Algorithm()
{
}
-
+
string Algorithm::name() const
{
return info()->name();
}
-
-void Algorithm::set(const string& name, int value)
+
+void Algorithm::set(const string& parameter, int value)
{
- info()->set(this, name.c_str(), ParamType<int>::type, &value);
+ info()->set(this, parameter.c_str(), ParamType<int>::type, &value);
}
-void Algorithm::set(const string& name, double value)
+void Algorithm::set(const string& parameter, double value)
{
- info()->set(this, name.c_str(), ParamType<double>::type, &value);
+ info()->set(this, parameter.c_str(), ParamType<double>::type, &value);
}
-void Algorithm::set(const string& name, bool value)
+void Algorithm::set(const string& parameter, bool value)
{
- info()->set(this, name.c_str(), ParamType<bool>::type, &value);
+ info()->set(this, parameter.c_str(), ParamType<bool>::type, &value);
}
-void Algorithm::set(const string& name, const string& value)
+void Algorithm::set(const string& parameter, const string& value)
{
- info()->set(this, name.c_str(), ParamType<string>::type, &value);
+ info()->set(this, parameter.c_str(), ParamType<string>::type, &value);
}
-void Algorithm::set(const string& name, const Mat& value)
+void Algorithm::set(const string& parameter, const Mat& value)
{
- info()->set(this, name.c_str(), ParamType<Mat>::type, &value);
+ info()->set(this, parameter.c_str(), ParamType<Mat>::type, &value);
}
-void Algorithm::set(const string& name, const vector<Mat>& value)
+void Algorithm::set(const string& parameter, const vector<Mat>& value)
{
- info()->set(this, name.c_str(), ParamType<vector<Mat> >::type, &value);
-}
-
-void Algorithm::set(const string& name, const Ptr<Algorithm>& value)
+ info()->set(this, parameter.c_str(), ParamType<vector<Mat> >::type, &value);
+}
+
+void Algorithm::set(const string& parameter, const Ptr<Algorithm>& value)
{
- info()->set(this, name.c_str(), ParamType<Algorithm>::type, &value);
+ info()->set(this, parameter.c_str(), ParamType<Algorithm>::type, &value);
}
-void Algorithm::set(const char* name, int value)
+void Algorithm::set(const char* parameter, int value)
{
- info()->set(this, name, ParamType<int>::type, &value);
+ info()->set(this, parameter, ParamType<int>::type, &value);
}
-void Algorithm::set(const char* name, double value)
+void Algorithm::set(const char* parameter, double value)
{
- info()->set(this, name, ParamType<double>::type, &value);
+ info()->set(this, parameter, ParamType<double>::type, &value);
}
-void Algorithm::set(const char* name, bool value)
+void Algorithm::set(const char* parameter, bool value)
{
- info()->set(this, name, ParamType<bool>::type, &value);
+ info()->set(this, parameter, ParamType<bool>::type, &value);
}
-void Algorithm::set(const char* name, const string& value)
+void Algorithm::set(const char* parameter, const string& value)
{
- info()->set(this, name, ParamType<string>::type, &value);
+ info()->set(this, parameter, ParamType<string>::type, &value);
}
-void Algorithm::set(const char* name, const Mat& value)
+void Algorithm::set(const char* parameter, const Mat& value)
{
- info()->set(this, name, ParamType<Mat>::type, &value);
+ info()->set(this, parameter, ParamType<Mat>::type, &value);
}
-void Algorithm::set(const char* name, const vector<Mat>& value)
+void Algorithm::set(const char* parameter, const vector<Mat>& value)
{
- info()->set(this, name, ParamType<vector<Mat> >::type, &value);
-}
-
-void Algorithm::set(const char* name, const Ptr<Algorithm>& value)
+ info()->set(this, parameter, ParamType<vector<Mat> >::type, &value);
+}
+
+void Algorithm::set(const char* parameter, const Ptr<Algorithm>& value)
{
- info()->set(this, name, ParamType<Algorithm>::type, &value);
+ info()->set(this, parameter, ParamType<Algorithm>::type, &value);
}
-
-int Algorithm::getInt(const string& name) const
+
+int Algorithm::getInt(const string& parameter) const
{
- return get<int>(name);
+ return get<int>(parameter);
}
-
-double Algorithm::getDouble(const string& name) const
+
+double Algorithm::getDouble(const string& parameter) const
{
- return get<double>(name);
+ return get<double>(parameter);
}
-bool Algorithm::getBool(const string& name) const
+bool Algorithm::getBool(const string& parameter) const
{
- return get<bool>(name);
+ return get<bool>(parameter);
}
-string Algorithm::getString(const string& name) const
+string Algorithm::getString(const string& parameter) const
{
- return get<string>(name);
+ return get<string>(parameter);
}
-Mat Algorithm::getMat(const string& name) const
+Mat Algorithm::getMat(const string& parameter) const
{
- return get<Mat>(name);
+ return get<Mat>(parameter);
}
-vector<Mat> Algorithm::getMatVector(const string& name) const
+vector<Mat> Algorithm::getMatVector(const string& parameter) const
{
- return get<vector<Mat> >(name);
+ return get<vector<Mat> >(parameter);
}
-Ptr<Algorithm> Algorithm::getAlgorithm(const string& name) const
+Ptr<Algorithm> Algorithm::getAlgorithm(const string& parameter) const
{
- return get<Algorithm>(name);
+ return get<Algorithm>(parameter);
}
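// Usage sketch for the typed getters/setters above. "SomeAlgorithm" and its
// parameter names are hypothetical, not part of this code.
#if 0
    cv::Ptr<cv::Algorithm> algo = cv::Algorithm::create<cv::Algorithm>("SomeAlgorithm");
    algo->set("nLevels", 8);                   // dispatches through AlgorithmInfo::set
    int    n   = algo->getInt("nLevels");      // == 8
    double eps = algo->get<double>("epsilon"); // typed accessor
#endif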
-
-string Algorithm::paramHelp(const string& name) const
+
+string Algorithm::paramHelp(const string& parameter) const
{
- return info()->paramHelp(name.c_str());
+ return info()->paramHelp(parameter.c_str());
}
-
-int Algorithm::paramType(const string& name) const
+
+int Algorithm::paramType(const string& parameter) const
{
- return info()->paramType(name.c_str());
+ return info()->paramType(parameter.c_str());
}
-int Algorithm::paramType(const char* name) const
+int Algorithm::paramType(const char* parameter) const
{
- return info()->paramType(name);
-}
-
+ return info()->paramType(parameter);
+}
+
void Algorithm::getParams(vector<string>& names) const
{
info()->getParams(names);
}
-
+
void Algorithm::write(FileStorage& fs) const
{
info()->write(this, fs);
}
-
+
void Algorithm::read(const FileNode& fn)
{
info()->read(this, fn);
-}
+}
+
-
AlgorithmInfo::AlgorithmInfo(const string& _name, Algorithm::Constructor create)
{
data = new AlgorithmInfoData;
AlgorithmInfo::~AlgorithmInfo()
{
delete data;
-}
-
+}
+
void AlgorithmInfo::write(const Algorithm* algo, FileStorage& fs) const
{
size_t i = 0, nparams = data->params.vec.size();
{
size_t i = 0, nparams = data->params.vec.size();
AlgorithmInfo* info = algo->info();
-
+
for( i = 0; i < nparams; i++ )
{
const Param& p = data->params.vec[i].second;
else
CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported parameter type");
}
-}
+}
string AlgorithmInfo::name() const
{
return data->_name;
}
-
+
union GetSetParam
{
int (Algorithm::*get_int)() const;
Mat (Algorithm::*get_mat)() const;
vector<Mat> (Algorithm::*get_mat_vector)() const;
Ptr<Algorithm> (Algorithm::*get_algo)() const;
-
+
void (Algorithm::*set_int)(int);
void (Algorithm::*set_bool)(bool);
void (Algorithm::*set_double)(double);
void (Algorithm::*set_algo)(const Ptr<Algorithm>&);
};
-void AlgorithmInfo::set(Algorithm* algo, const char* name, int argType, const void* value, bool force) const
+void AlgorithmInfo::set(Algorithm* algo, const char* parameter, int argType, const void* value, bool force) const
{
- const Param* p = findstr(data->params, name);
+ const Param* p = findstr(data->params, parameter);
if( !p )
- CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", name ? name : "<NULL>") );
+ CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", parameter ? parameter : "<NULL>") );
if( !force && p->readonly )
- CV_Error_( CV_StsError, ("Parameter '%s' is readonly", name));
+ CV_Error_( CV_StsError, ("Parameter '%s' is readonly", parameter));
GetSetParam f;
f.set_int = p->setter;
else
CV_Error(CV_StsBadArg, "Unknown/unsupported parameter type");
}
-
-void AlgorithmInfo::get(const Algorithm* algo, const char* name, int argType, void* value) const
+
+void AlgorithmInfo::get(const Algorithm* algo, const char* parameter, int argType, void* value) const
{
- const Param* p = findstr(data->params, name);
+ const Param* p = findstr(data->params, parameter);
if( !p )
- CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", name ? name : "<NULL>") );
-
+ CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", parameter ? parameter : "<NULL>") );
+
GetSetParam f;
f.get_int = p->getter;
-
+
if( argType == Param::INT || argType == Param::BOOLEAN || argType == Param::REAL )
{
if( p->type == Param::INT )
{
CV_Assert( argType == Param::INT || argType == Param::REAL );
int val = p->getter ? (algo->*f.get_int)() : *(int*)((uchar*)algo + p->offset);
-
+
if( argType == Param::INT )
*(int*)value = val;
else
{
CV_Assert( argType == Param::INT || argType == Param::BOOLEAN || argType == Param::REAL );
bool val = p->getter ? (algo->*f.get_bool)() : *(bool*)((uchar*)algo + p->offset);
-
+
if( argType == Param::INT )
*(int*)value = (int)val;
else if( argType == Param::BOOLEAN )
{
CV_Assert( argType == Param::REAL );
double val = p->getter ? (algo->*f.get_double)() : *(double*)((uchar*)algo + p->offset);
-
+
*(double*)value = val;
}
}
else if( argType == Param::STRING )
{
CV_Assert( p->type == Param::STRING );
-
+
*(string*)value = p->getter ? (algo->*f.get_string)() :
*(string*)((uchar*)algo + p->offset);
}
else if( argType == Param::MAT )
{
CV_Assert( p->type == Param::MAT );
-
+
*(Mat*)value = p->getter ? (algo->*f.get_mat)() :
*(Mat*)((uchar*)algo + p->offset);
}
else if( argType == Param::MAT_VECTOR )
{
CV_Assert( p->type == Param::MAT_VECTOR );
-
+
*(vector<Mat>*)value = p->getter ? (algo->*f.get_mat_vector)() :
*(vector<Mat>*)((uchar*)algo + p->offset);
}
else if( argType == Param::ALGORITHM )
{
CV_Assert( p->type == Param::ALGORITHM );
-
+
*(Ptr<Algorithm>*)value = p->getter ? (algo->*f.get_algo)() :
*(Ptr<Algorithm>*)((uchar*)algo + p->offset);
}
CV_Error(CV_StsBadArg, "Unknown/unsupported parameter type");
}
-
-int AlgorithmInfo::paramType(const char* name) const
+
+int AlgorithmInfo::paramType(const char* parameter) const
{
- const Param* p = findstr(data->params, name);
+ const Param* p = findstr(data->params, parameter);
if( !p )
- CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", name ? name : "<NULL>") );
+ CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", parameter ? parameter : "<NULL>") );
return p->type;
}
-
-
-string AlgorithmInfo::paramHelp(const char* name) const
+
+
+string AlgorithmInfo::paramHelp(const char* parameter) const
{
- const Param* p = findstr(data->params, name);
+ const Param* p = findstr(data->params, parameter);
if( !p )
- CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", name ? name : "<NULL>") );
+ CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", parameter ? parameter : "<NULL>") );
return p->help;
}
{
data->params.get_keys(names);
}
-
-
-void AlgorithmInfo::addParam_(Algorithm& algo, const char* name, int argType,
- void* value, bool readOnly,
+
+
+void AlgorithmInfo::addParam_(Algorithm& algo, const char* parameter, int argType,
+ void* value, bool readOnly,
Algorithm::Getter getter, Algorithm::Setter setter,
const string& help)
{
argType == Param::REAL || argType == Param::STRING ||
argType == Param::MAT || argType == Param::MAT_VECTOR ||
argType == Param::ALGORITHM );
- data->params.add(string(name), Param(argType, readOnly,
+ data->params.add(string(parameter), Param(argType, readOnly,
(int)((size_t)value - (size_t)(void*)&algo),
getter, setter, help));
}
-
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* name,
- int& value, bool readOnly,
+
+
+void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ int& value, bool readOnly,
int (Algorithm::*getter)(),
void (Algorithm::*setter)(int),
const string& help)
{
- addParam_(algo, name, ParamType<int>::type, &value, readOnly,
+ addParam_(algo, parameter, ParamType<int>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
-void AlgorithmInfo::addParam(Algorithm& algo, const char* name,
- bool& value, bool readOnly,
+void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ bool& value, bool readOnly,
int (Algorithm::*getter)(),
void (Algorithm::*setter)(int),
const string& help)
{
- addParam_(algo, name, ParamType<bool>::type, &value, readOnly,
+ addParam_(algo, parameter, ParamType<bool>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* name,
- double& value, bool readOnly,
+
+void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ double& value, bool readOnly,
double (Algorithm::*getter)(),
void (Algorithm::*setter)(double),
const string& help)
{
- addParam_(algo, name, ParamType<double>::type, &value, readOnly,
+ addParam_(algo, parameter, ParamType<double>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
-void AlgorithmInfo::addParam(Algorithm& algo, const char* name,
- string& value, bool readOnly,
+void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ string& value, bool readOnly,
string (Algorithm::*getter)(),
void (Algorithm::*setter)(const string&),
const string& help)
{
- addParam_(algo, name, ParamType<string>::type, &value, readOnly,
+ addParam_(algo, parameter, ParamType<string>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
-void AlgorithmInfo::addParam(Algorithm& algo, const char* name,
- Mat& value, bool readOnly,
+void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ Mat& value, bool readOnly,
Mat (Algorithm::*getter)(),
void (Algorithm::*setter)(const Mat&),
const string& help)
{
- addParam_(algo, name, ParamType<Mat>::type, &value, readOnly,
+ addParam_(algo, parameter, ParamType<Mat>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
-void AlgorithmInfo::addParam(Algorithm& algo, const char* name,
- vector<Mat>& value, bool readOnly,
+void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ vector<Mat>& value, bool readOnly,
vector<Mat> (Algorithm::*getter)(),
void (Algorithm::*setter)(const vector<Mat>&),
const string& help)
{
- addParam_(algo, name, ParamType<vector<Mat> >::type, &value, readOnly,
+ addParam_(algo, parameter, ParamType<vector<Mat> >::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-void AlgorithmInfo::addParam(Algorithm& algo, const char* name,
- Ptr<Algorithm>& value, bool readOnly,
+}
+
+void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter,
+ Ptr<Algorithm>& value, bool readOnly,
Ptr<Algorithm> (Algorithm::*getter)(),
void (Algorithm::*setter)(const Ptr<Algorithm>&),
const string& help)
{
- addParam_(algo, name, ParamType<Algorithm>::type, &value, readOnly,
+ addParam_(algo, parameter, ParamType<Algorithm>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
+}
}
-
+
/* End of file. */
#if CV_USE_SYSTEM_MALLOC
+#if defined WIN32 || defined _WIN32
void deleteThreadAllocData() {}
+#endif
void* fastMalloc( size_t size )
{
adata[-1] = udata;
return adata;
}
-
+
void fastFree(void* ptr)
{
if(ptr)
{
uchar* udata = ((uchar**)ptr)[-1];
CV_DbgAssert(udata < (uchar*)ptr &&
- ((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*)+CV_MALLOC_ALIGN));
+ ((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*)+CV_MALLOC_ALIGN));
free(udata);
}
}
#ifdef WIN32
#ifdef WINCE
-# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
+# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
#endif //WINCE
static DWORD tlsKey;
freePtr = block;
if( !data )
{
- block = gcPtr;
+ block = gcPtr;
for( int k = 0; k < 2; k++ )
{
SANITY_CHECK(block);
Block*& startPtr = tls->bins[idx][START];
Block*& freePtr = tls->bins[idx][FREE];
Block*& gcPtr = tls->bins[idx][GC];
-
+
if( block == block->next )
{
CV_DbgAssert( startPtr == block && freePtr == block && gcPtr == block );
scbuf[i] = scbuf[i - esz];
}
-void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
+static void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
InputArray _mask, const BinaryFunc* tab, bool bitwise)
{
int kind1 = _src1.kind(), kind2 = _src2.kind();
namespace cv
{
-void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
+static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
InputArray _mask, int dtype, BinaryFunc* tab, bool muldiv=false, void* usrdata=0)
{
int kind1 = _src1.kind(), kind2 = _src2.kind();
using namespace std;
using namespace cv;
+namespace {
void helpParser()
{
printf("\nThe CommandLineParser class is designed for command line arguments parsing\n"
return name;
}
+}//namespace
+
CommandLineParser::CommandLineParser(int argc, const char* const argv[], const char* keys)
{
std::string keys_buffer;
dst2[i] = src[j+2]; dst3[i] = src[j+3];
}
}
-
+
for( ; k < cn; k += 4 )
{
T *dst0 = dst[k], *dst1 = dst[k+1], *dst2 = dst[k+2], *dst3 = dst[k+3];
}
}
}
-
+
template<typename T> static void
merge_( const T** src, T* dst, int len, int cn )
{
dst[j+2] = src2[i]; dst[j+3] = src3[i];
}
}
-
+
for( ; k < cn; k += 4 )
{
const T *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
{
split_(src, dst, len, cn);
}
-
+
static void split64s(const int64* src, int64** dst, int len, int cn )
{
split_(src, dst, len, cn);
static void merge64s(const int64** src, int64* dst, int len, int cn )
{
merge_(src, dst, len, cn);
-}
+}
typedef void (*SplitFunc)(const uchar* src, uchar** dst, int len, int cn);
typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn);
(MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge8u), (MergeFunc)GET_OPTIMIZED(merge16u), (MergeFunc)GET_OPTIMIZED(merge16u),
(MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge32s), (MergeFunc)GET_OPTIMIZED(merge64s), 0
};
-
+
}
-
+
void cv::split(const Mat& src, Mat* mv)
{
int k, depth = src.depth(), cn = src.channels();
SplitFunc func = splitTab[depth];
CV_Assert( func != 0 );
-
+
int esz = (int)src.elemSize(), esz1 = (int)src.elemSize1();
int blocksize0 = (BLOCK_SIZE + esz-1)/esz;
AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
const Mat** arrays = (const Mat**)(uchar*)_buf;
uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);
-
+
arrays[0] = &src;
for( k = 0; k < cn; k++ )
{
mv[k].create(src.dims, src.size, depth);
arrays[k+1] = &mv[k];
}
-
+
NAryMatIterator it(arrays, ptrs, cn+1);
int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( int j = 0; j < total; j += blocksize )
{
int bsz = std::min(total - j, blocksize);
func( ptrs[0], &ptrs[1], bsz, cn );
-
+
if( j + blocksize < total )
{
ptrs[0] += bsz*esz;
}
}
}
-
+
void cv::split(const Mat& m, vector<Mat>& mv)
{
mv.resize(!m.empty() ? m.channels() : 0);
if(!m.empty())
split(m, &mv[0]);
}
-
+
void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
{
CV_Assert( mv && n > 0 );
-
+
int depth = mv[0].depth();
bool allch1 = true;
int k, cn = 0;
size_t i;
-
+
for( i = 0; i < n; i++ )
{
CV_Assert(mv[i].size == mv[0].size && mv[i].depth() == depth);
allch1 = allch1 && mv[i].channels() == 1;
cn += mv[i].channels();
}
-
+
CV_Assert( 0 < cn && cn <= CV_CN_MAX );
_dst.create(mv[0].dims, mv[0].size, CV_MAKETYPE(depth, cn));
Mat dst = _dst.getMat();
-
+
if( n == 1 )
{
mv[0].copyTo(dst);
return;
}
-
+
if( !allch1 )
{
AutoBuffer<int> pairs(cn*2);
int j, ni=0;
-
+
for( i = 0, j = 0; i < n; i++, j += ni )
{
ni = mv[i].channels();
mixChannels( mv, n, &dst, 1, &pairs[0], cn );
return;
}
-
+
size_t esz = dst.elemSize(), esz1 = dst.elemSize1();
int blocksize0 = (int)((BLOCK_SIZE + esz-1)/esz);
AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
const Mat** arrays = (const Mat**)(uchar*)_buf;
uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);
-
+
arrays[0] = &dst;
for( k = 0; k < cn; k++ )
arrays[k+1] = &mv[k];
-
+
NAryMatIterator it(arrays, ptrs, cn+1);
int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
MergeFunc func = mergeTab[depth];
-
+
for( i = 0; i < it.nplanes; i++, ++it )
{
for( int j = 0; j < total; j += blocksize )
{
int bsz = std::min(total - j, blocksize);
func( (const uchar**)&ptrs[1], ptrs[0], bsz, cn );
-
+
if( j + blocksize < total )
{
ptrs[0] += bsz*esz;
- for( int k = 0; k < cn; k++ )
- ptrs[k+1] += bsz*esz1;
+ for( int t = 0; t < cn; t++ )
+ ptrs[t+1] += bsz*esz1;
}
}
}
void cv::merge(const vector<Mat>& mv, OutputArray _dst)
{
merge(!mv.empty() ? &mv[0] : 0, mv.size(), _dst);
-}
+}
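// Editor's note: a minimal usage sketch of the cv::split / cv::merge overloads defined
// above (illustrative only, not part of this patch; sizes and values are arbitrary).
//   #include <opencv2/core/core.hpp>
//   static void split_merge_example()
//   {
//       cv::Mat bgr(480, 640, CV_8UC3, cv::Scalar(10, 20, 30));
//       std::vector<cv::Mat> planes;
//       cv::split(bgr, planes);      // three CV_8UC1 planes
//       cv::Mat rebuilt;
//       cv::merge(planes, rebuilt);  // CV_8UC3 again, equal to bgr
//   }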
/****************************************************************************************\
* Generalized split/merge: mixing channels *
}
}
-
+
static void mixChannels8u( const uchar** src, const int* sdelta,
uchar** dst, const int* ddelta,
int len, int npairs )
{
mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}
-
+
static void mixChannels64s( const int64** src, const int* sdelta,
int64** dst, const int* ddelta,
int len, int npairs )
{
mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}
-
+
typedef void (*MixChannelsFunc)( const uchar** src, const int* sdelta,
uchar** dst, const int* ddelta, int len, int npairs );
{
(MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u,
(MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s,
- (MixChannelsFunc)mixChannels64s, 0
+ (MixChannelsFunc)mixChannels64s, 0
};
-
+
}
-
+
void cv::mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, const int* fromTo, size_t npairs )
{
if( npairs == 0 )
return;
CV_Assert( src && nsrcs > 0 && dst && ndsts > 0 && fromTo && npairs > 0 );
-
+
size_t i, j, k, esz1 = dst[0].elemSize1();
int depth = dst[0].depth();
uchar** dsts = (uchar**)(srcs + npairs);
int* tab = (int*)(dsts + npairs);
int *sdelta = (int*)(tab + npairs*4), *ddelta = sdelta + npairs;
-
+
for( i = 0; i < nsrcs; i++ )
arrays[i] = &src[i];
for( i = 0; i < ndsts; i++ )
arrays[i + nsrcs] = &dst[i];
ptrs[nsrcs + ndsts] = 0;
-
+
for( i = 0; i < npairs; i++ )
{
int i0 = fromTo[i*2], i1 = fromTo[i*2+1];
tab[i*4] = (int)(nsrcs + ndsts); tab[i*4+1] = 0;
sdelta[i] = 0;
}
-
+
for( j = 0; j < ndsts; i1 -= dst[j].channels(), j++ )
if( i1 < dst[j].channels() )
break;
NAryMatIterator it(arrays, ptrs, (int)(nsrcs + ndsts));
int total = (int)it.size, blocksize = std::min(total, (int)((BLOCK_SIZE + esz1-1)/esz1));
MixChannelsFunc func = mixchTab[depth];
-
+
for( i = 0; i < it.nplanes; i++, ++it )
{
for( k = 0; k < npairs; k++ )
srcs[k] = ptrs[tab[k*4]] + tab[k*4+1];
dsts[k] = ptrs[tab[k*4+2]] + tab[k*4+3];
}
-
- for( int j = 0; j < total; j += blocksize )
+
+ for( int t = 0; t < total; t += blocksize )
{
- int bsz = std::min(total - j, blocksize);
+ int bsz = std::min(total - t, blocksize);
func( srcs, sdelta, dsts, ddelta, bsz, (int)npairs );
-
- if( j + blocksize < total )
+
+ if( t + blocksize < total )
for( k = 0; k < npairs; k++ )
{
srcs[k] += blocksize*sdelta[k]*esz1;
int i;
int nsrc = src_is_mat ? 1 : (int)src.total();
int ndst = dst_is_mat ? 1 : (int)dst.total();
-
+
CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
cv::AutoBuffer<Mat> _buf(nsrc + ndst);
Mat* buf = _buf;
{
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
-
+
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
t1 = saturate_cast<DT>(std::abs(src[x+3]*scale + shift));
dst[x+2] = t0; dst[x+3] = t1;
}
- #endif
+ #endif
for( ; x < size.width; x++ )
dst[x] = saturate_cast<DT>(std::abs(src[x]*scale + shift));
}
-}
+}
template<typename T, typename DT, typename WT> static void
{
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
-
+
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
template<> void
cvtScale_<short, short, float>( const short* src, size_t sstep,
short* dst, size_t dstep, Size size,
- float scale, float shift )
+ float scale, float shift )
{
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
r1 = _mm_cvtps_epi32(rf1);
r0 = _mm_packs_epi32(r0, r1);
_mm_storeu_si128((__m128i*)(dst + x), r0);
- }
+ }
}
#endif
for(; x < size.width; x++ )
dst[x] = saturate_cast<short>(src[x]*scale + shift);
- }
+ }
}
{
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
-
+
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
{
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
-
+
for( ; size.height--; src += sstep, dst += dstep )
{
int x = 0;
{
__m128 src128 = _mm_loadu_ps (src + x);
__m128i src_int128 = _mm_cvtps_epi32 (src128);
-
- src128 = _mm_loadu_ps (src + x + 4);
+
+ src128 = _mm_loadu_ps (src + x + 4);
__m128i src1_int128 = _mm_cvtps_epi32 (src128);
-
+
src1_int128 = _mm_packs_epi32(src_int128, src1_int128);
_mm_storeu_si128((__m128i*)(dst + x),src1_int128);
}
{
sstep /= sizeof(src[0]);
dstep /= sizeof(dst[0]);
-
+
for( ; size.height--; src += sstep, dst += dstep )
memcpy(dst, src, size.width*sizeof(src[0]));
}
-
+
#define DEF_CVT_SCALE_ABS_FUNC(suffix, tfunc, stype, dtype, wtype) \
static void cvtScaleAbs##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
dtype* dst, size_t dstep, Size size, double* scale) \
{ \
cvtScale_(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
}
-
-
+
+
#define DEF_CVT_FUNC(suffix, stype, dtype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
dtype* dst, size_t dstep, Size size, double*) \
{ \
cpy_(src, sstep, dst, dstep, size); \
}
-
-
+
+
DEF_CVT_SCALE_ABS_FUNC(8u, cvtScaleAbs_, uchar, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(8s8u, cvtScaleAbs_, schar, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(16u8u, cvtScaleAbs_, ushort, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(16s8u, cvtScaleAbs_, short, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(32s8u, cvtScaleAbs_, int, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(32f8u, cvtScaleAbs_, float, uchar, float);
-DEF_CVT_SCALE_ABS_FUNC(64f8u, cvtScaleAbs_, double, uchar, float);
+DEF_CVT_SCALE_ABS_FUNC(64f8u, cvtScaleAbs_, double, uchar, float);
DEF_CVT_SCALE_FUNC(8u, uchar, uchar, float);
DEF_CVT_SCALE_FUNC(8s8u, schar, uchar, float);
DEF_CVT_SCALE_FUNC(16s8u, short, uchar, float);
DEF_CVT_SCALE_FUNC(32s8u, int, uchar, float);
DEF_CVT_SCALE_FUNC(32f8u, float, uchar, float);
-DEF_CVT_SCALE_FUNC(64f8u, double, uchar, float);
+DEF_CVT_SCALE_FUNC(64f8u, double, uchar, float);
DEF_CVT_SCALE_FUNC(8u8s, uchar, schar, float);
DEF_CVT_SCALE_FUNC(8s, schar, schar, float);
DEF_CVT_SCALE_FUNC(16s8s, short, schar, float);
DEF_CVT_SCALE_FUNC(32s8s, int, schar, float);
DEF_CVT_SCALE_FUNC(32f8s, float, schar, float);
-DEF_CVT_SCALE_FUNC(64f8s, double, schar, float);
+DEF_CVT_SCALE_FUNC(64f8s, double, schar, float);
DEF_CVT_SCALE_FUNC(8u16u, uchar, ushort, float);
DEF_CVT_SCALE_FUNC(8s16u, schar, ushort, float);
DEF_CVT_SCALE_FUNC(16s16u, short, ushort, float);
DEF_CVT_SCALE_FUNC(32s16u, int, ushort, float);
DEF_CVT_SCALE_FUNC(32f16u, float, ushort, float);
-DEF_CVT_SCALE_FUNC(64f16u, double, ushort, float);
+DEF_CVT_SCALE_FUNC(64f16u, double, ushort, float);
DEF_CVT_SCALE_FUNC(8u16s, uchar, short, float);
DEF_CVT_SCALE_FUNC(8s16s, schar, short, float);
DEF_CVT_SCALE_FUNC(32s16s, int, short, float);
DEF_CVT_SCALE_FUNC(32f16s, float, short, float);
DEF_CVT_SCALE_FUNC(64f16s, double, short, float);
-
+
DEF_CVT_SCALE_FUNC(8u32s, uchar, int, float);
DEF_CVT_SCALE_FUNC(8s32s, schar, int, float);
DEF_CVT_SCALE_FUNC(16u32s, ushort, int, float);
DEF_CVT_FUNC(32s64f, int, double);
DEF_CVT_FUNC(32f64f, float, double);
DEF_CPY_FUNC(64s, int64);
-
+
static BinaryFunc cvtScaleAbsTab[] =
{
(BinaryFunc)cvtScaleAbs8u, (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u,
0, 0, 0, 0, 0, 0, 0, 0
}
};
-
+
BinaryFunc getConvertFunc(int sdepth, int ddepth)
{
return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
{
return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
-}
-
}
-
+
+}
+
void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
Mat src = _src.getMat();
Mat dst = _dst.getMat();
BinaryFunc func = cvtScaleAbsTab[src.depth()];
CV_Assert( func != 0 );
-
+
if( src.dims <= 2 )
{
Size sz = getContinuousSize(src, dst, cn);
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
Size sz((int)it.size*cn, 1);
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
}
}
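// Editor's note: how the cvtScaleAbs table above is reached in practice (illustrative
// only, not part of this patch; the alpha/beta values are arbitrary).
//   cv::Mat grad16s(240, 320, CV_16SC1, cv::Scalar(-300)), grad8u;
//   cv::convertScaleAbs(grad16s, grad8u, 0.5, 0.0);
//   // grad8u is CV_8UC1 with every element saturate_cast<uchar>(|-300*0.5 + 0|) == 150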
Mat src = *this;
-
+
BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
double scale[] = {alpha, beta};
int cn = channels();
CV_Assert( func != 0 );
-
+
if( dims <= 2 )
{
_dst.create( size(), _type );
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
Size sz((int)(it.size*cn), 1);
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
func(ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale);
}
static void LUT8u_64f( const uchar* src, const double* lut, double* dst, int len, int cn, int lutcn )
{
LUT8u_( src, lut, dst, len, cn, lutcn );
-}
-
+}
+
typedef void (*LUTFunc)( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn );
-
+
static LUTFunc lutTab[] =
{
(LUTFunc)LUT8u_8u, (LUTFunc)LUT8u_8s, (LUTFunc)LUT8u_16u, (LUTFunc)LUT8u_16s,
};
}
-
+
void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst, int interpolation )
{
Mat src = _src.getMat(), lut = _lut.getMat();
LUTFunc func = lutTab[lut.depth()];
CV_Assert( func != 0 );
-
+
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
int len = (int)it.size;
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
func(ptrs[0], lut.data, ptrs[1], len, cn, lutcn);
}
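// Editor's note: a usage sketch for cv::LUT as implemented above (illustrative only,
// not part of this patch); the table simply inverts 8-bit values.
//   cv::Mat table(1, 256, CV_8U);
//   for( int i = 0; i < 256; i++ )
//       table.at<uchar>(i) = (uchar)(255 - i);
//   cv::Mat img(100, 100, CV_8UC3, cv::Scalar::all(40)), inverted;
//   cv::LUT(img, table, inverted);   // each channel is passed through the 256-entry table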
int norm_type, int rtype, InputArray _mask )
{
Mat src = _src.getMat(), mask = _mask.getMat();
-
+
double scale = 1, shift = 0;
if( norm_type == CV_MINMAX )
{
}
else
CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
-
+
if( rtype < 0 )
rtype = _dst.fixedType() ? _dst.depth() : src.depth();
-
+
_dst.create(src.dims, src.size, CV_MAKETYPE(rtype, src.channels()));
Mat dst = _dst.getMat();
-
+
if( !mask.data )
src.convertTo( dst, rtype, scale, shift );
else
double scale, double shift )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
-
+
CV_Assert( src.size == dst.size && src.channels() == dst.channels() );
src.convertTo(dst, dst.type(), scale, shift);
}
const T* src = (const T*)_src;
T* dst = (T*)_dst;
int x = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for( ; x <= size.width - 4; x += 4 )
{
if( mask[x] )
}
}
}
-
-
+
+
#define DEF_COPY_MASK(suffix, type) \
static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \
uchar* dst, size_t dstep, Size size, void*) \
{ \
copyMask_<type>(src, sstep, mask, mstep, dst, dstep, size); \
}
-
-
+
+
DEF_COPY_MASK(8u, uchar);
DEF_COPY_MASK(16u, ushort);
DEF_COPY_MASK(8uC3, Vec3b);
DEF_COPY_MASK(32sC4, Vec4i);
DEF_COPY_MASK(32sC6, Vec6i);
DEF_COPY_MASK(32sC8, Vec8i);
-
+
BinaryFunc copyMaskTab[] =
{
0,
0, 0, 0, 0, 0, 0, 0,
copyMask32sC8
};
-
+
BinaryFunc getCopyMaskFunc(size_t esz)
{
return esz <= 32 && copyMaskTab[esz] ? copyMaskTab[esz] : copyMaskGeneric;
convertTo( _dst, dtype );
return;
}
-
+
if( empty() )
{
_dst.release();
return;
}
-
+
if( dims <= 2 )
{
_dst.create( rows, cols, type() );
Mat dst = _dst.getMat();
if( data == dst.data )
return;
-
+
if( rows > 0 && cols > 0 )
{
const uchar* sptr = data;
uchar* dptr = dst.data;
-
+
// to handle the copying 1xn matrix => nx1 std vector.
Size sz = size() == dst.size() ?
getContinuousSize(*this, dst) :
getContinuousSize(*this);
size_t len = sz.width*elemSize();
-
+
for( ; sz.height--; sptr += step, dptr += dst.step )
memcpy( dptr, sptr, len );
}
return;
}
-
+
_dst.create( dims, size, type() );
Mat dst = _dst.getMat();
if( data == dst.data )
return;
-
+
if( total() != 0 )
{
const Mat* arrays[] = { this, &dst };
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs, 2);
- size_t size = it.size*elemSize();
-
+ size_t sz = it.size*elemSize();
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
- memcpy(ptrs[1], ptrs[0], size);
+ memcpy(ptrs[1], ptrs[0], sz);
}
}
copyTo(_dst);
return;
}
-
+
int cn = channels(), mcn = mask.channels();
CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) );
bool colorMask = mcn > 1;
-
+
size_t esz = colorMask ? elemSize1() : elemSize();
BinaryFunc copymask = getCopyMaskFunc(esz);
-
+
uchar* data0 = _dst.getMat().data;
_dst.create( dims, size, type() );
Mat dst = _dst.getMat();
-
+
if( dst.data != data0 ) // do not leave dst uninitialized
dst = Scalar(0);
-
+
if( dims <= 2 )
{
Size sz = getContinuousSize(*this, dst, mask, mcn);
copymask(data, step, mask.data, mask.step, dst.data, dst.step, sz, &esz);
return;
}
-
+
const Mat* arrays[] = { this, &dst, &mask, 0 };
uchar* ptrs[3];
NAryMatIterator it(arrays, ptrs);
Size sz((int)(it.size*mcn), 1);
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz);
}
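// Editor's note: masked Mat::copyTo as implemented above, sketched for reference
// (illustrative only, not part of this patch).
//   cv::Mat src(64, 64, CV_32FC1, cv::Scalar(1.f)), dst;
//   cv::Mat mask = cv::Mat::zeros(64, 64, CV_8U);
//   mask(cv::Rect(0, 0, 32, 64)) = cv::Scalar(255);  // copy only the left half
//   src.copyTo(dst, mask);                           // a freshly created dst is zero-filled first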
Mat& Mat::operator = (const Scalar& s)
{
const Mat* arrays[] = { this };
- uchar* ptr;
- NAryMatIterator it(arrays, &ptr, 1);
- size_t size = it.size*elemSize();
-
+ uchar* dptr;
+ NAryMatIterator it(arrays, &dptr, 1);
+ size_t elsize = it.size*elemSize();
+
if( s[0] == 0 && s[1] == 0 && s[2] == 0 && s[3] == 0 )
{
for( size_t i = 0; i < it.nplanes; i++, ++it )
- memset( ptr, 0, size );
+ memset( dptr, 0, elsize );
}
else
{
double scalar[12];
scalarToRawData(s, scalar, type(), 12);
size_t blockSize = 12*elemSize1();
-
- for( size_t j = 0; j < size; j += blockSize )
+
+ for( size_t j = 0; j < elsize; j += blockSize )
{
- size_t sz = MIN(blockSize, size - j);
- memcpy( ptr + j, scalar, sz );
+ size_t sz = MIN(blockSize, elsize - j);
+ memcpy( dptr + j, scalar, sz );
}
}
-
+
for( size_t i = 1; i < it.nplanes; i++ )
{
++it;
- memcpy( ptr, data, size );
+ memcpy( dptr, data, elsize );
}
}
return *this;
}
-
+
Mat& Mat::setTo(InputArray _value, InputArray _mask)
{
if( !data )
return *this;
-
+
Mat value = _value.getMat(), mask = _mask.getMat();
-
+
CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT ));
CV_Assert( mask.empty() || mask.type() == CV_8U );
-
+
size_t esz = elemSize();
BinaryFunc copymask = getCopyMaskFunc(esz);
-
+
const Mat* arrays[] = { this, !mask.empty() ? &mask : 0, 0 };
uchar* ptrs[2]={0,0};
NAryMatIterator it(arrays, ptrs);
- int total = (int)it.size, blockSize0 = std::min(total, (int)((BLOCK_SIZE + esz-1)/esz));
+ int totalsz = (int)it.size, blockSize0 = std::min(totalsz, (int)((BLOCK_SIZE + esz-1)/esz));
AutoBuffer<uchar> _scbuf(blockSize0*esz + 32);
uchar* scbuf = alignPtr((uchar*)_scbuf, (int)sizeof(double));
convertAndUnrollScalar( value, type(), scbuf, blockSize0 );
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
- for( int j = 0; j < total; j += blockSize0 )
+ for( int j = 0; j < totalsz; j += blockSize0 )
{
- Size sz(std::min(blockSize0, total - j), 1);
+ Size sz(std::min(blockSize0, totalsz - j), 1);
size_t blockSize = sz.width*esz;
if( ptrs[1] )
{
int i, j, limit = (int)(((size.width + 1)/2)*esz);
AutoBuffer<int> _tab(size.width*esz);
int* tab = _tab;
-
+
for( i = 0; i < size.width; i++ )
for( size_t k = 0; k < esz; k++ )
tab[i*esz + k] = (int)((size.width - i - 1)*esz + k);
void flip( InputArray _src, OutputArray _dst, int flip_mode )
{
Mat src = _src.getMat();
-
+
CV_Assert( src.dims <= 2 );
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
flipVert( src.data, src.step, dst.data, dst.step, src.size(), esz );
else
flipHoriz( src.data, src.step, dst.data, dst.step, src.size(), esz );
-
+
if( flip_mode < 0 )
flipHoriz( dst.data, dst.step, dst.data, dst.step, dst.size(), esz );
}
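// Editor's note: the three cv::flip modes handled above, for reference (illustrative
// only, not part of this patch).
//   cv::Mat img(240, 320, CV_8UC3, cv::Scalar::all(0)), flipped;
//   cv::flip(img, flipped, 0);    // around the x-axis (vertical flip)
//   cv::flip(img, flipped, 1);    // around the y-axis (horizontal flip)
//   cv::flip(img, flipped, -1);   // around both axes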
{
Mat src = _src.getMat();
CV_Assert( src.dims <= 2 );
-
+
_dst.create(src.rows*ny, src.cols*nx, src.type());
Mat dst = _dst.getMat();
Size ssize = src.size(), dsize = dst.size();
}
cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1);
CV_Assert( src.depth() == dst.depth() && src.size == dst.size );
-
+
int coi1 = 0, coi2 = 0;
if( CV_IS_IMAGE(srcarr) )
coi1 = cvGetImageCOI((const IplImage*)srcarr);
if( CV_IS_IMAGE(dstarr) )
coi2 = cvGetImageCOI((const IplImage*)dstarr);
-
+
if( coi1 || coi2 )
{
CV_Assert( (coi1 != 0 || src.channels() == 1) &&
(coi2 != 0 || dst.channels() == 1) );
-
+
int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) };
cv::mixChannels( &src, 1, &dst, 1, pair, 1 );
return;
}
else
CV_Assert( src.channels() == dst.channels() );
-
+
if( !maskarr )
src.copyTo(dst);
else
{
cv::Mat src = cv::cvarrToMat(srcarr);
cv::Mat dst;
-
+
if (!dstarr)
dst = src;
else
dst = cv::cvarrToMat(dstarr);
-
+
CV_Assert( src.type() == dst.type() && src.size() == dst.size() );
cv::flip( src, dst, flip_mode );
}
}
}
-
+
return allseq;
}
// both cv (CvFeatureTree) and ml (kNN).
// The algorithm is taken from:
-// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
-// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
-// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
+// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
+// in high-dimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
+// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
const int MAX_TREE_DEPTH = 32;
maxDepth = -1;
normType = NORM_L2;
build(_points, _labels, _copyData);
-}
-
+}
+
struct SubTree
{
SubTree() : first(0), last(0), nodeIdx(0), depth(0) {}
else
a = i0;
}
-
+
float pivot = vals[ofs[middle]];
int less = 0, more = 0;
for( k = a0; k < middle; k++ )
}
}
-
+
void KDTree::build(InputArray _points, bool _copyData)
{
build(_points, noArray(), _copyData);
points.release();
points.create(_points.size(), _points.type());
}
-
- int i, j, n = _points.rows, dims = _points.cols, top = 0;
+
+ int i, j, n = _points.rows, ptdims = _points.cols, top = 0;
const float* data = _points.ptr<float>(0);
float* dstdata = points.ptr<float>(0);
size_t step = _points.step1();
int ptpos = 0;
labels.resize(n);
const int* _labels_data = 0;
-
+
if( !_labels.empty() )
{
int nlabels = _labels.checkVector(1, CV_32S, true);
_labels_data = (const int*)_labels.data;
}
- Mat sumstack(MAX_TREE_DEPTH*2, dims*2, CV_64F);
+ Mat sumstack(MAX_TREE_DEPTH*2, ptdims*2, CV_64F);
SubTree stack[MAX_TREE_DEPTH*2];
-
+
vector<size_t> _ptofs(n);
size_t* ptofs = &_ptofs[0];
computeSums(points, ptofs, 0, n-1, sumstack.ptr<double>(top));
stack[top++] = SubTree(0, n-1, 0, 0);
int _maxDepth = 0;
-
+
while( --top >= 0 )
{
int first = stack[top].first, last = stack[top].last;
{
const float* src = data + ptofs[first];
float* dst = dstdata + idx*dstep;
- for( j = 0; j < dims; j++ )
+ for( j = 0; j < ptdims; j++ )
dst[j] = src[j];
}
- labels[idx] = _labels_data ? _labels_data[idx0] : idx0;
+ labels[idx] = _labels_data ? _labels_data[idx0] : idx0;
_maxDepth = std::max(_maxDepth, depth);
continue;
}
// find the dimensionality with the biggest variance
- for( j = 0; j < dims; j++ )
+ for( j = 0; j < ptdims; j++ )
{
double m = sums[j*2]*invCount;
double varj = sums[j*2+1]*invCount - m*m;
nodes[nidx].boundary = medianPartition(ptofs, first, last, data + dim);
int middle = (first + last)/2;
- double *lsums = (double*)sums, *rsums = lsums + dims*2;
+ double *lsums = (double*)sums, *rsums = lsums + ptdims*2;
computeSums(points, ptofs, middle+1, last, rsums);
- for( j = 0; j < dims*2; j++ )
+ for( j = 0; j < ptdims*2; j++ )
lsums[j] = sums[j] - rsums[j];
stack[top++] = SubTree(first, middle, left, depth+1);
stack[top++] = SubTree(middle+1, last, right, depth+1);
int KDTree::findNearest(InputArray _vec, int K, int emax,
OutputArray _neighborsIdx, OutputArray _neighbors,
OutputArray _dist, OutputArray _labels) const
-
+
{
Mat vecmat = _vec.getMat();
CV_Assert( vecmat.isContinuous() && vecmat.type() == CV_32F && vecmat.total() == (size_t)points.cols );
const float* vec = vecmat.ptr<float>();
K = std::min(K, points.rows);
- int dims = points.cols;
+ int ptdims = points.cols;
CV_Assert(K > 0 && (normType == NORM_L2 || normType == NORM_L1));
{
float d, alt_d = 0.f;
int nidx;
-
+
if( e == 0 )
nidx = 0;
else
i = left;
}
}
-
+
if( ncount == K && alt_d > dist[ncount-1] )
continue;
}
if( nidx < 0 )
break;
const Node& n = nodes[nidx];
-
+
if( n.idx < 0 )
{
i = ~n.idx;
const float* row = points.ptr<float>(i);
if( normType == NORM_L2 )
- for( j = 0, d = 0.f; j < dims; j++ )
+ for( j = 0, d = 0.f; j < ptdims; j++ )
{
float t = vec[j] - row[j];
d += t*t;
}
else
- for( j = 0, d = 0.f; j < dims; j++ )
+ for( j = 0, d = 0.f; j < ptdims; j++ )
d += std::abs(vec[j] - row[j]);
-
+
dist[ncount] = d;
idx[ncount] = i;
for( i = ncount-1; i >= 0; i-- )
}
ncount += ncount < K;
e++;
- break;
+ break;
}
-
+
int alt;
if( vec[n.idx] <= n.boundary )
{
nidx = n.right;
alt = n.left;
}
-
+
d = vec[n.idx] - n.boundary;
if( normType == NORM_L2 )
d = d*d + alt_d;
OutputArray _neighbors,
OutputArray _labels ) const
{
- int dims = points.cols;
+ int ptdims = points.cols;
Mat lowerBound = _lowerBound.getMat(), upperBound = _upperBound.getMat();
CV_Assert( lowerBound.size == upperBound.size &&
lowerBound.isContinuous() &&
upperBound.isContinuous() &&
lowerBound.type() == upperBound.type() &&
lowerBound.type() == CV_32F &&
- lowerBound.total() == (size_t)dims );
+ lowerBound.total() == (size_t)ptdims );
const float* L = lowerBound.ptr<float>();
const float* R = upperBound.ptr<float>();
-
+
vector<int> idx;
AutoBuffer<int> _stack(MAX_TREE_DEPTH*2 + 1);
int* stack = _stack;
int top = 0;
-
+
stack[top++] = 0;
while( --top >= 0 )
{
int j, i = ~n.idx;
const float* row = points.ptr<float>(i);
- for( j = 0; j < dims; j++ )
+ for( j = 0; j < ptdims; j++ )
if( row[j] < L[j] || row[j] >= R[j] )
break;
- if( j == dims )
+ if( j == ptdims )
idx.push_back(i);
continue;
}
getPoints( idx, _neighbors, _labels );
}
-
+
void KDTree::getPoints(InputArray _idx, OutputArray _pts, OutputArray _labels) const
{
Mat idxmat = _idx.getMat(), pts, labelsmat;
(idxmat.cols == 1 || idxmat.rows == 1) );
const int* idx = idxmat.ptr<int>();
int* dstlabels = 0;
-
- int dims = points.cols;
+
+ int ptdims = points.cols;
int i, nidx = (int)idxmat.total();
if( nidx == 0 )
{
_labels.release();
return;
}
-
+
if( _pts.needed() )
{
- _pts.create( nidx, dims, points.type());
+ _pts.create( nidx, ptdims, points.type());
pts = _pts.getMat();
}
-
+
if(_labels.needed())
{
_labels.create(nidx, 1, CV_32S, -1, true);
dstlabels = labelsmat.ptr<int>();
}
const int* srclabels = !labels.empty() ? &labels[0] : 0;
-
+
for( i = 0; i < nidx; i++ )
{
int k = idx[i];
CV_Assert( (unsigned)k < (unsigned)points.rows );
const float* src = points.ptr<float>(k);
if( pts.data )
- std::copy(src, src + dims, pts.ptr<float>(i));
+ std::copy(src, src + ptdims, pts.ptr<float>(i));
if( dstlabels )
dstlabels[i] = srclabels ? srclabels[k] : k;
}
{
return !points.empty() ? points.cols : 0;
}
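// Editor's note: a minimal cv::KDTree round trip exercising build()/findNearest() above
// (illustrative only, not part of this patch; point counts and K/emax are arbitrary).
//   cv::Mat points(1000, 3, CV_32F);
//   cv::randu(points, cv::Scalar::all(0), cv::Scalar::all(1));
//   cv::KDTree tree(points, false);
//   cv::Mat query(1, 3, CV_32F, cv::Scalar::all(0.5));
//   cv::Mat idx, nn, dist;
//   tree.findNearest(query, 4, 32, idx, nn, dist);   // 4 nearest neighbours, emax == 32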
-
+
////////////////////////////////////////////////////////////////////////////////
-
+
schar* seqPush( CvSeq* seq, const void* element )
{
return cvSeqPush(seq, element);
}
int bt_pix0 = (int)img.elemSize(), bt_pix = bt_pix0;
- size_t step = img.step;
+ size_t istep = img.step;
int dx = pt2.x - pt1.x;
int dy = pt2.y - pt1.y;
bt_pix = (bt_pix ^ s) - s;
}
- ptr = (uchar*)(img.data + pt1.y * step + pt1.x * bt_pix0);
+ ptr = (uchar*)(img.data + pt1.y * istep + pt1.x * bt_pix0);
s = dy < 0 ? -1 : 0;
dy = (dy ^ s) - s;
- step = (step ^ s) - s;
+ istep = (istep ^ s) - s;
s = dy > dx ? -1 : 0;
dy ^= dx & s;
dx ^= dy & s;
- bt_pix ^= step & s;
- step ^= bt_pix & s;
- bt_pix ^= step & s;
+ bt_pix ^= istep & s;
+ istep ^= bt_pix & s;
+ bt_pix ^= istep & s;
if( connectivity == 8 )
{
err = dx - (dy + dy);
plusDelta = dx + dx;
minusDelta = -(dy + dy);
- plusStep = (int)step;
+ plusStep = (int)istep;
minusStep = bt_pix;
count = dx + 1;
}
err = 0;
plusDelta = (dx + dx) + (dy + dy);
minusDelta = -(dy + dy);
- plusStep = (int)step - bt_pix;
+ plusStep = (int)istep - bt_pix;
minusStep = bt_pix;
count = dx + dy + 1;
}
// On Win64 optimized versions of DFT and DCT fail the tests (fixed in VS2010)
#if defined _MSC_VER && !defined CV_ICC && defined _M_X64 && _MSC_VER < 1600
-#pragma optimize("", off)
-#pragma warning( disable : 4748 )
+# pragma optimize("", off)
+# pragma warning(disable: 4748)
#endif
/****************************************************************************************\
dataend += step * (rows - 1) + minstep;\r
}\r
\r
-cv::gpu::GpuMat::GpuMat(const GpuMat& m, Range rowRange, Range colRange)\r
+cv::gpu::GpuMat::GpuMat(const GpuMat& m, Range _rowRange, Range _colRange)\r
{\r
flags = m.flags;\r
step = m.step; refcount = m.refcount;\r
data = m.data; datastart = m.datastart; dataend = m.dataend;\r
\r
- if (rowRange == Range::all())\r
+ if (_rowRange == Range::all())\r
rows = m.rows;\r
else\r
{\r
- CV_Assert(0 <= rowRange.start && rowRange.start <= rowRange.end && rowRange.end <= m.rows);\r
+ CV_Assert(0 <= _rowRange.start && _rowRange.start <= _rowRange.end && _rowRange.end <= m.rows);\r
\r
- rows = rowRange.size();\r
- data += step*rowRange.start;\r
+ rows = _rowRange.size();\r
+ data += step*_rowRange.start;\r
}\r
\r
- if (colRange == Range::all())\r
+ if (_colRange == Range::all())\r
cols = m.cols;\r
else\r
{\r
- CV_Assert(0 <= colRange.start && colRange.start <= colRange.end && colRange.end <= m.cols);\r
+ CV_Assert(0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols);\r
\r
- cols = colRange.size();\r
- data += colRange.start*elemSize();\r
+ cols = _colRange.size();\r
+ data += _colRange.start*elemSize();\r
flags &= cols < m.cols ? ~Mat::CONTINUOUS_FLAG : -1;\r
}\r
\r
static const float atan2_p3 = -0.3258083974640975f*(float)(180/CV_PI);
static const float atan2_p5 = 0.1555786518463281f*(float)(180/CV_PI);
static const float atan2_p7 = -0.04432655554792128f*(float)(180/CV_PI);
-
+
float fastAtan2( float y, float x )
{
float ax = std::abs(x), ay = std::abs(y);
a = _mm_mul_ps(_mm_add_ps(a, p5), c2);
a = _mm_mul_ps(_mm_add_ps(a, p3), c2);
a = _mm_mul_ps(_mm_add_ps(a, p1), c);
-
+
__m128 b = _mm_sub_ps(_90, a);
a = _mm_xor_ps(a, _mm_and_ps(_mm_xor_ps(a, b), mask));
-
+
b = _mm_sub_ps(_180, a);
mask = _mm_cmplt_ps(x, z);
a = _mm_xor_ps(a, _mm_and_ps(_mm_xor_ps(a, b), mask));
-
+
b = _mm_sub_ps(_360, a);
mask = _mm_cmplt_ps(y, z);
a = _mm_xor_ps(a, _mm_and_ps(_mm_xor_ps(a, b), mask));
-
+
a = _mm_mul_ps(a, scale4);
_mm_storeu_ps(angle + i, a);
}
static void Magnitude_32f(const float* x, const float* y, float* mag, int len)
{
int i = 0;
-
+
#if CV_SSE
if( USE_SSE2 )
{
static void Magnitude_64f(const double* x, const double* y, double* mag, int len)
{
int i = 0;
-
-#if CV_SSE2
+
+#if CV_SSE2
if( USE_SSE2 )
{
for( ; i <= len - 4; i += 4 )
}
}
#endif
-
+
for( ; i < len; i++ )
{
double x0 = x[i], y0 = y[i];
}
}
-
+
static void InvSqrt_32f(const float* src, float* dst, int len)
{
int i = 0;
-
-#if CV_SSE
+
+#if CV_SSE
if( USE_SSE2 )
- {
+ {
__m128 _0_5 = _mm_set1_ps(0.5f), _1_5 = _mm_set1_ps(1.5f);
if( (((size_t)src|(size_t)dst) & 15) == 0 )
for( ; i <= len - 8; i += 8 )
}
}
#endif
-
+
for( ; i < len; i++ )
dst[i] = 1/std::sqrt(src[i]);
}
-
+
static void InvSqrt_64f(const double* src, double* dst, int len)
{
for( int i = 0; i < len; i++ )
dst[i] = 1/std::sqrt(src[i]);
-}
-
-
+}
+
+
static void Sqrt_32f(const float* src, float* dst, int len)
{
int i = 0;
-
-#if CV_SSE
+
+#if CV_SSE
if( USE_SSE2 )
{
if( (((size_t)src|(size_t)dst) & 15) == 0 )
_mm_storeu_ps(dst + i, t0); _mm_storeu_ps(dst + i + 4, t1);
}
}
-#endif
-
+#endif
+
for( ; i < len; i++ )
dst[i] = std::sqrt(src[i]);
}
-
+
static void Sqrt_64f(const double* src, double* dst, int len)
{
int i = 0;
-
-#if CV_SSE2
+
+#if CV_SSE2
if( USE_SSE2 )
{
if( (((size_t)src|(size_t)dst) & 15) == 0 )
}
}
#endif
-
+
for( ; i < len; i++ )
dst[i] = std::sqrt(src[i]);
}
CV_Assert( X.size == Y.size && type == Y.type() && (depth == CV_32F || depth == CV_64F));
dst.create(X.dims, X.size, X.type());
Mat Mag = dst.getMat();
-
+
const Mat* arrays[] = {&X, &Y, &Mag, 0};
uchar* ptrs[3];
NAryMatIterator it(arrays, ptrs);
int len = (int)it.size*cn;
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
if( depth == CV_32F )
}
}
-
+
void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegrees )
{
Mat X = src1.getMat(), Y = src2.getMat();
CV_Assert( X.size == Y.size && type == Y.type() && (depth == CV_32F || depth == CV_64F));
dst.create( X.dims, X.size, type );
Mat Angle = dst.getMat();
-
+
const Mat* arrays[] = {&X, &Y, &Angle, 0};
uchar* ptrs[3];
NAryMatIterator it(arrays, ptrs);
float* buf[2] = {0, 0};
int j, k, total = (int)(it.size*cn), blockSize = total;
size_t esz1 = X.elemSize1();
-
+
if( depth == CV_64F )
{
blockSize = std::min(blockSize, ((BLOCK_SIZE+cn-1)/cn)*cn);
buf[0] = _buf;
buf[1] = buf[0] + blockSize;
}
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
buf[0][k] = (float)x[k];
buf[1][k] = (float)y[k];
}
-
+
FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees );
for( k = 0; k < len; k++ )
angle[k] = buf[0][k];
}
}
}
-
-
+
+
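// Editor's note: a short sketch of the phase() function above and cartToPolar() below
// (illustrative only, not part of this patch).
//   cv::Mat dx(1, 8, CV_32F), dy(1, 8, CV_32F), mag, ang;
//   cv::randu(dx, cv::Scalar::all(-1), cv::Scalar::all(1));
//   cv::randu(dy, cv::Scalar::all(-1), cv::Scalar::all(1));
//   cv::cartToPolar(dx, dy, mag, ang, true);   // magnitudes plus angles in degrees, [0, 360)
//   cv::phase(dx, dy, ang, true);              // angles only, same convention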
void cartToPolar( InputArray src1, InputArray src2,
OutputArray dst1, OutputArray dst2, bool angleInDegrees )
{
dst1.create( X.dims, X.size, type );
dst2.create( X.dims, X.size, type );
Mat Mag = dst1.getMat(), Angle = dst2.getMat();
-
+
const Mat* arrays[] = {&X, &Y, &Mag, &Angle, 0};
uchar* ptrs[4];
NAryMatIterator it(arrays, ptrs);
float* buf[2] = {0, 0};
int j, k, total = (int)(it.size*cn), blockSize = std::min(total, ((BLOCK_SIZE+cn-1)/cn)*cn);
size_t esz1 = X.elemSize1();
-
+
if( depth == CV_64F )
{
_buf.allocate(blockSize*2);
buf[0] = _buf;
buf[1] = buf[0] + blockSize;
}
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
{
const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1];
double *angle = (double*)ptrs[3];
-
+
Magnitude_64f(x, y, (double*)ptrs[2], len);
for( k = 0; k < len; k++ )
{
buf[0][k] = (float)x[k];
buf[1][k] = (float)y[k];
}
-
+
FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees );
for( k = 0; k < len; k++ )
angle[k] = buf[0][k];
dst1.create( Angle.dims, Angle.size, type );
dst2.create( Angle.dims, Angle.size, type );
Mat X = dst1.getMat(), Y = dst2.getMat();
-
+
const Mat* arrays[] = {&Mag, &Angle, &X, &Y, 0};
uchar* ptrs[4];
NAryMatIterator it(arrays, ptrs);
float* buf[2] = {0, 0};
int j, k, total = (int)(it.size*cn), blockSize = std::min(total, ((BLOCK_SIZE+cn-1)/cn)*cn);
size_t esz1 = Angle.elemSize1();
-
+
if( depth == CV_64F )
{
_buf.allocate(blockSize*2);
buf[0] = _buf;
buf[1] = buf[0] + blockSize;
}
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
{
const float *mag = (const float*)ptrs[0], *angle = (const float*)ptrs[1];
float *x = (float*)ptrs[2], *y = (float*)ptrs[3];
-
+
SinCos_32f( angle, y, x, len, angleInDegrees );
if( mag )
for( k = 0; k < len; k++ )
{
const double *mag = (const double*)ptrs[0], *angle = (const double*)ptrs[1];
double *x = (double*)ptrs[2], *y = (double*)ptrs[3];
-
+
for( k = 0; k < len; k++ )
buf[0][k] = (float)angle[k];
-
+
SinCos_32f( buf[0], buf[1], buf[0], len, angleInDegrees );
if( mag )
for( k = 0; k < len; k++ )
x[k] = buf[0][k]; y[k] = buf[1][k];
}
}
-
+
if( ptrs[0] )
ptrs[0] += len*esz1;
ptrs[1] += len*esz1;
(!defined __APPLE__ && defined __GNUC__ && __GNUC__*100 + __GNUC_MINOR__ < 402)
#undef CV_SSE2
#define CV_SSE2 0
-#endif
-
+#endif
+
static const double exp_prescale = 1.4426950408889634073599246810019 * (1 << EXPTAB_SCALE);
static const double exp_postscale = 1./(1 << EXPTAB_SCALE);
static const double exp_max_val = 3000.*(1 << EXPTAB_SCALE); // log10(DBL_MAX) < 3000
A3 = (float)(.6931471805521448196800669615864773144641 / EXPPOLY_32F_A0),
A2 = (float)(.2402265109513301490103372422686535526573 / EXPPOLY_32F_A0),
A1 = (float)(.5550339366753125211915322047004666939128e-1 / EXPPOLY_32F_A0);
-
+
#undef EXPPOLY
#define EXPPOLY(x) \
(((((x) + A1)*(x) + A2)*(x) + A3)*(x) + A4)
-
+
int i = 0;
const Cv32suf* x = (const Cv32suf*)_x;
Cv32suf buf[4];
static const __m128 postscale4 = _mm_set1_ps((float)exp_postscale);
static const __m128 maxval4 = _mm_set1_ps((float)(exp_max_val/exp_prescale));
static const __m128 minval4 = _mm_set1_ps((float)(-exp_max_val/exp_prescale));
-
+
static const __m128 mA1 = _mm_set1_ps(A1);
static const __m128 mA2 = _mm_set1_ps(A2);
static const __m128 mA3 = _mm_set1_ps(A3);
static const __m128 mA4 = _mm_set1_ps(A4);
bool y_aligned = (size_t)(void*)y % 16 == 0;
-
+
ushort CV_DECL_ALIGNED(16) tab_idx[8];
-
+
for( ; i <= n - 8; i += 8 )
{
__m128 xf0, xf1;
xf0 = _mm_loadu_ps(&x[i].f);
xf1 = _mm_loadu_ps(&x[i+4].f);
__m128i xi0, xi1, xi2, xi3;
-
+
xf0 = _mm_min_ps(_mm_max_ps(xf0, minval4), maxval4);
xf1 = _mm_min_ps(_mm_max_ps(xf1, minval4), maxval4);
-
+
__m128d xd0 = _mm_cvtps_pd(xf0);
__m128d xd2 = _mm_cvtps_pd(_mm_movehl_ps(xf0, xf0));
__m128d xd1 = _mm_cvtps_pd(xf1);
__m128d xd3 = _mm_cvtps_pd(_mm_movehl_ps(xf1, xf1));
-
+
xd0 = _mm_mul_pd(xd0, prescale2);
xd2 = _mm_mul_pd(xd2, prescale2);
xd1 = _mm_mul_pd(xd1, prescale2);
xd3 = _mm_mul_pd(xd3, prescale2);
-
+
xi0 = _mm_cvtpd_epi32(xd0);
xi2 = _mm_cvtpd_epi32(xd2);
-
+
xi1 = _mm_cvtpd_epi32(xd1);
xi3 = _mm_cvtpd_epi32(xd3);
-
+
xd0 = _mm_sub_pd(xd0, _mm_cvtepi32_pd(xi0));
xd2 = _mm_sub_pd(xd2, _mm_cvtepi32_pd(xi2));
xd1 = _mm_sub_pd(xd1, _mm_cvtepi32_pd(xi1));
xd3 = _mm_sub_pd(xd3, _mm_cvtepi32_pd(xi3));
-
+
xf0 = _mm_movelh_ps(_mm_cvtpd_ps(xd0), _mm_cvtpd_ps(xd2));
xf1 = _mm_movelh_ps(_mm_cvtpd_ps(xd1), _mm_cvtpd_ps(xd3));
-
+
xf0 = _mm_mul_ps(xf0, postscale4);
xf1 = _mm_mul_ps(xf1, postscale4);
xi0 = _mm_unpacklo_epi64(xi0, xi2);
xi1 = _mm_unpacklo_epi64(xi1, xi3);
xi0 = _mm_packs_epi32(xi0, xi1);
-
+
_mm_store_si128((__m128i*)tab_idx, _mm_and_si128(xi0, _mm_set1_epi16(EXPTAB_MASK)));
-
+
xi0 = _mm_add_epi16(_mm_srai_epi16(xi0, EXPTAB_SCALE), _mm_set1_epi16(127));
xi0 = _mm_max_epi16(xi0, _mm_setzero_si128());
xi0 = _mm_min_epi16(xi0, _mm_set1_epi16(255));
xi1 = _mm_unpackhi_epi16(xi0, _mm_setzero_si128());
xi0 = _mm_unpacklo_epi16(xi0, _mm_setzero_si128());
-
+
__m128d yd0 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[0]), _mm_load_sd(expTab + tab_idx[1]));
__m128d yd1 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[2]), _mm_load_sd(expTab + tab_idx[3]));
__m128d yd2 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[4]), _mm_load_sd(expTab + tab_idx[5]));
__m128d yd3 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[6]), _mm_load_sd(expTab + tab_idx[7]));
-
+
__m128 yf0 = _mm_movelh_ps(_mm_cvtpd_ps(yd0), _mm_cvtpd_ps(yd1));
__m128 yf1 = _mm_movelh_ps(_mm_cvtpd_ps(yd2), _mm_cvtpd_ps(yd3));
yf0 = _mm_mul_ps(yf0, _mm_castsi128_ps(_mm_slli_epi32(xi0, 23)));
yf1 = _mm_mul_ps(yf1, _mm_castsi128_ps(_mm_slli_epi32(xi1, 23)));
-
+
__m128 zf0 = _mm_add_ps(xf0, mA1);
__m128 zf1 = _mm_add_ps(xf1, mA1);
-
+
zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA2);
zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA2);
-
+
zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA3);
zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA3);
-
+
zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA4);
zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA4);
-
+
zf0 = _mm_mul_ps(zf0, yf0);
zf1 = _mm_mul_ps(zf1, yf1);
-
+
if( y_aligned )
{
_mm_store_ps(y + i, zf0);
double x2 = x[i + 2].f * exp_prescale;
double x3 = x[i + 3].f * exp_prescale;
int val0, val1, val2, val3, t;
-
+
if( ((x[i].i >> 23) & 255) > 127 + 10 )
x0 = x[i].i < 0 ? -exp_max_val : exp_max_val;
-
+
if( ((x[i+1].i >> 23) & 255) > 127 + 10 )
x1 = x[i+1].i < 0 ? -exp_max_val : exp_max_val;
-
+
if( ((x[i+2].i >> 23) & 255) > 127 + 10 )
x2 = x[i+2].i < 0 ? -exp_max_val : exp_max_val;
-
+
if( ((x[i+3].i >> 23) & 255) > 127 + 10 )
x3 = x[i+3].i < 0 ? -exp_max_val : exp_max_val;
-
+
val0 = cvRound(x0);
val1 = cvRound(x1);
val2 = cvRound(x2);
val3 = cvRound(x3);
-
+
x0 = (x0 - val0)*exp_postscale;
x1 = (x1 - val1)*exp_postscale;
x2 = (x2 - val2)*exp_postscale;
x3 = (x3 - val3)*exp_postscale;
-
+
t = (val0 >> EXPTAB_SCALE) + 127;
t = !(t & ~255) ? t : t < 0 ? 0 : 255;
buf[0].i = t << 23;
-
+
t = (val1 >> EXPTAB_SCALE) + 127;
t = !(t & ~255) ? t : t < 0 ? 0 : 255;
buf[1].i = t << 23;
-
+
t = (val2 >> EXPTAB_SCALE) + 127;
t = !(t & ~255) ? t : t < 0 ? 0 : 255;
buf[2].i = t << 23;
-
+
t = (val3 >> EXPTAB_SCALE) + 127;
t = !(t & ~255) ? t : t < 0 ? 0 : 255;
buf[3].i = t << 23;
-
+
x0 = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 );
x1 = buf[1].f * expTab[val1 & EXPTAB_MASK] * EXPPOLY( x1 );
-
+
y[i] = (float)x0;
y[i + 1] = (float)x1;
-
+
x2 = buf[2].f * expTab[val2 & EXPTAB_MASK] * EXPPOLY( x2 );
x3 = buf[3].f * expTab[val3 & EXPTAB_MASK] * EXPPOLY( x3 );
-
+
y[i + 2] = (float)x2;
y[i + 3] = (float)x3;
}
-
+
for( ; i < n; i++ )
{
double x0 = x[i].f * exp_prescale;
int val0, t;
-
+
if( ((x[i].i >> 23) & 255) > 127 + 10 )
x0 = x[i].i < 0 ? -exp_max_val : exp_max_val;
-
+
val0 = cvRound(x0);
t = (val0 >> EXPTAB_SCALE) + 127;
t = !(t & ~255) ? t : t < 0 ? 0 : 255;
-
+
buf[0].i = t << 23;
x0 = (x0 - val0)*exp_postscale;
-
+
y[i] = (float)(buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY(x0));
}
}
-
+
static void Exp_64f( const double *_x, double *y, int n )
{
A2 = .55504108793649567998466049042729e-1 / EXPPOLY_32F_A0,
A1 = .96180973140732918010002372686186e-2 / EXPPOLY_32F_A0,
A0 = .13369713757180123244806654839424e-2 / EXPPOLY_32F_A0;
-
+
#undef EXPPOLY
#define EXPPOLY(x) (((((A0*(x) + A1)*(x) + A2)*(x) + A3)*(x) + A4)*(x) + A5)
-
+
int i = 0;
Cv64suf buf[4];
const Cv64suf* x = (const Cv64suf*)_x;
-
+
#if CV_SSE2
if( USE_SSE2 )
{
static const __m128d postscale2 = _mm_set1_pd(exp_postscale);
static const __m128d maxval2 = _mm_set1_pd(exp_max_val);
static const __m128d minval2 = _mm_set1_pd(-exp_max_val);
-
+
static const __m128d mA0 = _mm_set1_pd(A0);
static const __m128d mA1 = _mm_set1_pd(A1);
static const __m128d mA2 = _mm_set1_pd(A2);
static const __m128d mA3 = _mm_set1_pd(A3);
static const __m128d mA4 = _mm_set1_pd(A4);
static const __m128d mA5 = _mm_set1_pd(A5);
-
+
int CV_DECL_ALIGNED(16) tab_idx[4];
-
+
for( ; i <= n - 4; i += 4 )
{
__m128d xf0 = _mm_loadu_pd(&x[i].f), xf1 = _mm_loadu_pd(&x[i+2].f);
xf1 = _mm_min_pd(_mm_max_pd(xf1, minval2), maxval2);
xf0 = _mm_mul_pd(xf0, prescale2);
xf1 = _mm_mul_pd(xf1, prescale2);
-
+
xi0 = _mm_cvtpd_epi32(xf0);
xi1 = _mm_cvtpd_epi32(xf1);
xf0 = _mm_mul_pd(_mm_sub_pd(xf0, _mm_cvtepi32_pd(xi0)), postscale2);
xf1 = _mm_mul_pd(_mm_sub_pd(xf1, _mm_cvtepi32_pd(xi1)), postscale2);
-
+
xi0 = _mm_unpacklo_epi64(xi0, xi1);
_mm_store_si128((__m128i*)tab_idx, _mm_and_si128(xi0, _mm_set1_epi32(EXPTAB_MASK)));
-
+
xi0 = _mm_add_epi32(_mm_srai_epi32(xi0, EXPTAB_SCALE), _mm_set1_epi32(1023));
xi0 = _mm_packs_epi32(xi0, xi0);
xi0 = _mm_max_epi16(xi0, _mm_setzero_si128());
xi0 = _mm_unpacklo_epi16(xi0, _mm_setzero_si128());
xi1 = _mm_unpackhi_epi32(xi0, _mm_setzero_si128());
xi0 = _mm_unpacklo_epi32(xi0, _mm_setzero_si128());
-
+
__m128d yf0 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[0]), _mm_load_sd(expTab + tab_idx[1]));
__m128d yf1 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[2]), _mm_load_sd(expTab + tab_idx[3]));
yf0 = _mm_mul_pd(yf0, _mm_castsi128_pd(_mm_slli_epi64(xi0, 52)));
yf1 = _mm_mul_pd(yf1, _mm_castsi128_pd(_mm_slli_epi64(xi1, 52)));
-
+
__m128d zf0 = _mm_add_pd(_mm_mul_pd(mA0, xf0), mA1);
__m128d zf1 = _mm_add_pd(_mm_mul_pd(mA0, xf1), mA1);
-
+
zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA2);
zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA2);
-
+
zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA3);
zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA3);
-
+
zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA4);
zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA4);
-
+
zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA5);
zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA5);
-
+
zf0 = _mm_mul_pd(zf0, yf0);
zf1 = _mm_mul_pd(zf1, yf1);
-
+
_mm_storeu_pd(y + i, zf0);
_mm_storeu_pd(y + i + 2, zf1);
}
double x1 = x[i + 1].f * exp_prescale;
double x2 = x[i + 2].f * exp_prescale;
double x3 = x[i + 3].f * exp_prescale;
-
+
double y0, y1, y2, y3;
int val0, val1, val2, val3, t;
-
+
t = (int)(x[i].i >> 52);
if( (t & 2047) > 1023 + 10 )
x0 = t < 0 ? -exp_max_val : exp_max_val;
-
+
t = (int)(x[i+1].i >> 52);
if( (t & 2047) > 1023 + 10 )
x1 = t < 0 ? -exp_max_val : exp_max_val;
-
+
t = (int)(x[i+2].i >> 52);
if( (t & 2047) > 1023 + 10 )
x2 = t < 0 ? -exp_max_val : exp_max_val;
-
+
t = (int)(x[i+3].i >> 52);
if( (t & 2047) > 1023 + 10 )
x3 = t < 0 ? -exp_max_val : exp_max_val;
-
+
val0 = cvRound(x0);
val1 = cvRound(x1);
val2 = cvRound(x2);
val3 = cvRound(x3);
-
+
x0 = (x0 - val0)*exp_postscale;
x1 = (x1 - val1)*exp_postscale;
x2 = (x2 - val2)*exp_postscale;
x3 = (x3 - val3)*exp_postscale;
-
+
t = (val0 >> EXPTAB_SCALE) + 1023;
t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
buf[0].i = (int64)t << 52;
-
+
t = (val1 >> EXPTAB_SCALE) + 1023;
t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
buf[1].i = (int64)t << 52;
-
+
t = (val2 >> EXPTAB_SCALE) + 1023;
t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
buf[2].i = (int64)t << 52;
-
+
t = (val3 >> EXPTAB_SCALE) + 1023;
t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
buf[3].i = (int64)t << 52;
-
+
y0 = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 );
y1 = buf[1].f * expTab[val1 & EXPTAB_MASK] * EXPPOLY( x1 );
-
+
y[i] = y0;
y[i + 1] = y1;
-
+
y2 = buf[2].f * expTab[val2 & EXPTAB_MASK] * EXPPOLY( x2 );
y3 = buf[3].f * expTab[val3 & EXPTAB_MASK] * EXPPOLY( x3 );
-
+
y[i + 2] = y2;
y[i + 3] = y3;
}
-
+
for( ; i < n; i++ )
{
double x0 = x[i].f * exp_prescale;
int val0, t;
-
+
t = (int)(x[i].i >> 52);
if( (t & 2047) > 1023 + 10 )
x0 = t < 0 ? -exp_max_val : exp_max_val;
-
+
val0 = cvRound(x0);
t = (val0 >> EXPTAB_SCALE) + 1023;
t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
-
+
buf[0].i = (int64)t << 52;
x0 = (x0 - val0)*exp_postscale;
-
+
y[i] = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 );
}
}
{
Mat src = _src.getMat();
int type = src.type(), depth = src.depth(), cn = src.channels();
-
+
_dst.create( src.dims, src.size, type );
Mat dst = _dst.getMat();
-
+
CV_Assert( depth == CV_32F || depth == CV_64F );
-
+
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
int len = (int)(it.size*cn);
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
if( depth == CV_32F )
static const __m128d ln2_2 = _mm_set1_pd(ln_2);
static const __m128 _1_4 = _mm_set1_ps(1.f);
static const __m128 shift4 = _mm_set1_ps(-1.f/512);
-
+
static const __m128 mA0 = _mm_set1_ps(A0);
static const __m128 mA1 = _mm_set1_ps(A1);
static const __m128 mA2 = _mm_set1_ps(A2);
-
+
int CV_DECL_ALIGNED(16) idx[4];
-
+
for( ; i <= n - 4; i += 4 )
- {
+ {
__m128i h0 = _mm_loadu_si128((const __m128i*)(x + i));
__m128i yi0 = _mm_sub_epi32(_mm_and_si128(_mm_srli_epi32(h0, 23), _mm_set1_epi32(255)), _mm_set1_epi32(127));
__m128d yd0 = _mm_mul_pd(_mm_cvtepi32_pd(yi0), ln2_2);
__m128d yd1 = _mm_mul_pd(_mm_cvtepi32_pd(_mm_unpackhi_epi64(yi0,yi0)), ln2_2);
-
+
__m128i xi0 = _mm_or_si128(_mm_and_si128(h0, _mm_set1_epi32(LOGTAB_MASK2_32F)), _mm_set1_epi32(127 << 23));
-
+
h0 = _mm_and_si128(_mm_srli_epi32(h0, 23 - LOGTAB_SCALE - 1), _mm_set1_epi32(LOGTAB_MASK*2));
_mm_store_si128((__m128i*)idx, h0);
h0 = _mm_cmpeq_epi32(h0, _mm_set1_epi32(510));
-
+
__m128d t0, t1, t2, t3, t4;
t0 = _mm_load_pd(icvLogTab + idx[0]);
t2 = _mm_load_pd(icvLogTab + idx[1]);
t4 = _mm_load_pd(icvLogTab + idx[3]);
t3 = _mm_unpackhi_pd(t2, t4);
t2 = _mm_unpacklo_pd(t2, t4);
-
+
yd0 = _mm_add_pd(yd0, t0);
yd1 = _mm_add_pd(yd1, t2);
-
+
__m128 yf0 = _mm_movelh_ps(_mm_cvtpd_ps(yd0), _mm_cvtpd_ps(yd1));
-
+
__m128 xf0 = _mm_sub_ps(_mm_castsi128_ps(xi0), _1_4);
xf0 = _mm_mul_ps(xf0, _mm_movelh_ps(_mm_cvtpd_ps(t1), _mm_cvtpd_ps(t3)));
xf0 = _mm_add_ps(xf0, _mm_and_ps(_mm_castsi128_ps(h0), shift4));
-
+
__m128 zf0 = _mm_mul_ps(xf0, mA0);
zf0 = _mm_mul_ps(_mm_add_ps(zf0, mA1), xf0);
zf0 = _mm_mul_ps(_mm_add_ps(zf0, mA2), xf0);
yf0 = _mm_add_ps(yf0, zf0);
-
+
_mm_storeu_ps(y + i, yf0);
}
}
static const __m128d ln2_2 = _mm_set1_pd(ln_2);
static const __m128d _1_2 = _mm_set1_pd(1.);
static const __m128d shift2 = _mm_set1_pd(-1./512);
-
+
static const __m128i log_and_mask2 = _mm_set_epi32(LOGTAB_MASK2, 0xffffffff, LOGTAB_MASK2, 0xffffffff);
static const __m128i log_or_mask2 = _mm_set_epi32(1023 << 20, 0, 1023 << 20, 0);
-
+
static const __m128d mA0 = _mm_set1_pd(A0);
static const __m128d mA1 = _mm_set1_pd(A1);
static const __m128d mA2 = _mm_set1_pd(A2);
static const __m128d mA5 = _mm_set1_pd(A5);
static const __m128d mA6 = _mm_set1_pd(A6);
static const __m128d mA7 = _mm_set1_pd(A7);
-
+
int CV_DECL_ALIGNED(16) idx[4];
-
+
for( ; i <= n - 4; i += 4 )
{
__m128i h0 = _mm_loadu_si128((const __m128i*)(x + i));
__m128i h1 = _mm_loadu_si128((const __m128i*)(x + i + 2));
-
+
__m128d xd0 = _mm_castsi128_pd(_mm_or_si128(_mm_and_si128(h0, log_and_mask2), log_or_mask2));
__m128d xd1 = _mm_castsi128_pd(_mm_or_si128(_mm_and_si128(h1, log_and_mask2), log_or_mask2));
-
+
h0 = _mm_unpackhi_epi32(_mm_unpacklo_epi32(h0, h1), _mm_unpackhi_epi32(h0, h1));
-
+
__m128i yi0 = _mm_sub_epi32(_mm_and_si128(_mm_srli_epi32(h0, 20),
_mm_set1_epi32(2047)), _mm_set1_epi32(1023));
__m128d yd0 = _mm_mul_pd(_mm_cvtepi32_pd(yi0), ln2_2);
__m128d yd1 = _mm_mul_pd(_mm_cvtepi32_pd(_mm_unpackhi_epi64(yi0, yi0)), ln2_2);
-
+
h0 = _mm_and_si128(_mm_srli_epi32(h0, 20 - LOGTAB_SCALE - 1), _mm_set1_epi32(LOGTAB_MASK * 2));
_mm_store_si128((__m128i*)idx, h0);
h0 = _mm_cmpeq_epi32(h0, _mm_set1_epi32(510));
-
+
__m128d t0, t1, t2, t3, t4;
t0 = _mm_load_pd(icvLogTab + idx[0]);
t2 = _mm_load_pd(icvLogTab + idx[1]);
t4 = _mm_load_pd(icvLogTab + idx[3]);
t3 = _mm_unpackhi_pd(t2, t4);
t2 = _mm_unpacklo_pd(t2, t4);
-
+
yd0 = _mm_add_pd(yd0, t0);
yd1 = _mm_add_pd(yd1, t2);
-
+
xd0 = _mm_mul_pd(_mm_sub_pd(xd0, _1_2), t1);
xd1 = _mm_mul_pd(_mm_sub_pd(xd1, _1_2), t3);
-
+
xd0 = _mm_add_pd(xd0, _mm_and_pd(_mm_castsi128_pd(_mm_unpacklo_epi32(h0, h0)), shift2));
xd1 = _mm_add_pd(xd1, _mm_and_pd(_mm_castsi128_pd(_mm_unpackhi_epi32(h0, h0)), shift2));
-
+
__m128d zd0 = _mm_mul_pd(xd0, mA0);
__m128d zd1 = _mm_mul_pd(xd1, mA0);
zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA1), xd0);
zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA6), xd1);
zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA7), xd0);
zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA7), xd1);
-
+
yd0 = _mm_add_pd(yd0, zd0);
yd1 = _mm_add_pd(yd1, zd1);
-
+
_mm_storeu_pd(y + i, yd0);
_mm_storeu_pd(y + i + 2, yd1);
}
y[i + 2] = y2;
y[i + 3] = y3;
}
-
+
for( ; i < n; i++ )
{
int h0 = X[i].i.hi;
{
Mat src = _src.getMat();
int type = src.type(), depth = src.depth(), cn = src.channels();
-
+
_dst.create( src.dims, src.size, type );
Mat dst = _dst.getMat();
-
+
CV_Assert( depth == CV_32F || depth == CV_64F );
-
+
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
int len = (int)(it.size*cn);
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
if( depth == CV_32F )
else
Log_64f( (const double*)ptrs[0], (double*)ptrs[1], len );
}
-}
+}
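// Editor's note: element-wise cv::exp / cv::log as dispatched above (illustrative only,
// not part of this patch).
//   cv::Mat x(1, 5, CV_32F, cv::Scalar(2.f)), ex, lx;
//   cv::exp(x, ex);   // e^2 per element
//   cv::log(ex, lx);  // recovers ~2.0 per element (float rounding aside)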
/****************************************************************************************\
* P O W E R *
}
}
-
-void iPow8u(const uchar* src, uchar* dst, int len, int power)
+
+static void iPow8u(const uchar* src, uchar* dst, int len, int power)
{
iPow_<uchar, int>(src, dst, len, power);
}
-void iPow8s(const schar* src, schar* dst, int len, int power)
+static void iPow8s(const schar* src, schar* dst, int len, int power)
{
iPow_<schar, int>(src, dst, len, power);
}
-
-void iPow16u(const ushort* src, ushort* dst, int len, int power)
+
+static void iPow16u(const ushort* src, ushort* dst, int len, int power)
{
iPow_<ushort, int>(src, dst, len, power);
}
-void iPow16s(const short* src, short* dst, int len, int power)
+static void iPow16s(const short* src, short* dst, int len, int power)
{
iPow_<short, int>(src, dst, len, power);
}
-
-void iPow32s(const int* src, int* dst, int len, int power)
+
+static void iPow32s(const int* src, int* dst, int len, int power)
{
iPow_<int, int>(src, dst, len, power);
}
-void iPow32f(const float* src, float* dst, int len, int power)
+static void iPow32f(const float* src, float* dst, int len, int power)
{
iPow_<float, float>(src, dst, len, power);
}
-void iPow64f(const double* src, double* dst, int len, int power)
+static void iPow64f(const double* src, double* dst, int len, int power)
{
iPow_<double, double>(src, dst, len, power);
}
-
+
typedef void (*IPowFunc)( const uchar* src, uchar* dst, int len, int power );
-
+
static IPowFunc ipowTab[] =
{
(IPowFunc)iPow8u, (IPowFunc)iPow8s, (IPowFunc)iPow16u, (IPowFunc)iPow16s,
(IPowFunc)iPow32s, (IPowFunc)iPow32f, (IPowFunc)iPow64f, 0
};
-
+
void pow( InputArray _src, double power, OutputArray _dst )
{
Mat src = _src.getMat();
int type = src.type(), depth = src.depth(), cn = src.channels();
-
+
_dst.create( src.dims, src.size, type );
Mat dst = _dst.getMat();
-
+
int ipower = cvRound(power);
bool is_ipower = false;
-
+
if( fabs(ipower - power) < DBL_EPSILON )
{
if( ipower < 0 )
ipower = -ipower;
src = dst;
}
-
+
switch( ipower )
{
case 0:
}
else
CV_Assert( depth == CV_32F || depth == CV_64F );
-
+
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
int len = (int)(it.size*cn);
-
+
if( is_ipower )
{
IPowFunc func = ipowTab[depth];
CV_Assert( func != 0 );
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
func( ptrs[0], ptrs[1], len, ipower );
}
MathFunc func = power < 0 ?
(depth == CV_32F ? (MathFunc)InvSqrt_32f : (MathFunc)InvSqrt_64f) :
(depth == CV_32F ? (MathFunc)Sqrt_32f : (MathFunc)Sqrt_64f);
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
func( ptrs[0], ptrs[1], len );
}
{
int j, k, blockSize = std::min(len, ((BLOCK_SIZE + cn-1)/cn)*cn);
size_t esz1 = src.elemSize1();
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < len; j += blockSize )
{
const float* x = (const float*)ptrs[0];
float* y = (float*)ptrs[1];
-
+
Log_32f(x, y, bsz);
for( k = 0; k < bsz; k++ )
y[k] = (float)(y[k]*power);
{
const double* x = (const double*)ptrs[0];
double* y = (double*)ptrs[1];
-
+
Log_64f(x, y, bsz);
for( k = 0; k < bsz; k++ )
y[k] *= power;
template<int depth>
bool checkIntegerRange(cv::Mat src, Point& bad_pt, int minVal, int maxVal, double& bad_value)
{
- typedef mat_type_assotiations<depth> type_ass;
-
+ typedef mat_type_assotiations<depth> type_ass;
+
if (minVal < type_ass::min_allowable && maxVal > type_ass::max_allowable)
{
return true;
for (int j = 0; j < as_one_channel.rows; ++j)
for (int i = 0; i < as_one_channel.cols; ++i)
- {
+ {
if (as_one_channel.at<typename type_ass::type>(j ,i) < minVal || as_one_channel.at<typename type_ass::type>(j ,i) > maxVal)
- {
- bad_pt.y = j ;
+ {
+ bad_pt.y = j ;
bad_pt.x = i % src.channels();
bad_value = as_one_channel.at<typename type_ass::type>(j ,i);
return false;
}
}
bad_value = 0.0;
-
+
return true;
}
-typedef bool (*check_range_function)(cv::Mat src, Point& bad_pt, int minVal, int maxVal, double& bad_value);
+typedef bool (*check_range_function)(cv::Mat src, Point& bad_pt, int minVal, int maxVal, double& bad_value);
-check_range_function check_range_functions[] =
+check_range_function check_range_functions[] =
{
&checkIntegerRange<CV_8U>,
&checkIntegerRange<CV_8S>,
const Mat* arrays[] = {&src, 0};
Mat planes[1];
NAryMatIterator it(arrays, planes);
-
+
for ( size_t i = 0; i < it.nplanes; i++, ++it )
{
if (!checkRange( it.planes[0], quiet, pt, minVal, maxVal ))
}
return true;
}
-
+
int depth = src.depth();
Point badPt(-1, -1);
double badValue = 0;
return badPt.x < 0;
}
-
+
void patchNaNs( InputOutputArray _a, double _val )
{
Mat a = _a.getMat();
CV_Assert( a.depth() == CV_32F );
-
+
const Mat* arrays[] = {&a, 0};
int* ptrs[1];
NAryMatIterator it(arrays, (uchar**)ptrs);
size_t len = it.size*a.channels();
Cv32suf val;
val.f = (float)_val;
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
int* tptr = ptrs[0];
}
}
-
+
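// Editor's note: checkRange()/patchNaNs() from above, sketched for reference
// (illustrative only, not part of this patch; requires <limits>).
//   cv::Mat m(3, 3, CV_32F, cv::Scalar(1.f));
//   m.at<float>(1, 1) = std::numeric_limits<float>::quiet_NaN();
//   cv::Point bad;
//   bool ok = cv::checkRange(m, true, &bad);   // ok == false, bad == the NaN position
//   cv::patchNaNs(m, 0.0);                     // the NaN is overwritten with 0 in place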
void exp(const float* src, float* dst, int n)
{
Exp_32f(src, dst, n);
}
-
+
void log(const float* src, float* dst, int n)
{
Log_32f(src, dst, n);
}
-
+
void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees)
{
FastAtan2_32f(y, x, dst, n, angleInDegrees);
}
-
+
void magnitude(const float* x, const float* y, float* dst, int n)
{
Magnitude_32f(x, y, dst, n);
const int n0 = 3;
Mat coeffs = _coeffs.getMat();
int ctype = coeffs.type();
-
+
CV_Assert( ctype == CV_32F || ctype == CV_64F );
CV_Assert( (coeffs.size() == Size(n0, 1) ||
coeffs.size() == Size(n0+1, 1) ||
coeffs.size() == Size(1, n0) ||
coeffs.size() == Size(1, n0+1)) );
-
+
_roots.create(n0, 1, ctype, -1, true, DEPTH_MASK_FLT);
Mat roots = _roots.getMat();
-
+
int i = -1, n = 0;
double a0 = 1., a1, a2, a3;
double x0 = 0., x1 = 0., x2 = 0.;
int ncoeffs = coeffs.rows + coeffs.cols - 1;
-
+
if( ctype == CV_32FC1 )
{
if( ncoeffs == 4 )
a0 = coeffs.at<float>(++i);
-
+
a1 = coeffs.at<float>(i+1);
a2 = coeffs.at<float>(i+2);
a3 = coeffs.at<float>(i+3);
{
if( ncoeffs == 4 )
a0 = coeffs.at<double>(++i);
-
+
a1 = coeffs.at<double>(i+1);
a2 = coeffs.at<double>(i+2);
a3 = coeffs.at<double>(i+3);
}
-
+
if( a0 == 0 )
{
if( a1 == 0 )
a1 *= a0;
a2 *= a0;
a3 *= a0;
-
+
double Q = (a1 * a1 - 3 * a2) * (1./9);
double R = (2 * a1 * a1 * a1 - 9 * a1 * a2 + 27 * a3) * (1./54);
double Qcubed = Q * Q * Q;
double d = Qcubed - R * R;
-
+
if( d >= 0 )
{
double theta = acos(R / sqrt(Qcubed));
n = 1;
}
}
-
+
if( roots.type() == CV_32FC1 )
{
roots.at<float>(0) = (float)x0;
roots.at<double>(1) = x1;
roots.at<double>(2) = x2;
}
-
+
return n;
}
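// Editor's note: a worked cv::solveCubic() call matching the code above (illustrative
// only, not part of this patch): x^3 - 6x^2 + 11x - 6 has roots 1, 2 and 3.
//   double c[] = { 1, -6, 11, -6 };
//   cv::Mat coeffs(1, 4, CV_64F, c), roots;
//   int nroots = cv::solveCubic(coeffs, roots);   // nroots == 3, roots ~ {1, 2, 3} (unordered)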
Mat coeffs0 = _coeffs0.getMat();
int ctype = _coeffs0.type();
int cdepth = CV_MAT_DEPTH(ctype);
-
+
CV_Assert( CV_MAT_DEPTH(ctype) >= CV_32F && CV_MAT_CN(ctype) <= 2 );
CV_Assert( coeffs0.rows == 1 || coeffs0.cols == 1 );
-
+
int n = coeffs0.cols + coeffs0.rows - 2;
- _roots0.create(n, 1, CV_MAKETYPE(cdepth, 2), -1, true, DEPTH_MASK_FLT);
+ _roots0.create(n, 1, CV_MAKETYPE(cdepth, 2), -1, true, DEPTH_MASK_FLT);
Mat roots0 = _roots0.getMat();
-
+
AutoBuffer<C> buf(n*2+2);
C *coeffs = buf, *roots = coeffs + n + 1;
Mat coeffs1(coeffs0.size(), CV_MAKETYPE(CV_64F, coeffs0.channels()), coeffs0.channels() == 2 ? coeffs : roots);
for( ; size.height--; src += src_step, dst += dst_step )
{
- j=0;
+ j=0;
#if CV_ENABLE_UNROLLED
for( ; j <= size.width - 4; j += 4 )
{
for( k = 0; k < n; k++, b_data += b_step )
{
WT al(a_data[k]);
- j=0;
+ j=0;
#if CV_ENABLE_UNROLLED
for(; j <= m - 4; j += 4 )
{
if( _c_data )
{
c_data = _c_data;
- j=0;
- #if CV_ENABLE_UNROLLED
+ j=0;
+ #if CV_ENABLE_UNROLLED
for(; j <= d_size.width - 4; j += 4, c_data += 4*c_step1 )
{
WT t0 = alpha*d_buf[j];
}
else
{
- j = 0;
- #if CV_ENABLE_UNROLLED
+ j = 0;
+ #if CV_ENABLE_UNROLLED
for( ; j <= d_size.width - 4; j += 4 )
{
WT t0 = alpha*d_buf[j];
d_data[j+2] = T(t0);
d_data[j+3] = T(t1);
}
- #endif
+ #endif
for( ; j < d_size.width; j++ )
d_data[j] = T(alpha*d_buf[j]);
}
alpha, beta, flags);
}
-
+
static void GEMMSingleMul_32fc( const Complexf* a_data, size_t a_step,
const Complexf* b_data, size_t b_step,
const Complexf* c_data, size_t c_step,
GEMMSingleMul<Complexd,Complexd>(a_data, a_step, b_data, b_step, c_data,
c_step, d_data, d_step, a_size, d_size,
alpha, beta, flags);
-}
+}
static void GEMMBlockMul_32f( const float* a_data, size_t a_step,
const float* b_data, size_t b_step,
}
void cv::gemm( InputArray matA, InputArray matB, double alpha,
- InputArray matC, double beta, OutputArray matD, int flags )
+ InputArray matC, double beta, OutputArray _matD, int flags )
{
const int block_lin_size = 128;
const int block_size = block_lin_size * block_lin_size;
((flags&GEMM_3_T) != 0 && C.rows == d_size.width && C.cols == d_size.height)));
}
- matD.create( d_size.height, d_size.width, type );
- Mat D = matD.getMat();
+ _matD.create( d_size.height, d_size.width, type );
+ Mat D = _matD.getMat();
if( (flags & GEMM_3_T) != 0 && C.data == D.data )
{
transpose( C, C );
t1 = src1[i+3]*alpha + src2[i+3];
dst[i+2] = t0; dst[i+3] = t1;
}
- for(; i < len; i++ )
+ for(; i < len; i++ )
dst[i] = src1[i]*alpha + src2[i];
}
}
else
#endif
- //vz why do we need unroll here?
+ //vz why do we need unroll here?
for( ; i <= len - 4; i += 4 )
{
double t0, t1;
t1 = src1[i+3]*alpha + src2[i+3];
dst[i+2] = t0; dst[i+3] = t1;
}
- for(; i < len; i++ )
+ for(; i < len; i++ )
dst[i] = src1[i]*alpha + src2[i];
}
float falpha = (float)alpha;
    void* palpha = depth == CV_32F ? (void*)&falpha : (void*)&alpha;
- ScaleAddFunc func = depth == CV_32F ? (ScaleAddFunc)scaleAdd_32f : (ScaleAddFunc)scaleAdd_64f;
+ ScaleAddFunc func = depth == CV_32F ? (ScaleAddFunc)scaleAdd_32f : (ScaleAddFunc)scaleAdd_64f;
if( src1.isContinuous() && src2.isContinuous() && dst.isContinuous() )
{
_mean = mean.reshape(1, size.height);
}
-void cv::calcCovarMatrix( InputArray _data, OutputArray _covar, InputOutputArray _mean, int flags, int ctype )
+void cv::calcCovarMatrix( InputArray _src, OutputArray _covar, InputOutputArray _mean, int flags, int ctype )
{
- if(_data.kind() == _InputArray::STD_VECTOR_MAT)
+ if(_src.kind() == _InputArray::STD_VECTOR_MAT)
{
std::vector<cv::Mat> src;
- _data.getMatVector(src);
+ _src.getMatVector(src);
CV_Assert( src.size() > 0 );
return;
}
- Mat data = _data.getMat(), mean;
+ Mat data = _src.getMat(), mean;
CV_Assert( ((flags & CV_COVAR_ROWS) != 0) ^ ((flags & CV_COVAR_COLS) != 0) );
bool takeRows = (flags & CV_COVAR_ROWS) != 0;
int type = data.type();
else
{
ctype = std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), CV_32F);
- reduce( _data, _mean, takeRows ? 0 : 1, CV_REDUCE_AVG, ctype );
+ reduce( _src, _mean, takeRows ? 0 : 1, CV_REDUCE_AVG, ctype );
mean = _mean.getMat();
}
double cv::Mahalanobis( InputArray _v1, InputArray _v2, InputArray _icovar )
{
- Mat v1 = _v1.getMat(), v2 = _v2.getMat(), icovar = _icovar.getMat();
+ Mat v1 = _v1.getMat(), v2 = _v2.getMat(), icovar = _icovar.getMat();
int type = v1.type(), depth = v1.depth();
Size sz = v1.size();
int i, j, len = sz.width*sz.height*v1.channels();
{
double row_sum = 0;
j = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for(; j <= len - 4; j += 4 )
row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
{
double row_sum = 0;
j = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for(; j <= len - 4; j += 4 )
row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
{
int i = 0;
double result = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for( ; i <= len - 4; i += 4 )
result += (double)src1[i]*src2[i] + (double)src1[i+1]*src2[i+1] +
(double)src1[i+2]*src2[i+2] + (double)src1[i+3]*src2[i+3];
{
blockSize = std::min(len0 - i, blockSize0);
__m128i s = _mm_setzero_si128();
- j = 0;
+ j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
__m128i b0 = _mm_loadu_si128((const __m128i*)(src1 + j));
PCA::PCA() {}
-PCA::PCA(InputArray data, InputArray mean, int flags, int maxComponents)
+PCA::PCA(InputArray data, InputArray _mean, int flags, int maxComponents)
{
- operator()(data, mean, flags, maxComponents);
+ operator()(data, _mean, flags, maxComponents);
}
PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComponents)
pca.mean.copyTo(mean);
pca.eigenvectors.copyTo(eigenvectors);
}
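A minimal sketch of how the PCA interface touched above is typically driven, assuming a CV_32F matrix with one observation per row; retaining 5 components is an arbitrary example choice:

    #include <opencv2/core/core.hpp>

    static void pcaExample(const cv::Mat& samples)   // CV_32F, one observation per row
    {
        // Keep the 5 strongest principal components; the mean is computed internally.
        cv::PCA pca(samples, cv::Mat(), cv::PCA::DATA_AS_ROW, 5);
        cv::Mat coeffs   = pca.project(samples.row(0));   // into the reduced subspace
        cv::Mat restored = pca.backProject(coeffs);       // back to the original space
    }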
-
+
void cv::PCAProject(InputArray data, InputArray mean,
InputArray eigenvectors, OutputArray result)
{
bool elementWise(const MatExpr& /*expr*/) const { return true; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
static void makeExpr(MatExpr& res, const Mat& m);
};
static MatOp_Identity g_MatOp_Identity;
-
+
class MatOp_AddEx : public MatOp
{
public:
MatOp_AddEx() {}
virtual ~MatOp_AddEx() {}
-
+
bool elementWise(const MatExpr& /*expr*/) const { return true; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
void add(const MatExpr& e1, const Scalar& s, MatExpr& res) const;
void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const;
void multiply(const MatExpr& e1, double s, MatExpr& res) const;
void divide(double s, const MatExpr& e, MatExpr& res) const;
-
+
void transpose(const MatExpr& e1, MatExpr& res) const;
void abs(const MatExpr& expr, MatExpr& res) const;
-
+
static void makeExpr(MatExpr& res, const Mat& a, const Mat& b, double alpha, double beta, const Scalar& s=Scalar());
};
static MatOp_AddEx g_MatOp_AddEx;
-
+
class MatOp_Bin : public MatOp
{
public:
MatOp_Bin() {}
virtual ~MatOp_Bin() {}
-
+
bool elementWise(const MatExpr& /*expr*/) const { return true; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
void multiply(const MatExpr& e1, double s, MatExpr& res) const;
void divide(double s, const MatExpr& e, MatExpr& res) const;
-
+
static void makeExpr(MatExpr& res, char op, const Mat& a, const Mat& b, double scale=1);
static void makeExpr(MatExpr& res, char op, const Mat& a, const Scalar& s);
};
static MatOp_Bin g_MatOp_Bin;
-
+
class MatOp_Cmp : public MatOp
{
public:
MatOp_Cmp() {}
virtual ~MatOp_Cmp() {}
-
+
bool elementWise(const MatExpr& /*expr*/) const { return true; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
static void makeExpr(MatExpr& res, int cmpop, const Mat& a, const Mat& b);
static void makeExpr(MatExpr& res, int cmpop, const Mat& a, double alpha);
};
-
+
static MatOp_Cmp g_MatOp_Cmp;
-
+
class MatOp_GEMM : public MatOp
{
public:
MatOp_GEMM() {}
virtual ~MatOp_GEMM() {}
-
+
bool elementWise(const MatExpr& /*expr*/) const { return false; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
void add(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const;
void subtract(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const;
void multiply(const MatExpr& e, double s, MatExpr& res) const;
-
+
void transpose(const MatExpr& expr, MatExpr& res) const;
-
+
static void makeExpr(MatExpr& res, int flags, const Mat& a, const Mat& b,
double alpha=1, const Mat& c=Mat(), double beta=1);
};
public:
MatOp_Invert() {}
virtual ~MatOp_Invert() {}
-
+
bool elementWise(const MatExpr& /*expr*/) const { return false; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const;
-
+
static void makeExpr(MatExpr& res, int method, const Mat& m);
-};
+};
static MatOp_Invert g_MatOp_Invert;
public:
MatOp_T() {}
virtual ~MatOp_T() {}
-
+
bool elementWise(const MatExpr& /*expr*/) const { return false; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
void multiply(const MatExpr& e1, double s, MatExpr& res) const;
void transpose(const MatExpr& expr, MatExpr& res) const;
-
+
static void makeExpr(MatExpr& res, const Mat& a, double alpha=1);
};
public:
MatOp_Solve() {}
virtual ~MatOp_Solve() {}
-
+
bool elementWise(const MatExpr& /*expr*/) const { return false; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
static void makeExpr(MatExpr& res, int method, const Mat& a, const Mat& b);
};
public:
MatOp_Initializer() {}
virtual ~MatOp_Initializer() {}
-
+
bool elementWise(const MatExpr& /*expr*/) const { return false; }
void assign(const MatExpr& expr, Mat& m, int type=-1) const;
-
+
void multiply(const MatExpr& e, double s, MatExpr& res) const;
-
+
static void makeExpr(MatExpr& res, int method, Size sz, int type, double alpha=1);
};
static MatOp_Initializer g_MatOp_Initializer;
-
+
static inline bool isIdentity(const MatExpr& e) { return e.op == &g_MatOp_Identity; }
static inline bool isAddEx(const MatExpr& e) { return e.op == &g_MatOp_AddEx; }
static inline bool isScaled(const MatExpr& e) { return isAddEx(e) && (!e.b.data || e.beta == 0) && e.s == Scalar(); }
static inline bool isGEMM(const MatExpr& e) { return e.op == &g_MatOp_GEMM; }
static inline bool isMatProd(const MatExpr& e) { return e.op == &g_MatOp_GEMM && (!e.c.data || e.beta == 0); }
static inline bool isInitializer(const MatExpr& e) { return e.op == &g_MatOp_Initializer; }
-
+
/////////////////////////////////////////////////////////////////////////////////////////////////////
-
+
bool MatOp::elementWise(const MatExpr& /*expr*/) const
{
return false;
}
-
+
void MatOp::roi(const MatExpr& expr, const Range& rowRange, const Range& colRange, MatExpr& e) const
{
if( elementWise(expr) )
e = MatExpr(&g_MatOp_Identity, 0, m(rowRange, colRange), Mat(), Mat());
}
}
-
+
void MatOp::diag(const MatExpr& expr, int d, MatExpr& e) const
{
if( elementWise(expr) )
}
}
-
+
void MatOp::augAssignAdd(const MatExpr& expr, Mat& m) const
{
Mat temp;
m += temp;
}
-
+
void MatOp::augAssignSubtract(const MatExpr& expr, Mat& m) const
{
Mat temp;
m -= temp;
}
-
+
void MatOp::augAssignMultiply(const MatExpr& expr, Mat& m) const
{
Mat temp;
m *= temp;
}
-
+
void MatOp::augAssignDivide(const MatExpr& expr, Mat& m) const
{
Mat temp;
expr.op->assign(expr, temp);
m /= temp;
}
-
-
+
+
void MatOp::augAssignAnd(const MatExpr& expr, Mat& m) const
{
Mat temp;
m &= temp;
}
-
+
void MatOp::augAssignOr(const MatExpr& expr, Mat& m) const
{
Mat temp;
m |= temp;
}
-
+
void MatOp::augAssignXor(const MatExpr& expr, Mat& m) const
{
Mat temp;
expr.op->assign(expr, temp);
m /= temp;
}
-
+
void MatOp::add(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const
{
}
else
e1.op->assign(e1, m1);
-
+
if( isAddEx(e2) && (!e2.b.data || e2.beta == 0) )
{
m2 = e2.a;
beta = e2.alpha;
s += e2.s;
- }
+ }
else
e2.op->assign(e2, m2);
MatOp_AddEx::makeExpr(res, m1, m2, alpha, beta, s);
e2.op->add(e1, e2, res);
}
-
+
void MatOp::add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const
{
Mat m1;
MatOp_AddEx::makeExpr(res, m1, Mat(), 1, 0, s);
}
-
+
void MatOp::subtract(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const
{
if( this == e2.op )
}
else
e1.op->assign(e1, m1);
-
+
if( isAddEx(e2) && (!e2.b.data || e2.beta == 0) )
{
m2 = e2.a;
beta = -e2.alpha;
s -= e2.s;
- }
+ }
else
e2.op->assign(e2, m2);
MatOp_AddEx::makeExpr(res, m1, m2, alpha, beta, s);
e2.op->subtract(e1, e2, res);
}
-
+
void MatOp::subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const
{
Mat m;
MatOp_AddEx::makeExpr(res, m, Mat(), -1, 0, s);
}
-
+
void MatOp::multiply(const MatExpr& e1, const MatExpr& e2, MatExpr& res, double scale) const
{
if( this == e2.op )
{
Mat m1, m2;
-
+
if( isReciprocal(e1) )
{
if( isScaled(e2) )
}
else
e1.op->assign(e1, m1);
-
+
if( isScaled(e2) )
{
m2 = e2.a;
}
else
e2.op->assign(e2, m2);
-
+
MatOp_Bin::makeExpr(res, op, m1, m2, scale);
}
}
else
e2.op->multiply(e1, e2, res, scale);
}
-
-
+
+
void MatOp::multiply(const MatExpr& expr, double s, MatExpr& res) const
{
Mat m;
expr.op->assign(expr, m);
- MatOp_AddEx::makeExpr(res, m, Mat(), s, 0);
+ MatOp_AddEx::makeExpr(res, m, Mat(), s, 0);
}
-
-
+
+
void MatOp::divide(const MatExpr& e1, const MatExpr& e2, MatExpr& res, double scale) const
{
if( this == e2.op )
{
Mat m1, m2;
char op = '/';
-
+
if( isScaled(e1) )
{
m1 = e1.a;
}
else
e1.op->assign(e1, m1);
-
+
if( isScaled(e2) )
{
m2 = e2.a;
e2.op->divide(e1, e2, res, scale);
}
-
+
void MatOp::divide(double s, const MatExpr& expr, MatExpr& res) const
{
Mat m;
MatOp_Bin::makeExpr(res, '/', m, Mat(), s);
}
-
+
void MatOp::abs(const MatExpr& expr, MatExpr& res) const
{
Mat m;
MatOp_Bin::makeExpr(res, 'a', m, Mat());
}
-
+
void MatOp::transpose(const MatExpr& expr, MatExpr& res) const
{
Mat m;
MatOp_T::makeExpr(res, m, 1);
}
-
+
void MatOp::matmul(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const
{
if( this == e2.op )
double scale = 1;
int flags = 0;
Mat m1, m2;
-
+
if( isT(e1) )
{
flags = CV_GEMM_A_T;
}
else
e1.op->assign(e1, m1);
-
+
if( isT(e2) )
{
flags |= CV_GEMM_B_T;
}
else
e2.op->assign(e2, m2);
-
+
MatOp_GEMM::makeExpr(res, flags, m1, m2, scale);
}
else
e2.op->matmul(e1, e2, res);
}
-
+
void MatOp::invert(const MatExpr& expr, int method, MatExpr& res) const
{
Mat m;
expr.op->assign(expr, m);
MatOp_Invert::makeExpr(res, method, m);
}
-
-
+
+
Size MatOp::size(const MatExpr& expr) const
{
return !expr.a.empty() ? expr.a.size() : expr.b.empty() ? expr.b.size() : expr.c.size();
int MatOp::type(const MatExpr& expr) const
{
return !expr.a.empty() ? expr.a.type() : expr.b.empty() ? expr.b.type() : expr.c.type();
-}
-
+}
+
//////////////////////////////////////////////////////////////////////////////////////////////////
MatExpr::MatExpr(const Mat& m) : op(&g_MatOp_Identity), flags(0), a(m), b(Mat()), c(Mat()), alpha(1), beta(0), s(Scalar())
{
}
-
+
MatExpr MatExpr::row(int y) const
{
MatExpr e;
MatOp_AddEx::makeExpr(e, a, b, 1, 1);
return e;
}
-
+
MatExpr operator + (const Mat& a, const Scalar& s)
{
MatExpr e;
MatOp_AddEx::makeExpr(e, a, Mat(), 1, 0, s);
return e;
}
-
+
MatExpr operator + (const Scalar& s, const Mat& a)
{
MatExpr e;
MatOp_AddEx::makeExpr(e, a, Mat(), 1, 0, s);
return e;
-}
-
+}
+
MatExpr operator + (const MatExpr& e, const Mat& m)
{
MatExpr en;
e.op->add(e, MatExpr(m), en);
return en;
}
-
+
MatExpr operator + (const Mat& m, const MatExpr& e)
{
MatExpr en;
e.op->add(e, MatExpr(m), en);
return en;
-}
-
+}
+
MatExpr operator + (const MatExpr& e, const Scalar& s)
{
MatExpr en;
e.op->add(e, s, en);
return en;
}
-
+
MatExpr operator + (const Scalar& s, const MatExpr& e)
{
MatExpr en;
MatOp_AddEx::makeExpr(e, a, b, 1, -1);
return e;
}
-
+
MatExpr operator - (const Mat& a, const Scalar& s)
{
MatExpr e;
MatOp_AddEx::makeExpr(e, a, Mat(), 1, 0, -s);
return e;
}
-
+
MatExpr operator - (const Scalar& s, const Mat& a)
{
MatExpr e;
MatOp_AddEx::makeExpr(e, a, Mat(), -1, 0, s);
return e;
}
-
+
MatExpr operator - (const MatExpr& e, const Mat& m)
{
MatExpr en;
e.op->subtract(e, MatExpr(m), en);
return en;
}
-
+
MatExpr operator - (const Mat& m, const MatExpr& e)
{
MatExpr en;
e.op->subtract(MatExpr(m), e, en);
return en;
}
-
+
MatExpr operator - (const MatExpr& e, const Scalar& s)
{
MatExpr en;
e.op->add(e, -s, en);
return en;
}
-
+
MatExpr operator - (const Scalar& s, const MatExpr& e)
{
MatExpr en;
e.op->subtract(s, e, en);
return en;
}
-
+
MatExpr operator - (const MatExpr& e1, const MatExpr& e2)
{
MatExpr en;
MatOp_AddEx::makeExpr(e, m, Mat(), -1, 0);
return e;
}
-
+
MatExpr operator - (const MatExpr& e)
{
MatExpr en;
MatOp_Bin::makeExpr(e, '/', a, b);
return e;
}
-
+
MatExpr operator / (const Mat& a, double s)
{
MatExpr e;
MatOp_AddEx::makeExpr(e, a, Mat(), 1./s, 0);
return e;
}
-
+
MatExpr operator / (double s, const Mat& a)
{
MatExpr e;
MatOp_Bin::makeExpr(e, '/', a, Mat(), s);
return e;
}
-
+
MatExpr operator / (const MatExpr& e, const Mat& m)
{
MatExpr en;
e.op->divide(e, MatExpr(m), en);
return en;
}
-
+
MatExpr operator / (const Mat& m, const MatExpr& e)
{
MatExpr en;
e.op->divide(MatExpr(m), e, en);
return en;
}
-
+
MatExpr operator / (const MatExpr& e, double s)
{
MatExpr en;
e.op->multiply(e, 1./s, en);
return en;
}
-
+
MatExpr operator / (double s, const MatExpr& e)
{
MatExpr en;
e.op->divide(s, e, en);
return en;
}
-
+
MatExpr operator / (const MatExpr& e1, const MatExpr& e2)
{
MatExpr en;
MatOp_Cmp::makeExpr(e, CV_CMP_LT, a, b);
return e;
}
-
+
MatExpr operator < (const Mat& a, double s)
{
MatExpr e;
MatOp_Cmp::makeExpr(e, CV_CMP_LT, a, s);
return e;
}
-
+
MatExpr operator < (double s, const Mat& a)
{
MatExpr e;
MatOp_Cmp::makeExpr(e, CV_CMP_NE, a, s);
return e;
}
-
+
MatExpr operator >= (const Mat& a, const Mat& b)
{
MatExpr e;
MatExpr e;
MatOp_Cmp::makeExpr(e, CV_CMP_LT, a, s);
return e;
-}
-
+}
+
MatExpr min(const Mat& a, const Mat& b)
{
MatExpr e;
MatOp_Bin::makeExpr(e, 'm', a, b);
return e;
}
-
+
MatExpr min(const Mat& a, double s)
{
MatExpr e;
MatOp_Bin::makeExpr(e, 'm', a, s);
return e;
}
-
+
MatExpr min(double s, const Mat& a)
{
MatExpr e;
MatOp_Bin::makeExpr(e, 'M', a, b);
return e;
}
-
+
MatExpr max(const Mat& a, double s)
{
MatExpr e;
MatOp_Bin::makeExpr(e, 'M', a, s);
return e;
}
-
+
MatExpr max(double s, const Mat& a)
{
MatExpr e;
MatOp_Bin::makeExpr(e, '&', a, b);
return e;
}
-
+
MatExpr operator & (const Mat& a, const Scalar& s)
{
MatExpr e;
MatOp_Bin::makeExpr(e, '&', a, s);
return e;
}
-
+
MatExpr operator & (const Scalar& s, const Mat& a)
{
MatExpr e;
MatOp_Bin::makeExpr(e, '&', a, s);
return e;
}
-
+
MatExpr operator | (const Mat& a, const Mat& b)
{
MatExpr e;
MatOp_Bin::makeExpr(e, '|', a, b);
return e;
}
-
+
MatExpr operator | (const Mat& a, const Scalar& s)
{
MatExpr e;
MatOp_Bin::makeExpr(e, '|', a, s);
return e;
}
-
+
MatExpr operator | (const Scalar& s, const Mat& a)
{
MatExpr e;
return en;
}
-
+
Size MatExpr::size() const
{
if( isT(*this) || isInv(*this) )
return a.size();
return op ? op->size(*this) : Size();
}
-
-
+
+
int MatExpr::type() const
{
if( isInitializer(*this) )
return CV_8U;
return op ? op->type(*this) : -1;
}
-
-
+
+
/////////////////////////////////////////////////////////////////////////////////////////////////////
-
-void MatOp_Identity::assign(const MatExpr& e, Mat& m, int type) const
+
+void MatOp_Identity::assign(const MatExpr& e, Mat& m, int _type) const
{
- if( type == -1 || type == e.a.type() )
+ if( _type == -1 || _type == e.a.type() )
m = e.a;
else
{
- CV_Assert( CV_MAT_CN(type) == e.a.channels() );
- e.a.convertTo(m, type);
+ CV_Assert( CV_MAT_CN(_type) == e.a.channels() );
+ e.a.convertTo(m, _type);
}
}
inline void MatOp_Identity::makeExpr(MatExpr& res, const Mat& m)
{
res = MatExpr(&g_MatOp_Identity, 0, m, Mat(), Mat(), 1, 0);
-}
-
+}
+
/////////////////////////////////////////////////////////////////////////////////////////////////////
-void MatOp_AddEx::assign(const MatExpr& e, Mat& m, int type) const
-{
- Mat temp, &dst = type == -1 || e.a.type() == type ? m : temp;
+void MatOp_AddEx::assign(const MatExpr& e, Mat& m, int _type) const
+{
+ Mat temp, &dst = _type == -1 || e.a.type() == _type ? m : temp;
if( e.b.data )
{
if( e.s == Scalar() || !e.s.isReal() )
}
else
cv::addWeighted(e.a, e.alpha, e.b, e.beta, 0, dst);
-
+
if( !e.s.isReal() )
cv::add(dst, e.s, dst);
}
}
else if( e.s.isReal() && (dst.data != m.data || fabs(e.alpha) != 1))
{
- e.a.convertTo(m, type, e.alpha, e.s[0]);
+ e.a.convertTo(m, _type, e.alpha, e.s[0]);
return;
}
else if( e.alpha == 1 )
e.a.convertTo(dst, e.a.type(), e.alpha);
cv::add(dst, e.s, dst);
}
-
+
if( dst.data != m.data )
dst.convertTo(m, m.type());
}
-
+
void MatOp_AddEx::add(const MatExpr& e, const Scalar& s, MatExpr& res) const
{
res = e;
res.s += s;
}
-
+
void MatOp_AddEx::subtract(const Scalar& s, const MatExpr& e, MatExpr& res) const
{
res = e;
res.beta = -res.beta;
res.s = s - res.s;
}
-
+
void MatOp_AddEx::multiply(const MatExpr& e, double s, MatExpr& res) const
{
res = e;
res.beta *= s;
res.s *= s;
}
-
+
void MatOp_AddEx::divide(double s, const MatExpr& e, MatExpr& res) const
{
if( isScaled(e) )
else
MatOp::transpose(e, res);
}
-
+
void MatOp_AddEx::abs(const MatExpr& e, MatExpr& res) const
{
if( (!e.b.data || e.beta == 0) && fabs(e.alpha) == 1 )
else
MatOp::abs(e, res);
}
-
+
inline void MatOp_AddEx::makeExpr(MatExpr& res, const Mat& a, const Mat& b, double alpha, double beta, const Scalar& s)
{
res = MatExpr(&g_MatOp_AddEx, 0, a, b, Mat(), alpha, beta, s);
}
-
+
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-void MatOp_Bin::assign(const MatExpr& e, Mat& m, int type) const
+
+void MatOp_Bin::assign(const MatExpr& e, Mat& m, int _type) const
{
- Mat temp, &dst = type == -1 || e.a.type() == type ? m : temp;
-
+ Mat temp, &dst = _type == -1 || e.a.type() == _type ? m : temp;
+
if( e.flags == '*' )
cv::multiply(e.a, e.b, dst, e.alpha);
else if( e.flags == '/' && e.b.data )
cv::absdiff(e.a, e.s, dst);
else
CV_Error(CV_StsError, "Unknown operation");
-
+
if( dst.data != m.data )
- dst.convertTo(m, type);
+ dst.convertTo(m, _type);
}
void MatOp_Bin::multiply(const MatExpr& e, double s, MatExpr& res) const
{
res = MatExpr(&g_MatOp_Bin, op, a, Mat(), Mat(), 1, 0, s);
}
-
+
///////////////////////////////////////////////////////////////////////////////////////////////////////
-
-void MatOp_Cmp::assign(const MatExpr& e, Mat& m, int type) const
+
+void MatOp_Cmp::assign(const MatExpr& e, Mat& m, int _type) const
{
- Mat temp, &dst = type == -1 || type == CV_8U ? m : temp;
-
+ Mat temp, &dst = _type == -1 || _type == CV_8U ? m : temp;
+
if( e.b.data )
cv::compare(e.a, e.b, dst, e.flags);
else
cv::compare(e.a, e.alpha, dst, e.flags);
-
+
if( dst.data != m.data )
- dst.convertTo(m, type);
+ dst.convertTo(m, _type);
}
inline void MatOp_Cmp::makeExpr(MatExpr& res, int cmpop, const Mat& a, const Mat& b)
{
res = MatExpr(&g_MatOp_Cmp, cmpop, a, b, Mat(), 1, 1);
}
-
+
inline void MatOp_Cmp::makeExpr(MatExpr& res, int cmpop, const Mat& a, double alpha)
{
res = MatExpr(&g_MatOp_Cmp, cmpop, a, Mat(), Mat(), alpha, 1);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-void MatOp_T::assign(const MatExpr& e, Mat& m, int type) const
+
+void MatOp_T::assign(const MatExpr& e, Mat& m, int _type) const
{
- Mat temp, &dst = type == -1 || type == e.a.type() ? m : temp;
-
+ Mat temp, &dst = _type == -1 || _type == e.a.type() ? m : temp;
+
cv::transpose(e.a, dst);
-
+
if( dst.data != m.data || e.alpha != 1 )
- dst.convertTo(m, type, e.alpha);
+ dst.convertTo(m, _type, e.alpha);
}
void MatOp_T::multiply(const MatExpr& e, double s, MatExpr& res) const
res = e;
res.alpha *= s;
}
-
+
void MatOp_T::transpose(const MatExpr& e, MatExpr& res) const
{
if( e.alpha == 1 )
else
MatOp_AddEx::makeExpr(res, e.a, Mat(), e.alpha, 0);
}
-
+
inline void MatOp_T::makeExpr(MatExpr& res, const Mat& a, double alpha)
{
res = MatExpr(&g_MatOp_T, 0, a, Mat(), Mat(), alpha, 0);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-void MatOp_GEMM::assign(const MatExpr& e, Mat& m, int type) const
+
+void MatOp_GEMM::assign(const MatExpr& e, Mat& m, int _type) const
{
- Mat temp, &dst = type == -1 || type == e.a.type() ? m : temp;
-
+ Mat temp, &dst = _type == -1 || _type == e.a.type() ? m : temp;
+
cv::gemm(e.a, e.b, e.alpha, e.c, e.beta, dst, e.flags);
if( dst.data != m.data )
- dst.convertTo(m, type);
+ dst.convertTo(m, _type);
}
void MatOp_GEMM::add(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const
{
bool i1 = isIdentity(e1), i2 = isIdentity(e2);
double alpha1 = i1 ? 1 : e1.alpha, alpha2 = i2 ? 1 : e2.alpha;
-
+
if( isMatProd(e1) && (i2 || isScaled(e2) || isT(e2)) )
MatOp_GEMM::makeExpr(res, (e1.flags & ~CV_GEMM_C_T)|(isT(e2) ? CV_GEMM_C_T : 0),
e1.a, e1.b, alpha1, e2.a, alpha2);
else
e2.op->add(e1, e2, res);
}
-
+
void MatOp_GEMM::subtract(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const
{
bool i1 = isIdentity(e1), i2 = isIdentity(e2);
double alpha1 = i1 ? 1 : e1.alpha, alpha2 = i2 ? 1 : e2.alpha;
-
+
if( isMatProd(e1) && (i2 || isScaled(e2) || isT(e2)) )
MatOp_GEMM::makeExpr(res, (e1.flags & ~CV_GEMM_C_T)|(isT(e2) ? CV_GEMM_C_T : 0),
e1.a, e1.b, alpha1, e2.a, -alpha2);
res.alpha *= s;
res.beta *= s;
}
-
+
void MatOp_GEMM::transpose(const MatExpr& e, MatExpr& res) const
{
res = e;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
-
-void MatOp_Invert::assign(const MatExpr& e, Mat& m, int type) const
+
+void MatOp_Invert::assign(const MatExpr& e, Mat& m, int _type) const
{
- Mat temp, &dst = type == -1 || type == e.a.type() ? m : temp;
-
+ Mat temp, &dst = _type == -1 || _type == e.a.type() ? m : temp;
+
cv::invert(e.a, dst, e.flags);
if( dst.data != m.data )
- dst.convertTo(m, type);
+ dst.convertTo(m, _type);
}
void MatOp_Invert::matmul(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-void MatOp_Solve::assign(const MatExpr& e, Mat& m, int type) const
+
+void MatOp_Solve::assign(const MatExpr& e, Mat& m, int _type) const
{
- Mat temp, &dst = type == -1 || type == e.a.type() ? m : temp;
-
+ Mat temp, &dst = _type == -1 || _type == e.a.type() ? m : temp;
+
cv::solve(e.a, e.b, dst, e.flags);
if( dst.data != m.data )
- dst.convertTo(m, type);
+ dst.convertTo(m, _type);
}
inline void MatOp_Solve::makeExpr(MatExpr& res, int method, const Mat& a, const Mat& b)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-void MatOp_Initializer::assign(const MatExpr& e, Mat& m, int type) const
+
+void MatOp_Initializer::assign(const MatExpr& e, Mat& m, int _type) const
{
- if( type == -1 )
- type = e.a.type();
- m.create(e.a.size(), type);
+ if( _type == -1 )
+ _type = e.a.type();
+ m.create(e.a.size(), _type);
if( e.flags == 'I' )
setIdentity(m, Scalar(e.alpha));
else if( e.flags == '0' )
res = e;
res.alpha *= s;
}
-
+
inline void MatOp_Initializer::makeExpr(MatExpr& res, int method, Size sz, int type, double alpha)
{
res = MatExpr(&g_MatOp_Initializer, method, Mat(sz, type, (void*)0), Mat(), Mat(), alpha, 0);
-}
-
+}
+
///////////////////////////////////////////////////////////////////////////////////////////////////////////
MatExpr Mat::t() const
MatOp_T::makeExpr(e, *this);
return e;
}
-
+
MatExpr Mat::inv(int method) const
{
MatExpr e;
MatOp_Invert::makeExpr(e, method, *this);
return e;
}
-
+
MatExpr Mat::mul(InputArray m, double scale) const
{
MatOp_Initializer::makeExpr(e, '0', Size(cols, rows), type);
return e;
}
-
+
MatExpr Mat::zeros(Size size, int type)
{
MatExpr e;
MatOp_Initializer::makeExpr(e, '0', size, type);
return e;
}
-
+
MatExpr Mat::ones(int rows, int cols, int type)
{
MatExpr e;
MatOp_Initializer::makeExpr(e, '1', Size(cols, rows), type);
return e;
}
-
+
MatExpr Mat::ones(Size size, int type)
{
MatExpr e;
MatOp_Initializer::makeExpr(e, 'I', size, type);
return e;
}
-
+
}
/* End of file. */
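The operator overloads and MatOp_* classes above are what make expression-style arithmetic on Mat possible. A small sketch of the kind of user code they serve (values are illustrative only):

    #include <opencv2/core/core.hpp>

    static void matExprExample()
    {
        cv::Mat A = cv::Mat::eye(3, 3, CV_64F);
        cv::Mat B = cv::Mat::ones(3, 3, CV_64F);
        // Each operator builds a MatExpr tree; evaluation happens on assignment to Mat.
        cv::Mat C = 2.0 * A.t() * B + cv::Scalar::all(1);  // MatOp_GEMM combined with MatOp_AddEx
        cv::Mat M = cv::max(A, B);                         // MatOp_Bin, op 'M'
        cv::Mat L = A < B;                                 // MatOp_Cmp, produces a CV_8U mask
    }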
std::swap(a.dataend, b.dataend);
std::swap(a.datalimit, b.datalimit);
std::swap(a.allocator, b.allocator);
-
+
std::swap(a.size.p, b.size.p);
std::swap(a.step.p, b.step.p);
std::swap(a.step.buf[0], b.step.buf[0]);
std::swap(a.step.buf[1], b.step.buf[1]);
-
+
if( a.step.p == b.step.buf )
{
a.step.p = a.step.buf;
a.size.p = &a.rows;
}
-
+
if( b.step.p == a.step.buf )
{
b.step.p = b.step.buf;
m.rows = m.cols = -1;
}
}
-
+
m.dims = _dims;
if( !_sz )
return;
-
+
size_t esz = CV_ELEM_SIZE(m.flags), total = esz;
int i;
for( i = _dims-1; i >= 0; i-- )
int s = _sz[i];
CV_Assert( s >= 0 );
m.size.p[i] = s;
-
+
if( _steps )
m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
else if( autoSteps )
total = (size_t)total1;
}
}
-
+
if( _dims == 1 )
{
m.dims = 2;
m.step[1] = esz;
}
}
-
+
static void updateContinuityFlag(Mat& m)
{
int i, j;
if( m.size[i] > 1 )
break;
}
-
+
for( j = m.dims-1; j > i; j-- )
{
if( m.step[j]*m.size[j] < m.step[j-1] )
break;
}
-
+
int64 t = (int64)m.step[0]*m.size[0];
if( j <= i && t == (int)t )
m.flags |= Mat::CONTINUOUS_FLAG;
else
m.flags &= ~Mat::CONTINUOUS_FLAG;
}
-
+
static void finalizeHdr(Mat& m)
{
updateContinuityFlag(m);
else
m.dataend = m.datalimit = 0;
}
-
-
+
+
void Mat::create(int d, const int* _sizes, int _type)
{
int i;
CV_Assert(0 <= d && _sizes && d <= CV_MAX_DIM && _sizes);
_type = CV_MAT_TYPE(_type);
-
+
if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
{
if( d == 2 && rows == _sizes[0] && cols == _sizes[1] )
if( i == d && (d > 1 || size[1] == 1))
return;
}
-
+
release();
if( d == 0 )
return;
flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
setSize(*this, d, _sizes, 0, true);
-
+
if( total() > 0 )
{
#ifdef HAVE_TGPU
#endif
if( !allocator )
{
- size_t total = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
- data = datastart = (uchar*)fastMalloc(total + (int)sizeof(*refcount));
- refcount = (int*)(data + total);
+ size_t totalsize = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
+ data = datastart = (uchar*)fastMalloc(totalsize + (int)sizeof(*refcount));
+ refcount = (int*)(data + totalsize);
*refcount = 1;
}
else
{
#ifdef HAVE_TGPU
- try
+ try
{
allocator->allocate(dims, size, _type, refcount, datastart, data, step.p);
CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
}catch(...)
{
allocator = 0;
- size_t total = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
- data = datastart = (uchar*)fastMalloc(total + (int)sizeof(*refcount));
- refcount = (int*)(data + total);
+ size_t totalSize = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
+ data = datastart = (uchar*)fastMalloc(totalSize + (int)sizeof(*refcount));
+ refcount = (int*)(data + totalSize);
*refcount = 1;
}
#else
#endif
}
}
-
+
finalizeHdr(*this);
}
step[i] = m.step[i];
}
}
-
+
void Mat::deallocate()
{
if( allocator )
}
}
-
-Mat::Mat(const Mat& m, const Range& rowRange, const Range& colRange) : size(&rows)
+
+Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange) : size(&rows)
{
initEmpty();
CV_Assert( m.dims >= 2 );
if( m.dims > 2 )
{
AutoBuffer<Range> rs(m.dims);
- rs[0] = rowRange;
- rs[1] = colRange;
+ rs[0] = _rowRange;
+ rs[1] = _colRange;
for( int i = 2; i < m.dims; i++ )
rs[i] = Range::all();
*this = m(rs);
return;
}
-
+
*this = m;
- if( rowRange != Range::all() && rowRange != Range(0,rows) )
+ if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
{
- CV_Assert( 0 <= rowRange.start && rowRange.start <= rowRange.end && rowRange.end <= m.rows );
- rows = rowRange.size();
- data += step*rowRange.start;
+ CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end && _rowRange.end <= m.rows );
+ rows = _rowRange.size();
+ data += step*_rowRange.start;
flags |= SUBMATRIX_FLAG;
}
-
- if( colRange != Range::all() && colRange != Range(0,cols) )
+
+ if( _colRange != Range::all() && _colRange != Range(0,cols) )
{
- CV_Assert( 0 <= colRange.start && colRange.start <= colRange.end && colRange.end <= m.cols );
- cols = colRange.size();
- data += colRange.start*elemSize();
+ CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols );
+ cols = _colRange.size();
+ data += _colRange.start*elemSize();
flags &= cols < m.cols ? ~CONTINUOUS_FLAG : -1;
flags |= SUBMATRIX_FLAG;
}
-
+
if( rows == 1 )
flags |= CONTINUOUS_FLAG;
-
+
if( rows <= 0 || cols <= 0 )
{
release();
rows = cols = 0;
}
}
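A brief sketch of what the ranged constructor above provides to callers, assuming the source matrix has at least 20 rows and 40 columns; the resulting header shares data with the parent, so no pixels are copied:

    #include <opencv2/core/core.hpp>

    static void rangeExample(cv::Mat& img)   // assumed at least 20 rows and 40 columns
    {
        // Header over rows [10,20) and columns [30,40); writes go straight into img.
        cv::Mat roi = img(cv::Range(10, 20), cv::Range(30, 40));
        roi.setTo(cv::Scalar::all(0));
    }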
-
-
+
+
Mat::Mat(const Mat& m, const Rect& roi)
: flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
data(m.data + roi.y*m.step[0]), refcount(m.refcount),
CV_Assert( m.dims <= 2 );
flags &= roi.width < m.cols ? ~CONTINUOUS_FLAG : -1;
flags |= roi.height == 1 ? CONTINUOUS_FLAG : 0;
-
+
size_t esz = CV_ELEM_SIZE(flags);
data += roi.x*esz;
CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
CV_XADD(refcount, 1);
if( roi.width < m.cols || roi.height < m.rows )
flags |= SUBMATRIX_FLAG;
-
+
step[0] = m.step[0]; step[1] = esz;
-
+
if( rows <= 0 || cols <= 0 )
{
release();
}
}
-
+
Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps) : size(&rows)
{
initEmpty();
setSize(*this, _dims, _sizes, _steps, true);
finalizeHdr(*this);
}
-
-
+
+
Mat::Mat(const Mat& m, const Range* ranges) : size(&rows)
{
initEmpty();
int i, d = m.dims;
-
+
CV_Assert(ranges);
for( i = 0; i < d; i++ )
{
}
updateContinuityFlag(*this);
}
-
-
+
+
Mat::Mat(const CvMatND* m, bool copyData) : size(&rows)
{
initEmpty();
flags |= CV_MAT_TYPE(m->type);
int _sizes[CV_MAX_DIM];
size_t _steps[CV_MAX_DIM];
-
+
int i, d = m->dims;
for( i = 0; i < d; i++ )
{
_sizes[i] = m->dim[i].size;
_steps[i] = m->dim[i].step;
}
-
+
setSize(*this, d, _sizes, _steps);
finalizeHdr(*this);
temp.copyTo(*this);
}
}
-
-
+
+
Mat Mat::diag(int d) const
{
CV_Assert( dims <= 2 );
Mat m = *this;
size_t esz = elemSize();
int len;
-
+
if( d >= 0 )
{
len = std::min(cols - d, rows);
m.data -= step[0]*d;
}
CV_DbgAssert( len > 0 );
-
+
m.size[0] = m.rows = len;
m.size[1] = m.cols = 1;
m.step[0] += (len > 1 ? esz : 0);
-
+
if( m.rows > 1 )
m.flags &= ~CONTINUOUS_FLAG;
else
m.flags |= CONTINUOUS_FLAG;
-
+
if( size() != Size(1,1) )
m.flags |= SUBMATRIX_FLAG;
-
+
return m;
}
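Mat::diag, as implemented above, returns a single-column header that aliases the selected diagonal. A short sketch (the 4x4 zero matrix is used purely for illustration):

    #include <opencv2/core/core.hpp>

    static void diagExample()
    {
        cv::Mat A = cv::Mat::zeros(4, 4, CV_32F);
        cv::Mat d = A.diag();            // 4x1 header aliasing the main diagonal of A
        d.setTo(cv::Scalar::all(1));     // A is now the 4x4 identity matrix
    }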
-
+
Mat::Mat(const CvMat* m, bool copyData) : size(&rows)
{
initEmpty();
-
+
if( !m )
return;
-
+
if( !copyData )
{
flags = MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG));
}
}
-
+
Mat::Mat(const IplImage* img, bool copyData) : size(&rows)
{
initEmpty();
-
+
if( !img )
return;
-
+
dims = 2;
CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0);
-
- int depth = IPL2CV_DEPTH(img->depth);
+
+ int imgdepth = IPL2CV_DEPTH(img->depth);
size_t esz;
step[0] = img->widthStep;
if(!img->roi)
{
CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL);
- flags = MAGIC_VAL + CV_MAKETYPE(depth, img->nChannels);
+ flags = MAGIC_VAL + CV_MAKETYPE(imgdepth, img->nChannels);
rows = img->height; cols = img->width;
datastart = data = (uchar*)img->imageData;
esz = CV_ELEM_SIZE(flags);
{
CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL || img->roi->coi != 0);
bool selectedPlane = img->roi->coi && img->dataOrder == IPL_DATA_ORDER_PLANE;
- flags = MAGIC_VAL + CV_MAKETYPE(depth, selectedPlane ? 1 : img->nChannels);
+ flags = MAGIC_VAL + CV_MAKETYPE(imgdepth, selectedPlane ? 1 : img->nChannels);
rows = img->roi->height; cols = img->roi->width;
esz = CV_ELEM_SIZE(flags);
data = datastart = (uchar*)img->imageData +
- (selectedPlane ? (img->roi->coi - 1)*step*img->height : 0) +
- img->roi->yOffset*step[0] + img->roi->xOffset*esz;
+ (selectedPlane ? (img->roi->coi - 1)*step*img->height : 0) +
+ img->roi->yOffset*step[0] + img->roi->xOffset*esz;
}
datalimit = datastart + step.p[0]*rows;
dataend = datastart + step.p[0]*(rows-1) + esz*cols;
}
}
-
+
Mat::operator IplImage() const
{
CV_Assert( dims <= 2 );
return img;
}
-
+
void Mat::pop_back(size_t nelems)
{
CV_Assert( nelems <= (size_t)size.p[0] );
-
+
if( isSubmatrix() )
*this = rowRange(0, size.p[0] - (int)nelems);
else
}*/
}
}
-
-
+
+
void Mat::push_back_(const void* elem)
{
int r = size.p[0];
if( isSubmatrix() || dataend + step.p[0] > datalimit )
reserve( std::max(r + 1, (r*3+1)/2) );
-
+
size_t esz = elemSize();
memcpy(data + r*step.p[0], elem, esz);
size.p[0] = r + 1;
void Mat::reserve(size_t nelems)
{
const size_t MIN_SIZE = 64;
-
+
CV_Assert( (int)nelems >= 0 );
if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit )
return;
-
+
int r = size.p[0];
-
+
if( (size_t)r >= nelems )
return;
-
+
size.p[0] = std::max((int)nelems, 1);
size_t newsize = total()*elemSize();
-
+
if( newsize < MIN_SIZE )
size.p[0] = (int)((MIN_SIZE + newsize - 1)*nelems/newsize);
-
+
Mat m(dims, size.p, type());
size.p[0] = r;
if( r > 0 )
Mat mpart = m.rowRange(0, r);
copyTo(mpart);
}
-
+
*this = m;
size.p[0] = r;
dataend = data + step.p[0]*r;
}
-
+
void Mat::resize(size_t nelems)
{
int saveRows = size.p[0];
if( saveRows == (int)nelems )
return;
CV_Assert( (int)nelems >= 0 );
-
+
if( isSubmatrix() || data + step.p[0]*nelems > datalimit )
reserve(nelems);
-
+
size.p[0] = (int)nelems;
dataend += (size.p[0] - saveRows)*step.p[0];
-
+
//updateContinuityFlag(*this);
-}
-
+}
+
void Mat::resize(size_t nelems, const Scalar& s)
{
int saveRows = size.p[0];
resize(nelems);
-
+
if( size.p[0] > saveRows )
{
Mat part = rowRange(saveRows, size.p[0]);
part = s;
}
-}
-
+}
+
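A minimal sketch of the growth path handled by the reserve/resize methods above and push_back below, assuming single-row CV_32F samples; the concrete values are placeholders:

    #include <opencv2/core/core.hpp>

    static void growExample()
    {
        cv::Mat samples(0, 3, CV_32F);   // no rows yet, 3 columns per sample
        cv::Mat row = (cv::Mat_<float>(1, 3) << 1.f, 2.f, 3.f);
        samples.push_back(row);                    // appends one row, reallocating if needed
        samples.resize(10, cv::Scalar::all(0));    // grow to 10 rows; new rows are zero-filled
    }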
void Mat::push_back(const Mat& elems)
{
int r = size.p[0], delta = elems.size.p[0];
push_back(tmp);
return;
}
- if( !data )
- {
- *this = elems.clone();
- return;
- }
+ if( !data )
+ {
+ *this = elems.clone();
+ return;
+ }
size.p[0] = elems.size.p[0];
bool eq = size == elems.size;
CV_Error(CV_StsUnmatchedSizes, "");
if( type() != elems.type() )
CV_Error(CV_StsUnmatchedFormats, "");
-
+
if( isSubmatrix() || dataend + step.p[0]*delta > datalimit )
reserve( std::max(r + delta, (r*3+1)/2) );
-
+
size.p[0] += delta;
dataend += step.p[0]*delta;
-
+
//updateContinuityFlag(*this);
-
+
if( isContinuous() && elems.isContinuous() )
memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize());
else
}
}
-
+
Mat cvarrToMat(const CvArr* arr, bool copyData,
bool /*allowND*/, int coiMode)
{
CV_Assert( dims <= 2 && step[0] > 0 );
size_t esz = elemSize(), minstep;
ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;
-
+
if( delta1 == 0 )
ofs.x = ofs.y = 0;
else
else
flags &= ~CONTINUOUS_FLAG;
return *this;
-}
+}
}
-
+
void cv::extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
{
Mat mat = cvarrToMat(arr, false, true, 1);
_ch.create(mat.dims, mat.size, mat.depth());
Mat ch = _ch.getMat();
if(coi < 0)
- {
+ {
CV_Assert( CV_IS_IMAGE(arr) );
coi = cvGetImageCOI((const IplImage*)arr)-1;
}
int _pairs[] = { coi, 0 };
mixChannels( &mat, 1, &ch, 1, _pairs, 1 );
}
-
+
void cv::insertImageCOI(InputArray _ch, CvArr* arr, int coi)
{
Mat ch = _ch.getMat(), mat = cvarrToMat(arr, false, true, 1);
if(coi < 0)
- {
+ {
CV_Assert( CV_IS_IMAGE(arr) );
coi = cvGetImageCOI((const IplImage*)arr)-1;
}
int _pairs[] = { 0, coi };
mixChannels( &ch, 1, &mat, 1, _pairs, 1 );
}
-
+
namespace cv
{
{
int cn = channels();
Mat hdr = *this;
-
+
if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
{
hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
return hdr;
}
-
+
CV_Assert( dims <= 2 );
-
+
if( new_cn == 0 )
new_cn = cn;
return hdr;
}
-
+
int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
{
return (depth() == _depth || _depth <= 0) &&
}
}
-
+
/*************************************************************************************************\
Input/Output Array
\*************************************************************************************************/
_InputArray::_InputArray() : flags(0), obj(0) {}
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+_InputArray::~_InputArray() {}
+#endif
_InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {}
_InputArray::_InputArray(const vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {}
_InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {}
_InputArray::_InputArray(const GlBuffer& buf) : flags(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER), obj((void*)&buf) {}
_InputArray::_InputArray(const GlTexture& tex) : flags(FIXED_TYPE + FIXED_SIZE + OPENGL_TEXTURE), obj((void*)&tex) {}
_InputArray::_InputArray(const gpu::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
-
+
Mat _InputArray::getMat(int i) const
{
int k = kind();
-
+
if( k == MAT )
{
const Mat* m = (const Mat*)obj;
return *m;
return m->row(i);
}
-
+
if( k == EXPR )
{
CV_Assert( i < 0 );
return (Mat)*((const MatExpr*)obj);
}
-
+
if( k == MATX )
{
CV_Assert( i < 0 );
return Mat(sz, flags, obj);
}
-
+
if( k == STD_VECTOR )
{
CV_Assert( i < 0 );
int t = CV_MAT_TYPE(flags);
const vector<uchar>& v = *(const vector<uchar>*)obj;
-
+
return !v.empty() ? Mat(size(), t, (void*)&v[0]) : Mat();
}
-
+
if( k == NONE )
return Mat();
-
+
if( k == STD_VECTOR_VECTOR )
{
int t = type(i);
const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
CV_Assert( 0 <= i && i < (int)vv.size() );
const vector<uchar>& v = vv[i];
-
+
return !v.empty() ? Mat(size(i), t, (void*)&v[0]) : Mat();
}
-
+
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
{
const vector<Mat>& v = *(const vector<Mat>*)obj;
CV_Assert( 0 <= i && i < (int)v.size() );
-
+
return v[i];
- }
+ }
}
-
-
+
+
void _InputArray::getMatVector(vector<Mat>& mv) const
{
int k = kind();
-
+
if( k == MAT )
{
const Mat& m = *(const Mat*)obj;
int i, n = (int)m.size[0];
mv.resize(n);
-
+
for( i = 0; i < n; i++ )
mv[i] = m.dims == 2 ? Mat(1, m.cols, m.type(), (void*)m.ptr(i)) :
Mat(m.dims-1, &m.size[1], m.type(), (void*)m.ptr(i), &m.step[1]);
return;
}
-
+
if( k == EXPR )
{
Mat m = *(const MatExpr*)obj;
int i, n = m.size[0];
mv.resize(n);
-
+
for( i = 0; i < n; i++ )
mv[i] = m.row(i);
return;
}
-
+
if( k == MATX )
{
size_t i, n = sz.height, esz = CV_ELEM_SIZE(flags);
mv.resize(n);
-
+
for( i = 0; i < n; i++ )
mv[i] = Mat(1, sz.width, CV_MAT_TYPE(flags), (uchar*)obj + esz*sz.width*i);
return;
}
-
+
if( k == STD_VECTOR )
{
const vector<uchar>& v = *(const vector<uchar>*)obj;
-
+
size_t i, n = v.size(), esz = CV_ELEM_SIZE(flags);
int t = CV_MAT_DEPTH(flags), cn = CV_MAT_CN(flags);
mv.resize(n);
-
+
for( i = 0; i < n; i++ )
mv[i] = Mat(1, cn, t, (void*)(&v[0] + esz*i));
return;
}
-
+
if( k == NONE )
{
mv.clear();
return;
}
-
+
if( k == STD_VECTOR_VECTOR )
{
const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
int i, n = (int)vv.size();
int t = CV_MAT_TYPE(flags);
mv.resize(n);
-
+
for( i = 0; i < n; i++ )
{
const vector<uchar>& v = vv[i];
}
return;
}
-
+
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
{
return *d_mat;
}
}
-
+
int _InputArray::kind() const
{
return flags & KIND_MASK;
}
-
+
Size _InputArray::size(int i) const
{
int k = kind();
-
+
if( k == MAT )
{
CV_Assert( i < 0 );
return ((const Mat*)obj)->size();
}
-
+
if( k == EXPR )
{
CV_Assert( i < 0 );
return ((const MatExpr*)obj)->size();
}
-
+
if( k == MATX )
{
CV_Assert( i < 0 );
return sz;
}
-
+
if( k == STD_VECTOR )
{
CV_Assert( i < 0 );
size_t szb = v.size(), szi = iv.size();
return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
}
-
+
if( k == NONE )
return Size();
-
+
if( k == STD_VECTOR_VECTOR )
{
const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
return vv.empty() ? Size() : Size((int)vv.size(), 1);
CV_Assert( i < (int)vv.size() );
const vector<vector<int> >& ivv = *(const vector<vector<int> >*)obj;
-
+
size_t szb = vv[i].size(), szi = ivv[i].size();
return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
}
-
+
if( k == STD_VECTOR_MAT )
{
const vector<Mat>& vv = *(const vector<Mat>*)obj;
if( i < 0 )
return vv.empty() ? Size() : Size((int)vv.size(), 1);
CV_Assert( i < (int)vv.size() );
-
+
return vv[i].size();
}
{
return size(i).area();
}
-
+
int _InputArray::type(int i) const
{
int k = kind();
-
+
if( k == MAT )
return ((const Mat*)obj)->type();
-
+
if( k == EXPR )
return ((const MatExpr*)obj)->type();
-
+
if( k == MATX || k == STD_VECTOR || k == STD_VECTOR_VECTOR )
return CV_MAT_TYPE(flags);
-
+
if( k == NONE )
return -1;
-
+
if( k == STD_VECTOR_MAT )
{
const vector<Mat>& vv = *(const vector<Mat>*)obj;
CV_Assert( i < (int)vv.size() );
-
+
return vv[i >= 0 ? i : 0].type();
}
-
+
if( k == OPENGL_BUFFER )
return ((const GlBuffer*)obj)->type();
-
+
if( k == OPENGL_TEXTURE )
return ((const GlTexture*)obj)->type();
-
+
CV_Assert( k == GPU_MAT );
//if( k == GPU_MAT )
return ((const gpu::GpuMat*)obj)->type();
}
-
+
int _InputArray::depth(int i) const
{
return CV_MAT_DEPTH(type(i));
}
-
+
int _InputArray::channels(int i) const
{
return CV_MAT_CN(type(i));
}
-
+
bool _InputArray::empty() const
{
int k = kind();
-
+
if( k == MAT )
return ((const Mat*)obj)->empty();
-
+
if( k == EXPR )
return false;
-
+
if( k == MATX )
return false;
-
+
if( k == STD_VECTOR )
{
const vector<uchar>& v = *(const vector<uchar>*)obj;
return v.empty();
}
-
+
if( k == NONE )
return true;
-
+
if( k == STD_VECTOR_VECTOR )
{
const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
return vv.empty();
}
-
+
if( k == STD_VECTOR_MAT )
{
const vector<Mat>& vv = *(const vector<Mat>*)obj;
return vv.empty();
}
-
+
if( k == OPENGL_BUFFER )
return ((const GlBuffer*)obj)->empty();
-
+
if( k == OPENGL_TEXTURE )
return ((const GlTexture*)obj)->empty();
-
+
CV_Assert( k == GPU_MAT );
//if( k == GPU_MAT )
return ((const gpu::GpuMat*)obj)->empty();
}
-
-
+
+
_OutputArray::_OutputArray() {}
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+_OutputArray::~_OutputArray() {}
+#endif
_OutputArray::_OutputArray(Mat& m) : _InputArray(m) {}
_OutputArray::_OutputArray(vector<Mat>& vec) : _InputArray(vec) {}
_OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;}
_OutputArray::_OutputArray(const vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;}
-
+
bool _OutputArray::fixedSize() const
{
return (flags & FIXED_SIZE) == FIXED_SIZE;
{
return (flags & FIXED_TYPE) == FIXED_TYPE;
}
-
-void _OutputArray::create(Size _sz, int type, int i, bool allowTransposed, int fixedDepthMask) const
+
+void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
{
int k = kind();
if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == _sz);
- CV_Assert(!fixedType() || ((Mat*)obj)->type() == type);
- ((Mat*)obj)->create(_sz, type);
+ CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
+ ((Mat*)obj)->create(_sz, mtype);
return;
}
- int sz[] = {_sz.height, _sz.width};
- create(2, sz, type, i, allowTransposed, fixedDepthMask);
+ int sizes[] = {_sz.height, _sz.width};
+ create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
}
-void _OutputArray::create(int rows, int cols, int type, int i, bool allowTransposed, int fixedDepthMask) const
+void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
{
int k = kind();
if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
{
CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == Size(cols, rows));
- CV_Assert(!fixedType() || ((Mat*)obj)->type() == type);
- ((Mat*)obj)->create(rows, cols, type);
+ CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
+ ((Mat*)obj)->create(rows, cols, mtype);
return;
}
- int sz[] = {rows, cols};
- create(2, sz, type, i, allowTransposed, fixedDepthMask);
+ int sizes[] = {rows, cols};
+ create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
}
-
-void _OutputArray::create(int dims, const int* size, int type, int i, bool allowTransposed, int fixedDepthMask) const
+
+void _OutputArray::create(int dims, const int* sizes, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
{
int k = kind();
- type = CV_MAT_TYPE(type);
-
+ mtype = CV_MAT_TYPE(mtype);
+
if( k == MAT )
{
CV_Assert( i < 0 );
CV_Assert(!fixedType() && !fixedSize());
m.release();
}
-
+
if( dims == 2 && m.dims == 2 && m.data &&
- m.type() == type && m.rows == size[1] && m.cols == size[0] )
+ m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
return;
}
if(fixedType())
{
- if(CV_MAT_CN(type) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
- type = m.type();
+ if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
+ mtype = m.type();
else
- CV_Assert(CV_MAT_TYPE(type) == m.type());
+ CV_Assert(CV_MAT_TYPE(mtype) == m.type());
}
if(fixedSize())
{
CV_Assert(m.dims == dims);
for(int j = 0; j < dims; ++j)
- CV_Assert(m.size[j] == size[j]);
+ CV_Assert(m.size[j] == sizes[j]);
}
- m.create(dims, size, type);
+ m.create(dims, sizes, mtype);
return;
}
-
+
if( k == MATX )
{
CV_Assert( i < 0 );
int type0 = CV_MAT_TYPE(flags);
- CV_Assert( type == type0 || (CV_MAT_CN(type) == 1 && ((1 << type0) & fixedDepthMask) != 0) );
- CV_Assert( dims == 2 && ((size[0] == sz.height && size[1] == sz.width) ||
- (allowTransposed && size[0] == sz.width && size[1] == sz.height)));
+ CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == 1 && ((1 << type0) & fixedDepthMask) != 0) );
+ CV_Assert( dims == 2 && ((sizes[0] == sz.height && sizes[1] == sz.width) ||
+ (allowTransposed && sizes[0] == sz.width && sizes[1] == sz.height)));
return;
}
-
+
if( k == STD_VECTOR || k == STD_VECTOR_VECTOR )
{
- CV_Assert( dims == 2 && (size[0] == 1 || size[1] == 1 || size[0]*size[1] == 0) );
- size_t len = size[0]*size[1] > 0 ? size[0] + size[1] - 1 : 0;
+ CV_Assert( dims == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
+ size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0;
vector<uchar>* v = (vector<uchar>*)obj;
-
+
if( k == STD_VECTOR_VECTOR )
{
vector<vector<uchar> >& vv = *(vector<vector<uchar> >*)obj;
}
else
CV_Assert( i < 0 );
-
+
int type0 = CV_MAT_TYPE(flags);
- CV_Assert( type == type0 || (CV_MAT_CN(type) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0) );
-
+ CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0) );
+
int esz = CV_ELEM_SIZE(type0);
CV_Assert(!fixedSize() || len == ((vector<uchar>*)v)->size() / esz);
switch( esz )
}
return;
}
-
+
if( k == NONE )
{
- CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
+ CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
return;
}
-
+
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
{
vector<Mat>& v = *(vector<Mat>*)obj;
-
+
if( i < 0 )
{
- CV_Assert( dims == 2 && (size[0] == 1 || size[1] == 1 || size[0]*size[1] == 0) );
- size_t len = size[0]*size[1] > 0 ? size[0] + size[1] - 1 : 0, len0 = v.size();
-
+ CV_Assert( dims == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
+ size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0, len0 = v.size();
+
CV_Assert(!fixedSize() || len == len0);
v.resize(len);
if( fixedType() )
{
- int type = CV_MAT_TYPE(flags);
+ int _type = CV_MAT_TYPE(flags);
for( size_t j = len0; j < len; j++ )
{
- if( v[i].type() == type )
+ if( v[i].type() == _type )
continue;
CV_Assert( v[i].empty() );
- v[i].flags = (v[i].flags & ~CV_MAT_TYPE_MASK) | type;
+ v[i].flags = (v[i].flags & ~CV_MAT_TYPE_MASK) | _type;
}
}
return;
}
-
+
CV_Assert( i < (int)v.size() );
Mat& m = v[i];
-
+
if( allowTransposed )
{
if( !m.isContinuous() )
CV_Assert(!fixedType() && !fixedSize());
m.release();
}
-
+
if( dims == 2 && m.dims == 2 && m.data &&
- m.type() == type && m.rows == size[1] && m.cols == size[0] )
+ m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
return;
}
if(fixedType())
{
- if(CV_MAT_CN(type) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
- type = m.type();
+ if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
+ mtype = m.type();
else
- CV_Assert(!fixedType() || (CV_MAT_CN(type) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0));
+ CV_Assert(!fixedType() || (CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0));
}
if(fixedSize())
{
CV_Assert(m.dims == dims);
for(int j = 0; j < dims; ++j)
- CV_Assert(m.size[j] == size[j]);
+ CV_Assert(m.size[j] == sizes[j]);
}
- m.create(dims, size, type);
+ m.create(dims, sizes, mtype);
}
}
-
+
void _OutputArray::release() const
{
CV_Assert(!fixedSize());
int k = kind();
-
+
if( k == MAT )
{
((Mat*)obj)->release();
return;
}
-
+
if( k == NONE )
return;
-
+
if( k == STD_VECTOR )
{
create(Size(), CV_MAT_TYPE(flags));
return;
}
-
+
if( k == STD_VECTOR_VECTOR )
{
((vector<vector<uchar> >*)obj)->clear();
return;
}
-
+
CV_Assert( k == STD_VECTOR_MAT );
//if( k == STD_VECTOR_MAT )
{
((vector<Mat>*)obj)->clear();
- }
+ }
}
void _OutputArray::clear() const
{
int k = kind();
-
+
if( k == MAT )
{
CV_Assert(!fixedSize());
((Mat*)obj)->resize(0);
return;
}
-
+
release();
}
-
+
bool _OutputArray::needed() const
{
return kind() != NONE;
static _OutputArray _none;
OutputArray noArray() { return _none; }
-
+
}
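The proxy classes implemented above are what let a single function body accept a Mat, a std::vector, or a Matx. A sketch of a caller-facing function written against them (scaleBy2 is a hypothetical name, not part of the library):

    #include <opencv2/core/core.hpp>

    // Hypothetical helper: doubles every element. Callers may pass cv::Mat,
    // std::vector<float>, cv::Matx, etc.; the proxies above normalize the access.
    static void scaleBy2(cv::InputArray _src, cv::OutputArray _dst)
    {
        cv::Mat src = _src.getMat();             // header over whatever was passed in
        _dst.create(src.size(), src.type());     // allocate (or validate) the destination
        cv::Mat dst = _dst.getMat();
        src.convertTo(dst, src.type(), 2.0);
    }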
/*************************************************************************************************\
_dst.release();
return;
}
-
+
int totalCols = 0, cols = 0;
size_t i;
for( i = 0; i < nsrc; i++ )
cols += src[i].cols;
}
}
-
+
void cv::hconcat(InputArray src1, InputArray src2, OutputArray dst)
{
Mat src[] = {src1.getMat(), src2.getMat()};
hconcat(src, 2, dst);
}
-
+
void cv::hconcat(InputArray _src, OutputArray dst)
{
vector<Mat> src;
_dst.release();
return;
}
-
+
int totalRows = 0, rows = 0;
size_t i;
for( i = 0; i < nsrc; i++ )
rows += src[i].rows;
}
}
-
+
void cv::vconcat(InputArray src1, InputArray src2, OutputArray dst)
{
Mat src[] = {src1.getMat(), src2.getMat()};
vconcat(src, 2, dst);
-}
+}
void cv::vconcat(InputArray _src, OutputArray dst)
{
_src.getMatVector(src);
vconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
}
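A short usage sketch of the hconcat/vconcat overloads defined above (sizes and types are illustrative):

    #include <opencv2/core/core.hpp>

    static void concatExample()
    {
        cv::Mat A = cv::Mat::ones(2, 3, CV_8U);
        cv::Mat B = cv::Mat::zeros(2, 3, CV_8U);
        cv::Mat H, V;
        cv::hconcat(A, B, H);   // 2x6: B's columns appended to the right of A
        cv::vconcat(A, B, V);   // 4x3: B's rows appended below A
    }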
-
+
//////////////////////////////////////// set identity ////////////////////////////////////////////
void cv::setIdentity( InputOutputArray _m, const Scalar& s )
{
Mat m = _m.getMat();
CV_Assert( m.dims <= 2 );
int i, j, rows = m.rows, cols = m.cols, type = m.type();
-
+
if( type == CV_32FC1 )
{
float* data = (float*)m.data;
}
}
-//////////////////////////////////////////// trace ///////////////////////////////////////////
-
+//////////////////////////////////////////// trace ///////////////////////////////////////////
+
cv::Scalar cv::trace( InputArray _m )
{
Mat m = _m.getMat();
CV_Assert( m.dims <= 2 );
int i, type = m.type();
int nm = std::min(m.rows, m.cols);
-
+
if( type == CV_32FC1 )
{
const float* ptr = (const float*)m.data;
_s += ptr[i*step];
return _s;
}
-
+
if( type == CV_64FC1 )
{
const double* ptr = (const double*)m.data;
_s += ptr[i*step];
return _s;
}
-
+
return cv::sum(m.diag());
}
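A small sketch exercising setIdentity and trace as implemented above (the scalar 2.0 is an arbitrary example value):

    #include <opencv2/core/core.hpp>

    static void identityTraceExample()
    {
        cv::Mat M(3, 3, CV_64F);
        cv::setIdentity(M, cv::Scalar(2.0));   // 2 on the diagonal, 0 everywhere else
        cv::Scalar t = cv::trace(M);           // t[0] == 6.0
    }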
{
int i=0, j, m = sz.width, n = sz.height;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for(; i <= m - 4; i += 4 )
{
T* d0 = (T*)(dst + dstep*i);
T* d1 = (T*)(dst + dstep*(i+1));
T* d2 = (T*)(dst + dstep*(i+2));
T* d3 = (T*)(dst + dstep*(i+3));
-
+
for( j = 0; j <= n - 4; j += 4 )
{
const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
-
+
d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
d1[j] = s0[1]; d1[j+1] = s1[1]; d1[j+2] = s2[1]; d1[j+3] = s3[1];
d2[j] = s0[2]; d2[j+1] = s1[2]; d2[j+2] = s2[2]; d2[j+3] = s3[2];
d3[j] = s0[3]; d3[j+1] = s1[3]; d3[j+2] = s2[3]; d3[j+3] = s3[3];
}
-
+
for( ; j < n; j++ )
{
const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
{
T* d0 = (T*)(dst + dstep*i);
j = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for(; j <= n - 4; j += 4 )
{
const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
-
+
d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
}
#endif
std::swap( row[j], *(T*)(data1 + step*j) );
}
}
-
+
typedef void (*TransposeFunc)( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz );
typedef void (*TransposeInplaceFunc)( uchar* data, size_t step, int n );
-
+
#define DEF_TRANSPOSE_FUNC(suffix, type) \
static void transpose_##suffix( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ) \
{ transpose_<type>(src, sstep, dst, dstep, sz); } \
};
}
-
+
void cv::transpose( InputArray _src, OutputArray _dst )
{
Mat src = _src.getMat();
_dst.create(src.cols, src.rows, src.type());
Mat dst = _dst.getMat();
-
+
if( dst.data == src.data )
{
TransposeInplaceFunc func = transposeInplaceTab[esz];
{
Mat m = _m.getMat();
CV_Assert( m.dims <= 2 );
-
+
int i, j, nrows = m.rows, type = m.type();
int j0 = 0, j1 = nrows;
CV_Assert( m.rows == m.cols );
CV_Error( CV_StsUnsupportedFormat, "" );
}
-
+
cv::Mat cv::Mat::cross(InputArray _m) const
{
Mat m = _m.getMat();
- int t = type(), d = CV_MAT_DEPTH(t);
- CV_Assert( dims <= 2 && m.dims <= 2 && size() == m.size() && t == m.type() &&
+ int tp = type(), d = CV_MAT_DEPTH(tp);
+ CV_Assert( dims <= 2 && m.dims <= 2 && size() == m.size() && tp == m.type() &&
((rows == 3 && cols == 1) || (cols*channels() == 3 && rows == 1)));
- Mat result(rows, cols, t);
+ Mat result(rows, cols, tp);
if( d == CV_32F )
{
{
src += srcstep;
i = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for(; i <= size.width - 4; i += 4 )
{
WT s0, s1;
dst[k] = (ST)a0;
}
}
- }
+ }
}
typedef void (*ReduceFunc)( const Mat& src, Mat& dst );
_dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1,
CV_MAKETYPE(dtype >= 0 ? dtype : stype, cn));
Mat dst = _dst.getMat(), temp = dst;
-
+
CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX ||
op == CV_REDUCE_MIN || op == CV_REDUCE_AVG );
CV_Assert( src.channels() == dst.channels() );
if( op0 == CV_REDUCE_AVG )
temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols));
}
-
-
+
+
//////////////////////////////////////// sort ///////////////////////////////////////////
namespace cv
bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
bool inplace = src.data == dst.data;
bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
-
+
if( sortRows )
n = src.rows, len = src.cols;
else
bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
CV_Assert( src.data != dst.data );
-
+
if( sortRows )
n = src.rows, len = src.cols;
else
typedef void (*SortFunc)(const Mat& src, Mat& dst, int flags);
}
-
+
void cv::sort( InputArray _src, OutputArray _dst, int flags )
{
static SortFunc tab[] =
Mat src = _src.getMat();
SortFunc func = tab[src.depth()];
CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
-
+
Mat dst = _dst.getMat();
if( dst.data == src.data )
_dst.release();
dst = _dst.getMat();
func( src, dst, flags );
}
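A usage sketch of cv::sort (and its companion cv::sortIdx) as dispatched above; the CV_SORT_* flag macros are assumed to come from the C API header, so it is included explicitly:

    #include <opencv2/core/core.hpp>
    #include <opencv2/core/core_c.h>   // CV_SORT_* flag macros

    static void sortExample(const cv::Mat& src)   // single-channel input assumed
    {
        cv::Mat sorted, order;
        cv::sort(src, sorted, CV_SORT_EVERY_ROW | CV_SORT_ASCENDING);    // sorted values
        cv::sortIdx(src, order, CV_SORT_EVERY_ROW | CV_SORT_ASCENDING);  // CV_32S index table
    }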
-
-
+
+
////////////////////////////////////////// kmeans ////////////////////////////////////////////
namespace cv
dist[i] = normL2Sqr_(data + step*i, data + step*centers[0], dims);
sum0 += dist[i];
}
-
+
for( k = 1; k < K; k++ )
{
double bestSum = DBL_MAX;
tdist2[i] = std::min(normL2Sqr_(data + step*i, data + step*ci, dims), dist[i]);
s += tdist2[i];
}
-
+
if( s < bestSum )
{
bestSum = s;
}
}
-
+
double cv::kmeans( InputArray _data, int K,
InputOutputArray _bestLabels,
TermCriteria criteria, int attempts,
CV_Assert( N >= K );
_bestLabels.create(N, 1, CV_32S, -1, true);
-
+
Mat _labels, best_labels = _bestLabels.getMat();
if( flags & CV_KMEANS_USE_INITIAL_LABELS )
{
for( i = 0; i < N; i++ )
CV_Assert( (unsigned)labels[i] < (unsigned)K );
}
-
+
// compute centers
centers = Scalar(0);
for( k = 0; k < K; k++ )
sample = data.ptr<float>(i);
k = labels[i];
float* center = centers.ptr<float>(k);
- j=0;
- #if CV_ENABLE_UNROLLED
+ j=0;
+ #if CV_ENABLE_UNROLLED
for(; j <= dims - 4; j += 4 )
{
float t0 = center[j] + sample[j];
if( iter > 0 )
max_center_shift = 0;
-
+
for( k = 0; k < K; k++ )
{
if( counters[k] != 0 )
if( counters[max_k] < counters[k1] )
max_k = k1;
}
-
- double max_dist = 0;
+
+ double max_dist = 0;
int farthest_i = -1;
float* new_center = centers.ptr<float>(k);
float* old_center = centers.ptr<float>(max_k);
float scale = 1.f/counters[max_k];
for( j = 0; j < dims; j++ )
_old_center[j] = old_center[j]*scale;
-
+
for( i = 0; i < N; i++ )
{
if( labels[i] != max_k )
continue;
sample = data.ptr<float>(i);
double dist = normL2Sqr_(sample, _old_center, dims);
-
+
if( max_dist <= dist )
{
max_dist = dist;
farthest_i = i;
}
}
-
+
counters[max_k]--;
counters[k]++;
labels[farthest_i] = k;
sample = data.ptr<float>(farthest_i);
-
+
for( j = 0; j < dims; j++ )
{
old_center[j] -= sample[j];
float scale = 1.f/counters[k];
for( j = 0; j < dims; j++ )
center[j] *= scale;
-
+
if( iter > 0 )
{
double dist = 0;
}
}
}
-
+
if( ++iter == MAX(criteria.maxCount, 2) || max_center_shift <= criteria.epsilon )
break;
cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
-
+
if( dim < 0 )
dim = src.rows > dst.rows ? 0 : src.cols > dst.cols ? 1 : dst.cols == 1;
if( (dim == 0 && (dst.cols != src.cols || dst.rows != 1)) ||
(dim == 1 && (dst.rows != src.rows || dst.cols != 1)) )
CV_Error( CV_StsBadSize, "The output array size is incorrect" );
-
+
if( src.channels() != dst.channels() )
CV_Error( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" );
cvRange( CvArr* arr, double start, double end )
{
int ok = 0;
-
+
CvMat stub, *mat = (CvMat*)arr;
double delta;
int type, step;
double val = start;
int i, j;
int rows, cols;
-
+
if( !CV_IS_MAT(mat) )
mat = cvGetMat( mat, &stub);
CV_IMPL void
cvSort( const CvArr* _src, CvArr* _dst, CvArr* _idx, int flags )
{
- cv::Mat src = cv::cvarrToMat(_src), dst, idx;
-
+ cv::Mat src = cv::cvarrToMat(_src);
+
if( _idx )
{
cv::Mat idx0 = cv::cvarrToMat(_idx), idx = idx0;
CV_Assert( labels.isContinuous() && labels.type() == CV_32S &&
(labels.cols == 1 || labels.rows == 1) &&
labels.cols + labels.rows - 1 == data.rows );
-
+
double compactness = cv::kmeans(data, cluster_count, labels, termcrit, attempts,
flags, _centers ? cv::_OutputArray(centers) : cv::_OutputArray() );
if( _compactness )
: arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
{
init(_arrays, _planes, 0, _narrays);
-}
-
+}
+
NAryMatIterator::NAryMatIterator(const Mat** _arrays, uchar** _ptrs, int _narrays)
: arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
{
init(_arrays, 0, _ptrs, _narrays);
}
-
+
void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int _narrays)
{
CV_Assert( _arrays && (_ptrs || _planes) );
int i, j, d1=0, i0 = -1, d = -1;
-
+
arrays = _arrays;
ptrs = _ptrs;
planes = _planes;
narrays = _narrays;
nplanes = 0;
size = 0;
-
+
if( narrays < 0 )
{
for( i = 0; _arrays[i] != 0; i++ )
const Mat& A = *arrays[i];
if( ptrs )
ptrs[i] = A.data;
-
+
if( !A.data )
continue;
-
+
if( i0 < 0 )
{
i0 = i;
d = A.dims;
-
+
 // find the first dimensionality that is different from 1;
 // in any of the arrays, the first "d1" steps do not affect the continuity

for( d1 = 0; d1 < d; d1++ )
iterdepth = j;
if( iterdepth == d1 )
iterdepth = 0;
-
+
nplanes = 1;
for( j = iterdepth-1; j >= 0; j-- )
nplanes *= arrays[i0]->size[j];
}
else
iterdepth = 0;
-
+
idx = 0;
-
+
if( !planes )
return;
{
CV_Assert(arrays[i] != 0);
const Mat& A = *arrays[i];
-
+
if( !A.data )
{
planes[i] = Mat();
continue;
}
-
- planes[i] = Mat(1, (int)size, A.type(), A.data);
+
+ planes[i] = Mat(1, (int)size, A.type(), A.data);
}
}
if( idx >= nplanes-1 )
return *this;
++idx;
-
+
if( iterdepth == 1 )
{
if( ptrs )
planes[i].data = data;
}
}
-
+
return *this;
}
if( !m )
return Point();
CV_DbgAssert(m->dims <= 2);
-
+
ptrdiff_t ofs = ptr - m->data;
int y = (int)(ofs/m->step[0]);
return Point((int)((ofs - y*m->step[0])/elemSize), y);
}
return result;
}
-
+
void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
{
if( m->isContinuous() )
ptr = sliceEnd;
return;
}
-
+
int d = m->dims;
if( d == 2 )
{
sliceStart + (ofs - y*m->cols)*elemSize;
return;
}
-
+
if( relative )
ofs += lpos();
-
+
if( ofs < 0 )
ofs = 0;
-
+
int szi = m->size[d-1];
ptrdiff_t t = ofs/szi;
int v = (int)(ofs - t*szi);
ofs = t;
ptr = m->data + v*elemSize;
sliceStart = m->data;
-
+
for( int i = d-2; i >= 0; i-- )
{
szi = m->size[i];
ofs = t;
sliceStart += v*m->step[i];
}
-
+
sliceEnd = sliceStart + m->size[d-1]*elemSize;
if( ofs > 0 )
ptr = sliceEnd;
else
ptr = sliceStart + (ptr - m->data);
}
-
+
void MatConstIterator::seek(const int* _idx, bool relative)
{
int i, d = m->dims;
return (b.ptr - a.ptr)/b.elemSize;
return b.lpos() - a.lpos();
-}
-
+}
+
//////////////////////////////// SparseMat ////////////////////////////////
template<typename T1, typename T2> void
to[i] = saturate_cast<T2>(from[i]*alpha + beta);
}
-ConvertData getConvertData(int fromType, int toType)
+static ConvertData getConvertData(int fromType, int toType)
{
static ConvertData tab[][8] =
{{ convertData_<uchar, uchar>, convertData_<uchar, schar>,
return func;
}
-ConvertScaleData getConvertScaleData(int fromType, int toType)
+static ConvertScaleData getConvertScaleData(int fromType, int toType)
{
static ConvertScaleData tab[][8] =
{{ convertScaleData_<uchar, uchar>, convertScaleData_<uchar, schar>,
sizeof(int)*std::max(dims - CV_MAX_DIM, 0), CV_ELEM_SIZE1(_type));
nodeSize = alignSize(valueOffset +
CV_ELEM_SIZE(_type), (int)sizeof(size_t));
-
+
int i;
for( i = 0; i < dims; i++ )
size[i] = _sizes[i];
int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1];
size_t esz = m.elemSize();
- uchar* ptr = m.data;
+ uchar* dptr = m.data;
for(;;)
{
- for( i = 0; i < lastSize; i++, ptr += esz )
+ for( i = 0; i < lastSize; i++, dptr += esz )
{
- if( isZeroElem(ptr, esz) )
+ if( isZeroElem(dptr, esz) )
continue;
idx[d-1] = i;
uchar* to = newNode(idx, hash(idx));
- copyElem( ptr, to, esz );
+ copyElem( dptr, to, esz );
}
-
+
for( i = d - 2; i >= 0; i-- )
{
- ptr += m.step[i] - m.size[i+1]*m.step[i+1];
+ dptr += m.step[i] - m.size[i+1]*m.step[i+1];
if( ++idx[i] < m.size[i] )
break;
idx[i] = 0;
break;
}
}
-
+
SparseMat::SparseMat(const CvSparseMat* m)
: flags(MAGIC_VAL), hdr(0)
{
m = temp;
return;
}
-
+
CV_Assert(hdr != 0);
if( hdr != m.hdr )
m.create( hdr->dims, hdr->size, rtype );
-
+
SparseMatConstIterator from = begin();
size_t i, N = nzcount();
{
const Node* n = from.node();
uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
- cvtfunc( from.ptr, to, cn );
+ cvtfunc( from.ptr, to, cn );
}
}
else
{
const Node* n = from.node();
uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
- cvtfunc( from.ptr, to, cn, alpha, 0 );
+ cvtfunc( from.ptr, to, cn, alpha, 0 );
}
}
}
if( rtype < 0 )
rtype = type();
rtype = CV_MAKETYPE(rtype, cn);
-
+
CV_Assert( hdr );
m.create( dims(), hdr->size, rtype );
m = Scalar(beta);
return &value<uchar>(elem);
nidx = elem->next;
}
-
+
if( createMissing )
{
int idx[] = { i0 };
}
return 0;
}
-
+
uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval)
{
CV_Assert( hdr && hdr->dims == 2 );
resizeHashTab(std::max(hsize*2, (size_t)8));
hsize = hdr->hashtab.size();
}
-
+
if( !hdr->freeList )
{
size_t i, nsz = hdr->nodeSize, psize = hdr->pool.size(),
*((double*)p) = 0.;
else
memset(p, 0, esz);
-
+
return p;
}
double norm( const SparseMat& src, int normType )
{
SparseMatConstIterator it = src.begin();
-
+
size_t i, N = src.nzcount();
normType &= NORM_TYPE_MASK;
int type = src.type();
double result = 0;
-
+
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
-
+
if( type == CV_32F )
{
if( normType == NORM_INF )
else
for( i = 0; i < N; i++, ++it )
{
- double v = *(const float*)it.ptr;
+ double v = *(const float*)it.ptr;
result += v*v;
}
}
else
for( i = 0; i < N; i++, ++it )
{
- double v = *(const double*)it.ptr;
+ double v = *(const double*)it.ptr;
result += v*v;
}
}
else
CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
-
+
if( normType == NORM_L2 )
result = std::sqrt(result);
return result;
}
-
+
void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _minidx, int* _maxidx )
{
SparseMatConstIterator it = src.begin();
size_t i, N = src.nzcount(), d = src.hdr ? src.hdr->dims : 0;
int type = src.type();
const int *minidx = 0, *maxidx = 0;
-
+
if( type == CV_32F )
{
float minval = FLT_MAX, maxval = -FLT_MAX;
}
else
CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
-
+
if( _minidx )
for( i = 0; i < d; i++ )
_minidx[i] = minidx[i];
_maxidx[i] = maxidx[i];
}
-
+
void normalize( const SparseMat& src, SparseMat& dst, double a, int norm_type )
{
double scale = 1;
}
else
CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
-
+
src.convertTo( dst, -1, scale );
}
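/* Editorial sketch (not part of the patch): the SparseMat norm/normalize functions
   defined above operate on CV_32F/CV_64F sparse arrays only. Names below are
   illustrative only. */
#include <opencv2/core/core.hpp>

static void sparse_example()
{
    const int sz[] = { 10, 10 };
    cv::SparseMat sm(2, sz, CV_32F);
    sm.ref<float>(3, 7) =  2.f;                 // ref<> creates the element on demand
    sm.ref<float>(5, 1) = -1.f;
    double l2 = cv::norm(sm, cv::NORM_L2);      // sqrt(2*2 + 1*1)
    cv::SparseMat unit;
    cv::normalize(sm, unit, 1.0, cv::NORM_L2);  // scale so that ||unit||_2 == 1
    (void)l2;
}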
////////////////////// RotatedRect //////////////////////
-
+
void RotatedRect::points(Point2f pt[]) const
{
double _angle = angle*CV_PI/180.;
float b = (float)cos(_angle)*0.5f;
float a = (float)sin(_angle)*0.5f;
-
+
pt[0].x = center.x - a*size.height - b*size.width;
pt[0].y = center.y + b*size.height - a*size.width;
pt[1].x = center.x + a*size.height - b*size.width;
r.width -= r.x - 1;
r.height -= r.y - 1;
return r;
-}
-
}
-
+
+}
+
/* End of file. */
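/* Editorial note (not part of the patch): the changes in the OpenGL interop code
   below follow one pattern -- constructor/method parameters are renamed (usage ->
   _usage, rows -> _rows, size -> _size, ...) so they no longer shadow class members
   of the same name, which silences -Wshadow-style warnings, while the existing
   "(void)arg;" casts in the stubbed !HAVE_OPENGL / !HAVE_CUDA branches are only
   re-indented. A generic sketch of the same idiom, with hypothetical names: */
class BufferSketch
{
public:
    explicit BufferSketch(int _rows) : rows_(_rows) {}  // a parameter named "rows" would
                                                        // shadow the rows() accessor below
    int rows() const { return rows_; }
    void bind(int target)
    {
#ifndef HAVE_OPENGL
        (void)target;   // keep the compiler quiet about the unused parameter in the stub build
#else
        // the real implementation would use 'target' here
#endif
    }
private:
    int rows_;
};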
void cv::gpu::setGlDevice(int device)\r
{\r
#ifndef HAVE_CUDA\r
- (void)device;\r
+ (void)device;\r
throw_nocuda;\r
#else\r
#ifndef HAVE_OPENGL\r
- (void)device;\r
+ (void)device;\r
throw_nogl;\r
#else\r
if (!glFuncTab()->isGlContextInitialized())\r
{\r
public:\r
static const Ptr<Impl>& empty();\r
- \r
+\r
Impl(int rows, int cols, int type, unsigned int target);\r
Impl(const Mat& m, unsigned int target);\r
~Impl();\r
\r
private:\r
Impl();\r
- \r
+\r
unsigned int buffer_;\r
\r
#ifdef HAVE_CUDA\r
\r
#endif // HAVE_OPENGL\r
\r
-cv::GlBuffer::GlBuffer(Usage usage) : rows_(0), cols_(0), type_(0), usage_(usage)\r
+cv::GlBuffer::GlBuffer(Usage _usage) : rows_(0), cols_(0), type_(0), usage_(_usage)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)usage;\r
+ (void)_usage;\r
throw_nogl;\r
#else\r
impl_ = Impl::empty();\r
#endif\r
}\r
\r
-cv::GlBuffer::GlBuffer(int rows, int cols, int type, Usage usage) : rows_(0), cols_(0), type_(0), usage_(usage)\r
+cv::GlBuffer::GlBuffer(int _rows, int _cols, int _type, Usage _usage) : rows_(0), cols_(0), type_(0), usage_(_usage)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)rows;\r
- (void)cols;\r
- (void)type;\r
- (void)usage;\r
+ (void)_rows;\r
+ (void)_cols;\r
+ (void)_type;\r
+ (void)_usage;\r
throw_nogl;\r
#else\r
- impl_ = new Impl(rows, cols, type, usage);\r
- rows_ = rows;\r
- cols_ = cols;\r
- type_ = type;\r
+ impl_ = new Impl(_rows, _cols, _type, _usage);\r
+ rows_ = _rows;\r
+ cols_ = _cols;\r
+ type_ = _type;\r
#endif\r
}\r
\r
-cv::GlBuffer::GlBuffer(Size size, int type, Usage usage) : rows_(0), cols_(0), type_(0), usage_(usage)\r
+cv::GlBuffer::GlBuffer(Size _size, int _type, Usage _usage) : rows_(0), cols_(0), type_(0), usage_(_usage)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)size;\r
- (void)type;\r
- (void)usage;\r
+ (void)_size;\r
+ (void)_type;\r
+ (void)_usage;\r
throw_nogl;\r
#else\r
- impl_ = new Impl(size.height, size.width, type, usage);\r
- rows_ = size.height;\r
- cols_ = size.width;\r
- type_ = type;\r
+ impl_ = new Impl(_size.height, _size.width, _type, _usage);\r
+ rows_ = _size.height;\r
+ cols_ = _size.width;\r
+ type_ = _type;\r
#endif\r
}\r
\r
-cv::GlBuffer::GlBuffer(InputArray mat_, Usage usage) : rows_(0), cols_(0), type_(0), usage_(usage)\r
+cv::GlBuffer::GlBuffer(InputArray mat_, Usage _usage) : rows_(0), cols_(0), type_(0), usage_(_usage)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)mat_;\r
- (void)usage;\r
+ (void)mat_;\r
+ (void)_usage;\r
throw_nogl;\r
#else\r
int kind = mat_.kind();\r
- Size size = mat_.size();\r
- int type = mat_.type();\r
+ Size _size = mat_.size();\r
+ int _type = mat_.type();\r
\r
if (kind == _InputArray::GPU_MAT)\r
{\r
throw_nocuda;\r
#else\r
GpuMat d_mat = mat_.getGpuMat();\r
- impl_ = new Impl(d_mat.rows, d_mat.cols, d_mat.type(), usage);\r
+ impl_ = new Impl(d_mat.rows, d_mat.cols, d_mat.type(), _usage);\r
impl_->copyFrom(d_mat);\r
#endif\r
}\r
else\r
{\r
Mat mat = mat_.getMat();\r
- impl_ = new Impl(mat, usage);\r
+ impl_ = new Impl(mat, _usage);\r
}\r
\r
- rows_ = size.height;\r
- cols_ = size.width;\r
- type_ = type;\r
+ rows_ = _size.height;\r
+ cols_ = _size.width;\r
+ type_ = _type;\r
#endif\r
}\r
\r
-void cv::GlBuffer::create(int rows, int cols, int type, Usage usage)\r
+void cv::GlBuffer::create(int _rows, int _cols, int _type, Usage _usage)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)rows;\r
- (void)cols;\r
- (void)type;\r
- (void)usage;\r
+ (void)_rows;\r
+ (void)_cols;\r
+ (void)_type;\r
+ (void)_usage;\r
throw_nogl;\r
#else\r
- if (rows_ != rows || cols_ != cols || type_ != type || usage_ != usage)\r
+ if (rows_ != _rows || cols_ != _cols || type_ != _type || usage_ != _usage)\r
{\r
- impl_ = new Impl(rows, cols, type, usage);\r
- rows_ = rows;\r
- cols_ = cols;\r
- type_ = type;\r
- usage_ = usage;\r
+ impl_ = new Impl(_rows, _cols, _type, _usage);\r
+ rows_ = _rows;\r
+ cols_ = _cols;\r
+ type_ = _type;\r
+ usage_ = _usage;\r
}\r
#endif\r
}\r
void cv::GlBuffer::copyFrom(InputArray mat_)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)mat_;\r
+ (void)mat_;\r
throw_nogl;\r
#else\r
int kind = mat_.kind();\r
- Size size = mat_.size();\r
- int type = mat_.type();\r
+ Size _size = mat_.size();\r
+ int _type = mat_.type();\r
\r
- create(size, type);\r
+ create(_size, _type);\r
\r
switch (kind)\r
{\r
\r
private:\r
Impl();\r
- \r
+\r
GLuint tex_;\r
};\r
\r
#endif\r
}\r
\r
-cv::GlTexture::GlTexture(int rows, int cols, int type) : rows_(0), cols_(0), type_(0), buf_(GlBuffer::TEXTURE_BUFFER)\r
+cv::GlTexture::GlTexture(int _rows, int _cols, int _type) : rows_(0), cols_(0), type_(0), buf_(GlBuffer::TEXTURE_BUFFER)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)rows;\r
- (void)cols;\r
- (void)type;\r
+ (void)_rows;\r
+ (void)_cols;\r
+ (void)_type;\r
throw_nogl;\r
#else\r
- impl_ = new Impl(rows, cols, type);\r
- rows_ = rows;\r
- cols_ = cols;\r
- type_ = type;\r
+ impl_ = new Impl(_rows, _cols, _type);\r
+ rows_ = _rows;\r
+ cols_ = _cols;\r
+ type_ = _type;\r
#endif\r
}\r
\r
-cv::GlTexture::GlTexture(Size size, int type) : rows_(0), cols_(0), type_(0), buf_(GlBuffer::TEXTURE_BUFFER)\r
+cv::GlTexture::GlTexture(Size _size, int _type) : rows_(0), cols_(0), type_(0), buf_(GlBuffer::TEXTURE_BUFFER)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)size;\r
- (void)type;\r
+ (void)_size;\r
+ (void)_type;\r
throw_nogl;\r
#else\r
- impl_ = new Impl(size.height, size.width, type);\r
- rows_ = size.height;\r
- cols_ = size.width;\r
- type_ = type;\r
+ impl_ = new Impl(_size.height, _size.width, _type);\r
+ rows_ = _size.height;\r
+ cols_ = _size.width;\r
+ type_ = _type;\r
#endif\r
}\r
\r
cv::GlTexture::GlTexture(InputArray mat_, bool bgra) : rows_(0), cols_(0), type_(0), buf_(GlBuffer::TEXTURE_BUFFER)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)mat_;\r
- (void)bgra;\r
+ (void)mat_;\r
+ (void)bgra;\r
throw_nogl;\r
-#else \r
+#else\r
int kind = mat_.kind();\r
- Size size = mat_.size();\r
- int type = mat_.type();\r
+ Size _size = mat_.size();\r
+ int _type = mat_.type();\r
\r
switch (kind)\r
{\r
}\r
}\r
\r
- rows_ = size.height;\r
- cols_ = size.width;\r
- type_ = type;\r
+ rows_ = _size.height;\r
+ cols_ = _size.width;\r
+ type_ = _type;\r
#endif\r
}\r
\r
-void cv::GlTexture::create(int rows, int cols, int type)\r
+void cv::GlTexture::create(int _rows, int _cols, int _type)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)rows;\r
- (void)cols;\r
- (void)type;\r
+ (void)_rows;\r
+ (void)_cols;\r
+ (void)_type;\r
throw_nogl;\r
#else\r
- if (rows_ != rows || cols_ != cols || type_ != type)\r
+ if (rows_ != _rows || cols_ != _cols || type_ != _type)\r
{\r
- impl_ = new Impl(rows, cols, type);\r
- rows_ = rows;\r
- cols_ = cols;\r
- type_ = type;\r
+ impl_ = new Impl(_rows, _cols, _type);\r
+ rows_ = _rows;\r
+ cols_ = _cols;\r
+ type_ = _type;\r
}\r
#endif\r
}\r
void cv::GlTexture::copyFrom(InputArray mat_, bool bgra)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)mat_;\r
- (void)bgra;\r
+ (void)mat_;\r
+ (void)bgra;\r
throw_nogl;\r
#else\r
int kind = mat_.kind();\r
- Size size = mat_.size();\r
- int type = mat_.type();\r
+ Size _size = mat_.size();\r
+ int _type = mat_.type();\r
\r
- create(size, type);\r
+ create(_size, _type);\r
\r
switch(kind)\r
{\r
////////////////////////////////////////////////////////////////////////\r
// GlFont\r
\r
-cv::GlFont::GlFont(const string& family, int height, Weight weight, Style style)\r
- : family_(family), height_(height), weight_(weight), style_(style), base_(0)\r
+cv::GlFont::GlFont(const string& _family, int _height, Weight _weight, Style _style)\r
+ : family_(_family), height_(_height), weight_(_weight), style_(_style), base_(0)\r
{\r
#ifndef HAVE_OPENGL\r
throw_nogl;\r
base_ = glGenLists(256);\r
CV_CheckGlError();\r
\r
- glFuncTab()->generateBitmapFont(family, height, weight, (style & STYLE_ITALIC) != 0, (style & STYLE_UNDERLINE) != 0, 0, 256, base_);\r
+ glFuncTab()->generateBitmapFont(family_, height_, weight_, (style_ & STYLE_ITALIC) != 0, (style_ & STYLE_UNDERLINE) != 0, 0, 256, base_);\r
#endif\r
}\r
\r
class FontCompare : public unary_function<Ptr<GlFont>, bool>\r
{\r
public:\r
- inline FontCompare(const string& family, int height, GlFont::Weight weight, GlFont::Style style) \r
+ inline FontCompare(const string& family, int height, GlFont::Weight weight, GlFont::Style style)\r
: family_(family), height_(height), weight_(weight), style_(style)\r
{\r
}\r
Ptr<GlFont> cv::GlFont::get(const std::string& family, int height, Weight weight, Style style)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)family;\r
- (void)height;\r
- (void)weight;\r
- (void)style;\r
+ (void)family;\r
+ (void)height;\r
+ (void)weight;\r
+ (void)style;\r
throw_nogl;\r
return Ptr<GlFont>();\r
#else\r
void cv::render(const GlTexture& tex, Rect_<double> wndRect, Rect_<double> texRect)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)tex;\r
- (void)wndRect;\r
- (void)texRect;\r
+ (void)tex;\r
+ (void)wndRect;\r
+ (void)texRect;\r
throw_nogl;\r
#else\r
if (!tex.empty())\r
void cv::render(const GlArrays& arr, int mode, Scalar color)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)arr;\r
- (void)mode;\r
- (void)color;\r
+ (void)arr;\r
+ (void)mode;\r
+ (void)color;\r
throw_nogl;\r
#else\r
glColor3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);\r
void cv::render(const string& str, const Ptr<GlFont>& font, Scalar color, Point2d pos)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)str;\r
- (void)font;\r
- (void)color;\r
- (void)pos;\r
+ (void)str;\r
+ (void)font;\r
+ (void)color;\r
+ (void)pos;\r
throw_nogl;\r
#else\r
glPushAttrib(GL_DEPTH_BUFFER_BIT);\r
bool icvCheckGlError(const char* file, const int line, const char* func)\r
{\r
#ifndef HAVE_OPENGL\r
- (void)file;\r
- (void)line;\r
- (void)func;\r
+ (void)file;\r
+ (void)line;\r
+ (void)func;\r
return true;\r
#else\r
GLenum err = glGetError();\r
{
CV_Assert(m.dims <= 2);
int type = m.type();
-
+
char crowbrace = getCloseBrace(rowsep);
char orowbrace = crowbrace ? rowsep : '\0';
-
+
if( orowbrace || isspace(rowsep) )
rowsep = '\0';
-
+
for( int i = 0; i < m.rows; i++ )
{
if(orowbrace)
writeMat(out, m, ';', ' ', m.cols == 1);
out << "]";
}
-
+
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, ' ');
writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
out << "]";
}
-
+
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, '[');
writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
out << "], type='" << numpyTypes[m.depth()] << "')";
}
-
+
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, '[');
if(m.rows > 1)
out << "\n";
}
-
+
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, ' ');
writeMat(out, m, ',', ' ', m.cols==1);
out << "}";
}
-
+
void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
{
writeElems(out, data, nelems, type, ' ');
static const Formatter* g_defaultFormatter0 = &matlabFormatter;
static const Formatter* g_defaultFormatter = &matlabFormatter;
-bool my_streq(const char* a, const char* b)
+static bool my_streq(const char* a, const char* b)
{
size_t i, alen = strlen(a), blen = strlen(b);
if( alen != blen )
g_defaultFormatter = fmt;
return prevFmt;
}
-
+
Formatted::Formatted(const Mat& _m, const Formatter* _fmt,
const vector<int>& _params)
{
fmt = _fmt ? _fmt : Formatter::get();
std::copy(_params.begin(), _params.end(), back_inserter(params));
}
-
+
Formatted::Formatted(const Mat& _m, const Formatter* _fmt, const int* _params)
{
mtx = _m;
fmt = _fmt ? _fmt : Formatter::get();
-
+
if( _params )
{
int i, maxParams = 100;
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if defined _MSC_VER && _MSC_VER >= 1200
- // disable warnings related to inline functions
- #pragma warning( disable: 4251 4711 4710 4514 )
-#endif
-
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
for( i = 0; i < n; i++ )
s.val[i] = ((T1*)&v)[i];
return s;
-}
+}
/****************************************************************************************\
* sum *
{
ST s0 = dst[0];
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for(; i <= len - 4; i += 4, src += cn*4 )
s0 += src[0] + src[cn] + src[cn*2] + src[cn*3];
#endif
dst[1] = s1;
dst[2] = s2;
}
-
+
for( ; k < cn; k += 4 )
{
src = src0 + k;
}
return len;
}
-
+
int i, nzm = 0;
if( cn == 1 )
{
if( mask[i] )
{
int k = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for( ; k <= cn - 4; k += 4 )
{
ST s0, s1;
static int countNonZero_(const T* src, int len )
{
int i=0, nz = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for(; i <= len - 4; i += 4 )
nz += (src[i] != 0) + (src[i+1] != 0) + (src[i+2] != 0) + (src[i+3] != 0);
#endif
static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
{
const T* src = src0;
-
+
if( !mask )
{
int i;
int k = cn % 4;
-
+
if( k == 1 )
{
ST s0 = sum[0];
sum[0] = s0; sum[1] = s1; sum[2] = s2;
sqsum[0] = sq0; sqsum[1] = sq1; sqsum[2] = sq2;
}
-
+
for( ; k < cn; k += 4 )
{
src = src0 + k;
}
return len;
}
-
+
int i, nzm = 0;
if( cn == 1 )
}
}
return nzm;
-}
+}
static int sqsum8u( const uchar* src, const uchar* mask, int* sum, int* sqsum, int len, int cn )
Mat src = _src.getMat();
int k, cn = src.channels(), depth = src.depth();
SumFunc func = sumTab[depth];
-
+
CV_Assert( cn <= 4 && func != 0 );
-
+
const Mat* arrays[] = {&src, 0};
uchar* ptrs[1];
NAryMatIterator it(arrays, ptrs);
int* buf = (int*)&s[0];
size_t esz = 0;
bool blockSum = depth < CV_32S;
-
+
if( blockSum )
{
intSumBlockSize = depth <= CV_8S ? (1 << 23) : (1 << 15);
{
Mat src = _src.getMat();
CountNonZeroFunc func = countNonZeroTab[src.depth()];
-
+
CV_Assert( src.channels() == 1 && func != 0 );
-
+
const Mat* arrays[] = {&src, 0};
uchar* ptrs[1];
NAryMatIterator it(arrays, ptrs);
int total = (int)it.size, nz = 0;
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
nz += func( ptrs[0], total );
-
+
return nz;
-}
+}
cv::Scalar cv::mean( InputArray _src, InputArray _mask )
{
Mat src = _src.getMat(), mask = _mask.getMat();
CV_Assert( mask.empty() || mask.type() == CV_8U );
-
+
int k, cn = src.channels(), depth = src.depth();
SumFunc func = sumTab[depth];
-
+
CV_Assert( cn <= 4 && func != 0 );
-
+
const Mat* arrays[] = {&src, &mask, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
int* buf = (int*)&s[0];
bool blockSum = depth <= CV_16S;
size_t esz = 0, nz0 = 0;
-
+
if( blockSum )
{
intSumBlockSize = depth <= CV_8S ? (1 << 23) : (1 << 15);
blockSize = std::min(blockSize, intSumBlockSize);
_buf.allocate(cn);
buf = _buf;
-
+
for( k = 0; k < cn; k++ )
buf[k] = 0;
esz = src.elemSize();
}
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
}
}
return s*(nz0 ? 1./nz0 : 0);
-}
+}
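/* Editorial sketch (not part of the patch): the reduction helpers above back the
   public sum/countNonZero/mean entry points (meanStdDev follows below). Names are
   illustrative only. */
#include <opencv2/core/core.hpp>

static void stats_example()
{
    cv::Mat img = (cv::Mat_<uchar>(2, 2) << 0, 10, 20, 30);
    cv::Scalar s  = cv::sum(img);        // 60
    cv::Scalar mu = cv::mean(img);       // 15
    cv::Mat m, sd;
    cv::meanStdDev(img, m, sd);          // mean 15, stddev sqrt(350 - 225) ~= 11.18
    int nz = cv::countNonZero(img);      // 3 (only one element is zero)
    (void)s; (void)mu; (void)nz;
}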
void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, InputArray _mask )
{
Mat src = _src.getMat(), mask = _mask.getMat();
CV_Assert( mask.empty() || mask.type() == CV_8U );
-
+
int k, cn = src.channels(), depth = src.depth();
SumSqrFunc func = sumSqrTab[depth];
-
+
CV_Assert( func != 0 );
-
+
const Mat* arrays[] = {&src, &mask, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
int *sbuf = (int*)s, *sqbuf = (int*)sq;
bool blockSum = depth <= CV_16S, blockSqSum = depth <= CV_8S;
size_t esz = 0;
-
+
for( k = 0; k < cn; k++ )
s[k] = sq[k] = 0;
-
+
if( blockSum )
{
intSumBlockSize = 1 << 15;
sbuf[k] = sqbuf[k] = 0;
esz = src.elemSize();
}
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
ptrs[1] += bsz;
}
}
-
+
double scale = nz0 ? 1./nz0 : 0.;
for( k = 0; k < cn; k++ )
{
s[k] *= scale;
sq[k] = std::sqrt(std::max(sq[k]*scale - s[k]*s[k], 0.));
}
-
+
for( j = 0; j < 2; j++ )
{
const double* sptr = j == 0 ? s : sq;
{
WT minVal = *_minVal, maxVal = *_maxVal;
size_t minIdx = *_minIdx, maxIdx = *_maxIdx;
-
+
if( !mask )
{
for( int i = 0; i < len; i++ )
static void minMaxIdx_64f(const double* src, const uchar* mask, double* minval, double* maxval,
size_t* minidx, size_t* maxidx, int len, size_t startidx )
-{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); }
+{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); }
typedef void (*MinMaxIdxFunc)(const uchar*, const uchar*, int*, int*, size_t*, size_t*, int, size_t);
{
Mat src = _src.getMat(), mask = _mask.getMat();
int depth = src.depth(), cn = src.channels();
-
+
CV_Assert( (cn == 1 && (mask.empty() || mask.type() == CV_8U)) ||
(cn >= 1 && mask.empty() && !minIdx && !maxIdx) );
MinMaxIdxFunc func = minmaxTab[depth];
CV_Assert( func != 0 );
-
+
const Mat* arrays[] = {&src, &mask, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
-
+
size_t minidx = 0, maxidx = 0;
int iminval = INT_MAX, imaxval = INT_MIN;
float fminval = FLT_MAX, fmaxval = -FLT_MAX;
size_t startidx = 1;
int *minval = &iminval, *maxval = &imaxval;
int planeSize = (int)it.size*cn;
-
+
if( depth == CV_32F )
minval = (int*)&fminval, maxval = (int*)&fmaxval;
else if( depth == CV_64F )
minval = (int*)&dminval, maxval = (int*)&dmaxval;
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it, startidx += planeSize )
func( ptrs[0], ptrs[1], minval, maxval, &minidx, &maxidx, planeSize, startidx );
-
+
if( minidx == 0 )
dminval = dmaxval = 0;
else if( depth == CV_32F )
dminval = fminval, dmaxval = fmaxval;
else if( depth <= CV_32S )
dminval = iminval, dmaxval = imaxval;
-
+
if( minVal )
*minVal = dminval;
if( maxVal )
*maxVal = dmaxval;
-
+
if( minIdx )
ofs2idx(src, minidx, minIdx);
if( maxIdx )
ofs2idx(src, maxidx, maxIdx);
-}
+}
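/* Editorial sketch (not part of the patch): minMaxLoc below is a thin 2D wrapper
   over the minMaxIdx implementation above (note the x/y swap it performs). Names
   are illustrative only. */
#include <opencv2/core/core.hpp>

static void minmax_example()
{
    cv::Mat img = (cv::Mat_<float>(2, 2) << 3.f, -1.f,
                                            7.f,  0.f);
    double mn, mx;
    cv::Point pmin, pmax;
    cv::minMaxLoc(img, &mn, &mx, &pmin, &pmax);
    // mn = -1 at (x=1, y=0),  mx = 7 at (x=0, y=1)
}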
void cv::minMaxLoc( InputArray _img, double* minVal, double* maxVal,
Point* minLoc, Point* maxLoc, InputArray mask )
{
Mat img = _img.getMat();
CV_Assert(img.dims <= 2);
-
+
minMaxIdx(_img, minVal, maxVal, (int*)minLoc, (int*)maxLoc, mask);
if( minLoc )
std::swap(minLoc->x, minLoc->y);
{
float CV_DECL_ALIGNED(16) buf[4];
__m128 d0 = _mm_setzero_ps(), d1 = _mm_setzero_ps();
-
+
for( ; j <= n - 8; j += 8 )
{
__m128 t0 = _mm_sub_ps(_mm_loadu_ps(a + j), _mm_loadu_ps(b + j));
}
else
#endif
- {
+ {
for( ; j <= n - 4; j += 4 )
{
float t0 = a[j] - b[j], t1 = a[j+1] - b[j+1], t2 = a[j+2] - b[j+2], t3 = a[j+3] - b[j+3];
d += t0*t0 + t1*t1 + t2*t2 + t3*t3;
}
}
-
+
for( ; j < n; j++ )
{
float t = a[j] - b[j];
static const int CV_DECL_ALIGNED(16) absbuf[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
__m128 d0 = _mm_setzero_ps(), d1 = _mm_setzero_ps();
__m128 absmask = _mm_load_ps((const float*)absbuf);
-
+
for( ; j <= n - 8; j += 8 )
{
__m128 t0 = _mm_sub_ps(_mm_loadu_ps(a + j), _mm_loadu_ps(b + j));
if( USE_SSE2 )
{
__m128i d0 = _mm_setzero_si128();
-
+
for( ; j <= n - 16; j += 16 )
{
__m128i t0 = _mm_loadu_si128((const __m128i*)(a + j));
__m128i t1 = _mm_loadu_si128((const __m128i*)(b + j));
-
+
d0 = _mm_add_epi32(d0, _mm_sad_epu8(t0, t1));
}
{
__m128i t0 = _mm_cvtsi32_si128(*(const int*)(a + j));
__m128i t1 = _mm_cvtsi32_si128(*(const int*)(b + j));
-
+
d0 = _mm_add_epi32(d0, _mm_sad_epu8(t0, t1));
}
d = _mm_cvtsi128_si32(_mm_add_epi32(d0, _mm_unpackhi_epi64(d0, d0)));
return d;
}
-static const uchar popCountTable[] =
+static const uchar popCountTable[] =
{
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
};
-int normHamming(const uchar* a, int n)
+static int normHamming(const uchar* a, int n)
{
int i = 0, result = 0;
#if CV_NEON
result += popCountTable[a[i]];
return result;
}
-
+
int normHamming(const uchar* a, const uchar* b, int n)
{
int i = 0, result = 0;
return result;
}
-int normHamming(const uchar* a, int n, int cellSize)
+static int normHamming(const uchar* a, int n, int cellSize)
{
if( cellSize == 1 )
return normHamming(a, n);
for( ; i < n; i++ )
result += tab[a[i]];
return result;
-}
-
+}
+
int normHamming(const uchar* a, const uchar* b, int n, int cellSize)
{
if( cellSize == 1 )
else
CV_Error( CV_StsBadSize, "bad cell size (not 1, 2 or 4) in normHamming" );
int i = 0, result = 0;
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for( ; i <= n - 4; i += 4 )
result += tab[a[i] ^ b[i]] + tab[a[i+1] ^ b[i+1]] +
tab[a[i+2] ^ b[i+2]] + tab[a[i+3] ^ b[i+3]];
}
*_result = result;
return 0;
-}
+}
template<typename T, typename ST> int
normDiffInf_(const T* src1, const T* src2, const uchar* mask, ST* _result, int len, int cn)
}
*_result = result;
return 0;
-}
+}
#define CV_DEF_NORM_FUNC(L, suffix, type, ntype) \
typedef int (*NormFunc)(const uchar*, const uchar*, uchar*, int, int);
-typedef int (*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int);
+typedef int (*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int);
static NormFunc normTab[3][8] =
{
{
Mat src = _src.getMat(), mask = _mask.getMat();
int depth = src.depth(), cn = src.channels();
-
+
normType &= 7;
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR ||
((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src.type() == CV_8U) );
-
+
if( src.isContinuous() && mask.empty() )
{
size_t len = src.total()*cn;
if( depth == CV_32F )
{
const float* data = src.ptr<float>();
-
+
if( normType == NORM_L2 )
{
double result = 0;
if( depth == CV_8U )
{
const uchar* data = src.ptr<uchar>();
-
+
if( normType == NORM_HAMMING )
return normHamming(data, (int)len);
-
+
if( normType == NORM_HAMMING2 )
return normHamming(data, (int)len, 2);
}
}
}
-
+
CV_Assert( mask.empty() || mask.type() == CV_8U );
-
+
if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
{
if( !mask.empty() )
return norm(temp, normType);
}
int cellSize = normType == NORM_HAMMING ? 1 : 2;
-
+
const Mat* arrays[] = {&src, 0};
uchar* ptrs[1];
NAryMatIterator it(arrays, ptrs);
int total = (int)it.size;
int result = 0;
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
result += normHamming(ptrs[0], total, cellSize);
-
+
return result;
}
-
+
NormFunc func = normTab[normType >> 1][depth];
CV_Assert( func != 0 );
-
+
const Mat* arrays[] = {&src, &mask, 0};
uchar* ptrs[2];
union
int isum = 0;
int *ibuf = &result.i;
size_t esz = 0;
-
+
if( blockSum )
{
intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
ibuf = &isum;
esz = src.elemSize();
}
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
ptrs[1] += bsz;
}
}
-
+
if( normType == NORM_INF )
{
if( depth == CV_64F )
}
else if( normType == NORM_L2 )
result.d = std::sqrt(result.d);
-
+
return result.d;
}
{
if( normType & CV_RELATIVE )
return norm(_src1, _src2, normType & ~CV_RELATIVE, _mask)/(norm(_src2, normType, _mask) + DBL_EPSILON);
-
+
Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat();
int depth = src1.depth(), cn = src1.channels();
-
+
CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
-
+
normType &= 7;
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR ||
((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) );
-
+
if( src1.isContinuous() && src2.isContinuous() && mask.empty() )
{
size_t len = src1.total()*src1.channels();
{
const float* data1 = src1.ptr<float>();
const float* data2 = src2.ptr<float>();
-
+
if( normType == NORM_L2 )
{
double result = 0;
}
}
}
-
+
CV_Assert( mask.empty() || mask.type() == CV_8U );
-
+
if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
{
if( !mask.empty() )
return norm(temp, normType);
}
int cellSize = normType == NORM_HAMMING ? 1 : 2;
-
+
const Mat* arrays[] = {&src1, &src2, 0};
uchar* ptrs[2];
NAryMatIterator it(arrays, ptrs);
int total = (int)it.size;
int result = 0;
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
result += normHamming(ptrs[0], ptrs[1], total, cellSize);
-
+
return result;
}
-
+
NormDiffFunc func = normDiffTab[normType >> 1][depth];
CV_Assert( func != 0 );
-
+
const Mat* arrays[] = {&src1, &src2, &mask, 0};
uchar* ptrs[3];
union
unsigned isum = 0;
unsigned *ibuf = &result.u;
size_t esz = 0;
-
+
if( blockSum )
{
intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15);
ibuf = &isum;
esz = src1.elemSize();
}
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
for( j = 0; j < total; j += blockSize )
ptrs[2] += bsz;
}
}
-
+
if( normType == NORM_INF )
{
if( depth == CV_64F )
}
else if( normType == NORM_L2 )
result.d = std::sqrt(result.d);
-
+
return result.d;
}
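/* Editorial sketch (not part of the patch): the two norm() overloads above, including
   the CV_8U-only Hamming variants built on the popcount tables. Names are illustrative
   only. */
#include <opencv2/core/core.hpp>

static void norm_example()
{
    cv::Mat a = (cv::Mat_<float>(1, 3) << 3.f, 0.f, 4.f);
    cv::Mat b = cv::Mat::zeros(1, 3, CV_32F);
    double l2  = cv::norm(a, cv::NORM_L2);           // 5
    double d12 = cv::norm(a, b, cv::NORM_L1);        // |3| + |0| + |4| = 7

    cv::Mat da = (cv::Mat_<uchar>(1, 2) << 0x0F, 0xFF);
    cv::Mat db = (cv::Mat_<uchar>(1, 2) << 0x0E, 0x0F);
    double dh = cv::norm(da, db, cv::NORM_HAMMING);  // popcount(0x01) + popcount(0xF0) = 5
    (void)l2; (void)d12; (void)dh;
}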
typedef void (*BatchDistFunc)(const uchar* src1, const uchar* src2, size_t step2,
int nvecs, int len, uchar* dist, const uchar* mask);
-
+
struct BatchDistInvoker
{
BatchDistInvoker( const Mat& _src1, const Mat& _src2,
update = _update;
func = _func;
}
-
+
void operator()(const BlockedRange& range) const
{
AutoBuffer<int> buf(src2->rows);
int* bufptr = buf;
-
+
for( int i = range.begin(); i < range.end(); i++ )
{
func(src1->ptr(i), src2->ptr(), src2->step, src2->rows, src2->cols,
K > 0 ? (uchar*)bufptr : dist->ptr(i), mask->data ? mask->ptr(i) : 0);
-
+
if( K > 0 )
{
int* nidxptr = nidx->ptr<int>(i);
                // since positive floats can be compared just like ints,
                // we handle both CV_32S and CV_32F cases with a single branch
int* distptr = (int*)dist->ptr(i);
-
+
int j, k;
-
+
for( j = 0; j < src2->rows; j++ )
{
int d = bufptr[j];
}
}
}
-
+
const Mat *src1;
const Mat *src2;
Mat *dist;
int update;
BatchDistFunc func;
};
-
+
}
-
+
void cv::batchDistance( InputArray _src1, InputArray _src2,
OutputArray _dist, int dtype, OutputArray _nidx,
int normType, int K, InputArray _mask,
CV_Assert( type == src2.type() && src1.cols == src2.cols &&
(type == CV_32F || type == CV_8U));
CV_Assert( _nidx.needed() == (K > 0) );
-
+
if( dtype == -1 )
{
dtype = normType == NORM_HAMMING || normType == NORM_HAMMING2 ? CV_32S : CV_32F;
CV_Assert( (type == CV_8U && dtype == CV_32S) || dtype == CV_32F);
K = std::min(K, src2.rows);
-
+
_dist.create(src1.rows, (K > 0 ? K : src2.rows), dtype);
Mat dist = _dist.getMat(), nidx;
if( _nidx.needed() )
_nidx.create(dist.size(), CV_32S);
nidx = _nidx.getMat();
}
-
+
if( update == 0 && K > 0 )
{
dist = Scalar::all(dtype == CV_32S ? (double)INT_MAX : (double)FLT_MAX);
nidx = Scalar::all(-1);
}
-
+
if( crosscheck )
{
CV_Assert( K == 1 && update == 0 && mask.empty() );
Mat tdist, tidx;
batchDistance(src2, src1, tdist, dtype, tidx, normType, K, mask, 0, false);
-
+
        // if the idx-th element of src1 turns out to be the nearest to the i-th element of src2,
        // we update the minimum mutual distance between the idx-th element of src1 and the whole src2 set.
        // As a result, if nidx[idx] = i*, it means that idx-th element of src1 is the nearest
}
return;
}
-
+
BatchDistFunc func = 0;
if( type == CV_8U )
{
else if( normType == NORM_L2 )
func = (BatchDistFunc)batchDistL2_32f;
}
-
+
if( func == 0 )
CV_Error_(CV_StsUnsupportedFormat,
("The combination of type=%d, dtype=%d and normType=%d is not supported",
type, dtype, normType));
-
+
parallel_for(BlockedRange(0, src1.rows),
BatchDistInvoker(src1, src2, dist, nidx, K, mask, update, func));
}
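/* Editorial sketch (not part of the patch): a typical batchDistance() call, assuming
   the 2.4-era defaults for the trailing mask/update/crosscheck parameters. Names are
   illustrative only. */
#include <opencv2/core/core.hpp>

static void batchdist_example()
{
    cv::Mat query(10, 32, CV_32F), train(50, 32, CV_32F), dist, nidx;
    cv::randu(query, cv::Scalar::all(0), cv::Scalar::all(1));
    cv::randu(train, cv::Scalar::all(0), cv::Scalar::all(1));
    // for each row of 'query', find the K = 2 nearest rows of 'train' under L2
    cv::batchDistance(query, train, dist, CV_32F, nidx, cv::NORM_L2, 2);
    // dist: 10x2 CV_32F distances, nidx: 10x2 CV_32S row indices into 'train'
}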
#if defined __linux__ || defined __APPLE__
#include <unistd.h>
#include <stdio.h>
-#include <sys/types.h>
+#include <sys/types.h>
#if defined ANDROID
#include <sys/sysconf.h>
#else
/*!
\return the error description and the context as a text string.
- */
+ */
const char* Exception::what() const throw() { return msg.c_str(); }
void Exception::formatMessage()
else
msg = format("%s:%d: error: (%d) %s\n", file.c_str(), line, code, err.c_str());
}
-
+
struct HWFeatures
{
enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE };
#endif
}
-#if ANDROID
+#ifdef ANDROID
static inline int getNumberOfCPUsImpl()
{
FILE* cpuPossible = fopen("/sys/devices/system/cpu/possible", "r");
sscanf(pos, "%d-%d", &rstart, &rend);
cpusAvailable += rend - rstart + 1;
}
-
+
}
return cpusAvailable ? cpusAvailable : 1;
}
#if defined WIN32 || defined _WIN32
SYSTEM_INFO sysinfo;
GetSystemInfo( &sysinfo );
-
+
return (int)sysinfo.dwNumberOfProcessors;
-#elif ANDROID
+#elif defined ANDROID
static int ncpus = getNumberOfCPUsImpl();
printf("CPUS= %d\n", ncpus);
return ncpus;
#elif defined __APPLE__
int numCPU=0;
int mib[4];
- size_t len = sizeof(numCPU);
-
+ size_t len = sizeof(numCPU);
+
/* set the mib for hw.ncpu */
mib[0] = CTL_HW;
mib[1] = HW_AVAILCPU; // alternatively, try HW_NCPU;
-
+
/* get the number of CPUs from the system */
sysctl(mib, 2, &numCPU, &len, NULL, 0);
-
- if( numCPU < 1 )
+
+ if( numCPU < 1 )
{
mib[1] = HW_NCPU;
sysctl( mib, 2, &numCPU, &len, NULL, 0 );
-
+
if( numCPU < 1 )
numCPU = 1;
}
-
+
return (int)numCPU;
#else
return 1;
{
char buf[L_tmpnam];
char* name = 0;
-#if ANDROID
+#ifdef ANDROID
strcpy(buf, "/sdcard/__opencv_temp_XXXXXX");
name = mktemp(buf);
#else
}
#if defined BUILD_SHARED_LIBS && defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE
+BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID );
+
BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID )
{
if( fdwReason == DLL_THREAD_DETACH || fdwReason == DLL_PROCESS_DETACH )
dst.at<double>(0,0) = cvtest::norm(src[0], normType, mask);
dst.at<double>(0,1) = cvtest::norm(src[0], src[1], normType, mask);
}
- void generateScalars(int, RNG& rng)
+ void generateScalars(int, RNG& /*rng*/)
{
}
double getMaxErr(int)
static void cvTsSimpleSeqShiftAndCopy( CvTsSimpleSeq* seq, int from_idx, int to_idx, void* elem=0 )
{
int elem_size = seq->elem_size;
-
+
if( from_idx == to_idx )
return;
assert( (from_idx > to_idx && !elem) || (from_idx < to_idx && elem) );
-
+
if( from_idx < seq->count )
{
memmove( seq->array + to_idx*elem_size, seq->array + from_idx*elem_size,
{
int i, k, len = seq->count, elem_size = seq->elem_size;
schar *data = seq->array, t;
-
+
for( i = 0; i < len/2; i++ )
{
schar* a = data + i*elem_size;
{
int i;
int elem_size = set_header->elem_size;
-
+
for( i = 0; i < set_header->max_count; i++ )
{
set_header->array[i*elem_size] = 0;
set_header->max_count = max_count;
set_header->free_stack = (int*)(set_header + 1);
set_header->array = (schar*)(set_header->free_stack + max_count);
-
+
cvTsClearSimpleSet( set_header );
return set_header;
}
{
int idx, idx2;
assert( set_header->free_count > 0 );
-
+
idx = set_header->free_stack[--set_header->free_count];
idx2 = idx * set_header->elem_size;
assert( set_header->array[idx2] == 0 );
if( set_header->elem_size > 1 )
memcpy( set_header->array + idx2 + 1, elem, set_header->elem_size - 1 );
set_header->count = MAX( set_header->count, idx + 1 );
-
+
return idx;
}
assert( set_header->free_count < set_header->max_count &&
0 <= index && index < set_header->max_count );
assert( set_header->array[index * set_header->elem_size] == 1 );
-
+
set_header->free_stack[set_header->free_count++] = index;
set_header->array[index * set_header->elem_size] = 0;
}
int edge_size, int oriented )
{
CvTsSimpleGraph* graph;
-
+
assert( max_vtx_count > 1 && vtx_size >= 0 && edge_size >= 0 );
graph = (CvTsSimpleGraph*)cvAlloc( sizeof(*graph) +
max_vtx_count * max_vtx_count * (edge_size + 1));
graph->edge_size = edge_size + 1;
graph->matrix = (char*)(graph + 1);
graph->oriented = oriented;
-
+
cvTsClearSimpleGraph( graph );
return graph;
}
int i, max_vtx_count = graph->vtx->max_count;
int edge_size = graph->edge_size;
cvTsSimpleSetRemove( graph->vtx, index );
-
+
/* remove all the corresponding edges */
for( i = 0; i < max_vtx_count; i++ )
{
static void cvTsSimpleGraphAddEdge( CvTsSimpleGraph* graph, int idx1, int idx2, void* edge )
{
int i, t, n = graph->oriented ? 1 : 2;
-
+
assert( cvTsSimpleSetFind( graph->vtx, idx1 ) &&
cvTsSimpleSetFind( graph->vtx, idx2 ));
-
+
for( i = 0; i < n; i++ )
{
int ofs = (idx1*graph->vtx->max_count + idx2)*graph->edge_size;
graph->matrix[ofs] = 1;
if( graph->edge_size > 1 )
memcpy( graph->matrix + ofs + 1, edge, graph->edge_size - 1 );
-
+
CV_SWAP( idx1, idx2, t );
}
}
static void cvTsSimpleGraphRemoveEdge( CvTsSimpleGraph* graph, int idx1, int idx2 )
{
int i, t, n = graph->oriented ? 1 : 2;
-
+
assert( cvTsSimpleSetFind( graph->vtx, idx1 ) &&
cvTsSimpleSetFind( graph->vtx, idx2 ));
-
+
for( i = 0; i < n; i++ )
{
int ofs = (idx1*graph->vtx->max_count + idx2)*graph->edge_size;
int edge_size = graph->edge_size;
int max_vtx_count = graph->vtx->max_count;
assert( cvTsSimpleGraphFindVertex( graph, index ) != 0 );
-
+
for( i = 0; i < max_vtx_count; i++ )
{
count += graph->matrix[(i*max_vtx_count + index)*edge_size] +
graph->matrix[(index*max_vtx_count + i)*edge_size];
}
-
+
if( !graph->oriented )
{
assert( count % 2 == 0 );
virtual ~Core_DynStructBaseTest();
bool can_do_fast_forward();
void clear();
-
+
protected:
int read_params( CvFileStorage* fs );
void run_func(void);
const char* file, int line );
int test_seq_block_consistence( int _struct_idx, CvSeq* seq, int total );
void update_progressbar();
-
+
int struct_count, max_struct_size, iterations, generations;
int min_log_storage_block_size, max_log_storage_block_size;
int min_log_elem_size, max_log_elem_size;
iterations = max_struct_size*2;
gen = struct_idx = iter = -1;
test_progress = -1;
-
+
storage = 0;
}
double sqrt_scale = sqrt(ts->get_test_case_count_scale());
if( code < 0 )
return code;
-
+
struct_count = cvReadInt( find_param( fs, "struct_count" ), struct_count );
max_struct_size = cvReadInt( find_param( fs, "max_struct_size" ), max_struct_size );
generations = cvReadInt( find_param( fs, "generations" ), generations );
iterations = cvReadInt( find_param( fs, "iterations" ), iterations );
generations = cvRound(generations*sqrt_scale);
iterations = cvRound(iterations*sqrt_scale);
-
+
min_log_storage_block_size = cvReadInt( find_param( fs, "min_log_storage_block_size" ),
min_log_storage_block_size );
max_log_storage_block_size = cvReadInt( find_param( fs, "max_log_storage_block_size" ),
max_log_storage_block_size );
min_log_elem_size = cvReadInt( find_param( fs, "min_log_elem_size" ), min_log_elem_size );
max_log_elem_size = cvReadInt( find_param( fs, "max_log_elem_size" ), max_log_elem_size );
-
+
struct_count = cvtest::clipInt( struct_count, 1, 100 );
max_struct_size = cvtest::clipInt( max_struct_size, 1, 1<<20 );
generations = cvtest::clipInt( generations, 1, 100 );
iterations = cvtest::clipInt( iterations, 100, 1<<20 );
-
+
min_log_storage_block_size = cvtest::clipInt( min_log_storage_block_size, 7, 20 );
max_log_storage_block_size = cvtest::clipInt( max_log_storage_block_size,
min_log_storage_block_size, 20 );
-
+
min_log_elem_size = cvtest::clipInt( min_log_elem_size, 0, 8 );
max_log_elem_size = cvtest::clipInt( max_log_elem_size, min_log_elem_size, 10 );
-
+
return 0;
}
void Core_DynStructBaseTest::update_progressbar()
{
int64 t;
-
+
if( test_progress < 0 )
{
test_progress = 0;
cpu_freq = cv::getTickFrequency();
start_time = cv::getTickCount();
}
-
+
t = cv::getTickCount();
test_progress = update_progress( test_progress, 0, 0, (double)(t - start_time)/cpu_freq );
}
{
int sum = 0;
struct_idx = _struct_idx;
-
+
CV_TS_SEQ_CHECK_CONDITION( seq != 0, "Null sequence pointer" );
-
+
if( seq->first )
{
CvSeqBlock* block = seq->first;
CvSeqBlock* prev_block = block->prev;
-
+
int delta_idx = seq->first->start_index;
-
+
for( ;; )
{
CV_TS_SEQ_CHECK_CONDITION( sum == block->start_index - delta_idx &&
block = block->next;
if( block == seq->first ) break;
}
-
+
CV_TS_SEQ_CHECK_CONDITION( block->prev->count * seq->elem_size +
block->prev->data <= seq->block_max,
"block->data or block_max pointer are incorrect" );
}
-
+
CV_TS_SEQ_CHECK_CONDITION( seq->total == sum && sum == total,
"total number of elements is incorrect" );
-
+
return 0;
}
Core_SeqBaseTest();
void clear();
void run( int );
-
+
protected:
int test_multi_create();
int test_get_seq_elem( int _struct_idx, int iters );
vector<int> index(struct_count);
int cur_count, elem_size;
RNG& rng = ts->get_rng();
-
+
for( int i = 0; i < struct_count; i++ )
{
double t;
CvTsSimpleSeq* sseq;
-
+
pos[i] = -1;
index[i] = i;
-
+
t = cvtest::randReal(rng)*(max_log_elem_size - min_log_elem_size) + min_log_elem_size;
elem_size = cvRound( exp(t * CV_LOG2) );
elem_size = MIN( elem_size, (int)(storage->block_size - sizeof(void*) -
sizeof(CvSeqBlock) - sizeof(CvMemBlock)) );
-
+
cvTsReleaseSimpleSeq( (CvTsSimpleSeq**)&simple_struct[i] );
simple_struct[i] = sseq = cvTsCreateSimpleSeq( max_struct_size, elem_size );
cxcore_struct[i] = 0;
Mat m( 1, MAX(sseq->count,1)*elem_size, CV_8UC1, sseq->array );
cvtest::randUni( rng, m, Scalar::all(0), Scalar::all(256) );
}
-
+
for( cur_count = struct_count; cur_count > 0; cur_count-- )
{
for(;;)
int k = cvtest::randInt( rng ) % cur_count;
struct_idx = index[k];
CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[struct_idx];
-
+
if( pos[struct_idx] < 0 )
{
int hdr_size = (cvtest::randInt(rng) % 10)*4 + sizeof(CvSeq);
hdr_size = MIN( hdr_size, (int)(storage->block_size - sizeof(CvMemBlock)) );
elem_size = sseq->elem_size;
-
+
if( cvtest::randInt(rng) % 2 )
{
cvStartWriteSeq( 0, hdr_size, elem_size, storage, &writer[struct_idx] );
s = cvCreateSeq( 0, hdr_size, elem_size, storage );
cvStartAppendToSeq( s, &writer[struct_idx] );
}
-
+
cvSetSeqBlockSize( writer[struct_idx].seq, cvtest::randInt( rng ) % 10000 );
pos[struct_idx] = 0;
}
-
+
update_progressbar();
if( pos[struct_idx] == sseq->count )
{
index[k] = index[k+1];
break;
}
-
+
{
schar* el = cvTsSimpleSeqElem( sseq, pos[struct_idx] );
CV_WRITE_SEQ_ELEM_VAR( el, writer[struct_idx] );
pos[struct_idx]++;
}
}
-
+
return 0;
}
int Core_SeqBaseTest::test_get_seq_elem( int _struct_idx, int iters )
{
RNG& rng = ts->get_rng();
-
+
CvSeq* seq = (CvSeq*)cxcore_struct[_struct_idx];
CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[_struct_idx];
struct_idx = _struct_idx;
-
+
assert( seq->total == sseq->count );
-
+
if( sseq->count == 0 )
return 0;
-
+
for( int i = 0; i < iters; i++ )
{
int idx = cvtest::randInt(rng) % (sseq->count*3) - sseq->count*3/2;
int bad_range = (unsigned)idx0 >= (unsigned)(sseq->count);
schar* elem;
elem = cvGetSeqElem( seq, idx );
-
+
if( bad_range )
{
CV_TS_SEQ_CHECK_CONDITION( elem == 0,
CV_TS_SEQ_CHECK_CONDITION( elem != 0 &&
!memcmp( elem, cvTsSimpleSeqElem(sseq, idx0), sseq->elem_size ),
"cvGetSeqElem returns wrong element" );
-
+
idx = cvSeqElemIdx(seq, elem );
CV_TS_SEQ_CHECK_CONDITION( idx >= 0 && idx == idx0,
"cvSeqElemIdx is incorrect" );
}
}
-
+
return 0;
}
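/* Editorial sketch (not part of the patch): the legacy CvSeq calls exercised by the
   checks above, shown outside the test harness. Names are illustrative only. */
#include <opencv2/core/core_c.h>

static void cvseq_example()
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* seq = cvCreateSeq(0, sizeof(CvSeq), sizeof(int), storage);
    for( int i = 0; i < 5; i++ )
        cvSeqPush(seq, &i);                       // append 0..4
    int* third = (int*)cvGetSeqElem(seq, 2);      // -> 2
    int idx = cvSeqElemIdx(seq, third);           // -> 2
    (void)idx;
    cvReleaseMemStorage(&storage);                // frees the sequence as well
}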
CvSeqReader reader;
vector<schar> _elem(sseq->elem_size);
schar* elem = &_elem[0];
-
+
assert( total == sseq->count );
this->struct_idx = _struct_idx;
-
+
int pos = cvtest::randInt(rng) % 2;
cvStartReadSeq( seq, &reader, pos );
-
+
if( total == 0 )
{
CV_TS_SEQ_CHECK_CONDITION( reader.ptr == 0, "Empty sequence reader pointer is not NULL" );
return 0;
}
-
+
pos = pos ? seq->total - 1 : 0;
-
+
CV_TS_SEQ_CHECK_CONDITION( pos == cvGetSeqReaderPos(&reader),
"initial reader position is wrong" );
-
+
for( iter = 0; iter < iters; iter++ )
{
int op = cvtest::randInt(rng) % max_val;
-
+
if( op >= max_val - 2 )
{
int new_pos, new_pos0;
int bad_range;
int is_relative = op == max_val - 1;
-
+
new_pos = cvtest::randInt(rng) % (total*2) - total;
new_pos0 = new_pos + (is_relative ? pos : 0 );
-
+
if( new_pos0 < 0 ) new_pos0 += total;
if( new_pos0 >= total ) new_pos0 -= total;
-
+
bad_range = (unsigned)new_pos0 >= (unsigned)total;
cvSetSeqReaderPos( &reader, new_pos, is_relative );
-
+
if( !bad_range )
{
CV_TS_SEQ_CHECK_CONDITION( new_pos0 == cvGetSeqReaderPos( &reader ),
{
int direction = (op % 3) - 1;
memcpy( elem, reader.ptr, sseq->elem_size );
-
+
if( direction > 0 )
{
CV_NEXT_SEQ_ELEM( sseq->elem_size, reader );
{
CV_PREV_SEQ_ELEM( sseq->elem_size, reader );
}
-
+
CV_TS_SEQ_CHECK_CONDITION( memcmp(elem, cvTsSimpleSeqElem(sseq, pos),
sseq->elem_size) == 0, "reading is incorrect" );
pos += direction;
if( -pos > 0 ) pos += total;
if( pos >= total ) pos -= total;
-
+
CV_TS_SEQ_CHECK_CONDITION( pos == cvGetSeqReaderPos( &reader ),
"reader doesn't move correctly after reading" );
}
}
-
+
return 0;
}
int max_elem_size = 0;
schar* elem2 = 0;
RNG& rng = ts->get_rng();
-
+
for( int i = 0; i < struct_count; i++ )
max_elem_size = MAX( max_elem_size, ((CvSeq*)cxcore_struct[i])->elem_size );
-
+
vector<schar> elem_buf(max_struct_size*max_elem_size);
schar* elem = (schar*)&elem_buf[0];
Mat elem_mat;
-
+
for( iter = 0; iter < iters; iter++ )
{
struct_idx = cvtest::randInt(rng) % struct_count;
CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[struct_idx];
int elem_size = sseq->elem_size;
int whence = 0, pos = 0, count = 0;
-
+
switch( op )
{
case 0:
case 2: // push/pushfront/insert
if( sseq->count == sseq->max_count )
break;
-
+
elem_mat = Mat(1, elem_size, CV_8U, elem);
cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
-
+
whence = op - 1;
if( whence < 0 )
{
pos = cvtest::randInt(rng) % (sseq->count + 1);
cvSeqInsert( seq, pos, elem );
}
-
+
cvTsSimpleSeqShiftAndCopy( sseq, pos, pos + 1, elem );
elem2 = cvGetSeqElem( seq, pos );
CV_TS_SEQ_CHECK_CONDITION( elem2 != 0, "The inserted element could not be retrieved" );
memcmp(elem2, cvTsSimpleSeqElem(sseq,pos), elem_size) == 0,
"The inserted sequence element is wrong" );
break;
-
+
case 3:
case 4:
case 5: // pop/popfront/remove
if( sseq->count == 0 )
break;
-
+
whence = op - 4;
if( whence < 0 )
{
pos = cvtest::randInt(rng) % sseq->count;
cvSeqRemove( seq, pos );
}
-
+
if( whence != 0 )
CV_TS_SEQ_CHECK_CONDITION( seq->total == sseq->count - 1 &&
memcmp( elem, cvTsSimpleSeqElem(sseq,pos), elem_size) == 0,
"The popped sequence element isn't correct" );
-
+
cvTsSimpleSeqShiftAndCopy( sseq, pos + 1, pos );
-
+
if( sseq->count > 0 )
{
elem2 = cvGetSeqElem( seq, pos < sseq->count ? pos : -1 );
CV_TS_SEQ_CHECK_CONDITION( elem2 != 0, "GetSeqElem fails after removing the element" );
-
+
CV_TS_SEQ_CHECK_CONDITION( memcmp( elem2,
cvTsSimpleSeqElem(sseq, pos - (pos == sseq->count)), elem_size) == 0,
"The first shifted element is not correct after removing another element" );
"The sequence doesn't become empty after the final remove" );
}
break;
-
+
case 6:
case 7:
case 8: // push [front] multi/insert slice
if( sseq->count == sseq->max_count )
break;
-
+
count = cvtest::randInt( rng ) % (sseq->max_count - sseq->count + 1);
elem_mat = Mat(1, MAX(count,1) * elem_size, CV_8U, elem);
cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
-
+
whence = op - 7;
pos = whence < 0 ? 0 : whence > 0 ? sseq->count : cvtest::randInt(rng) % (sseq->count+1);
if( whence != 0 )
sseq->elem_size,
elem, count,
&header, &block );
-
+
cvSeqInsertSlice( seq, pos, &header );
}
cvTsSimpleSeqShiftAndCopy( sseq, pos, pos + count, elem );
-
+
if( sseq->count > 0 )
{
 // choose a random element among the added ones
"Adding no elements to empty sequence fails" );
}
break;
-
+
case 9:
case 10:
case 11: // pop [front] multi
if( sseq->count == 0 )
break;
-
+
count = cvtest::randInt(rng) % (sseq->count+1);
whence = op - 10;
pos = whence < 0 ? 0 : whence > 0 ? sseq->count - count :
cvtest::randInt(rng) % (sseq->count - count + 1);
-
+
if( whence != 0 )
{
cvSeqPopMulti( seq, elem, count, whence < 0 );
-
+
if( count > 0 )
{
CV_TS_SEQ_CHECK_CONDITION( memcmp(elem,
{
cvSeqRemoveSlice( seq, cvSlice(pos, pos + count) );
}
-
+
CV_TS_SEQ_CHECK_CONDITION( seq->total == sseq->count - count,
"The popmulti left a wrong number of elements in the sequence" );
-
+
cvTsSimpleSeqShiftAndCopy( sseq, pos + count, pos, 0 );
if( sseq->count > 0 )
{
{
CvMemStoragePos storage_pos;
cvSaveMemStoragePos( storage, &storage_pos );
-
+
int copy_data = cvtest::randInt(rng) % 2;
count = cvtest::randInt(rng) % (seq->total + 1);
pos = cvtest::randInt(rng) % (seq->total - count + 1);
CvSeq* seq_slice = cvSeqSlice( seq, cvSlice(pos, pos + count), storage, copy_data );
-
+
CV_TS_SEQ_CHECK_CONDITION( seq_slice && seq_slice->total == count,
"cvSeqSlice returned incorrect slice" );
-
+
if( count > 0 )
{
int test_idx = cvtest::randInt(rng) % count;
CV_TS_SEQ_CHECK_CONDITION( (elem2 == elem3) ^ copy_data,
"copy_data flag is handled incorrectly" );
}
-
+
cvRestoreMemStoragePos( storage, &storage_pos );
}
break;
assert(0);
return -1;
}
-
+
if( test_seq_block_consistence(struct_idx, seq, sseq->count) < 0 )
return -1;
-
+
if( test_get_seq_elem(struct_idx, 7) < 0 )
return -1;
-
+
update_progressbar();
}
-
+
return 0;
}
RNG& rng = ts->get_rng();
int i;
double t;
-
+
clear();
test_progress = -1;
-
+
simple_struct.resize(struct_count, 0);
cxcore_struct.resize(struct_count, 0);
-
+
for( gen = 0; gen < generations; gen++ )
{
struct_idx = iter = -1;
-
+
if( !storage )
{
t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size)
+ min_log_storage_block_size;
storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
}
-
+
iter = struct_idx = -1;
test_multi_create();
-
+
for( i = 0; i < struct_count; i++ )
{
if( test_seq_block_consistence(i, (CvSeq*)cxcore_struct[i],
((CvTsSimpleSeq*)simple_struct[i])->count) < 0 )
return;
-
+
if( test_get_seq_elem( i, MAX(iterations/3,7) ) < 0 )
return;
-
+
if( test_get_seq_reading( i, MAX(iterations/3,7) ) < 0 )
return;
update_progressbar();
}
-
+
if( test_seq_ops( iterations ) < 0 )
return;
-
+
if( cvtest::randInt(rng) % 2 )
storage.release();
else
public:
Core_SeqSortInvTest();
void run( int );
-
+
protected:
};
double t;
schar *elem0, *elem, *elem2;
vector<uchar> buffer;
-
+
clear();
test_progress = -1;
-
+
simple_struct.resize(struct_count, 0);
cxcore_struct.resize(struct_count, 0);
-
+
for( gen = 0; gen < generations; gen++ )
{
struct_idx = iter = -1;
-
+
if( storage.empty() )
{
t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size)
+ min_log_storage_block_size;
storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
}
-
+
for( iter = 0; iter < iterations/10; iter++ )
{
int max_size = 0;
test_multi_create();
-
+
for( i = 0; i < struct_count; i++ )
{
CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[i];
max_size = MAX( max_size, sseq->count*sseq->elem_size );
}
-
+
buffer.resize(max_size);
-
+
for( i = 0; i < struct_count; i++ )
{
CvSeq* seq = (CvSeq*)cxcore_struct[i];
CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[i];
CvSlice slice = CV_WHOLE_SEQ;
-
+
//printf("%d. %d. %d-th size = %d\n", gen, iter, i, sseq->count );
-
+
cvSeqInvert( seq );
cvTsSimpleSeqInvert( sseq );
-
+
if( test_seq_block_consistence( i, seq, sseq->count ) < 0 )
return;
-
+
if( sseq->count > 0 && cvtest::randInt(rng) % 2 == 0 )
{
slice.end_index = cvtest::randInt(rng) % sseq->count + 1;
slice.start_index = cvtest::randInt(rng) % (sseq->count - slice.end_index + 1);
slice.end_index += slice.start_index;
}
-
+
cvCvtSeqToArray( seq, &buffer[0], slice );
-
+
slice.end_index = MIN( slice.end_index, sseq->count );
CV_TS_SEQ_CHECK_CONDITION( sseq->count == 0 || memcmp( &buffer[0],
sseq->array + slice.start_index*sseq->elem_size,
(slice.end_index - slice.start_index)*sseq->elem_size ) == 0,
"cvSeqInvert returned wrong result" );
-
+
for( k = 0; k < (sseq->count > 0 ? 10 : 0); k++ )
{
int idx0 = cvtest::randInt(rng) % sseq->count, idx = 0;
elem0 = cvTsSimpleSeqElem( sseq, idx0 );
elem = cvGetSeqElem( seq, idx0 );
elem2 = cvSeqSearch( seq, elem0, k % 2 ? icvCmpSeqElems : 0, 0, &idx, seq );
-
+
CV_TS_SEQ_CHECK_CONDITION( elem != 0 &&
memcmp( elem0, elem, seq->elem_size ) == 0,
"cvSeqInvert gives incorrect result" );
elem2 == cvGetSeqElem( seq, idx ),
"cvSeqSearch failed (linear search)" );
}
-
+
cvSeqSort( seq, icvCmpSeqElems, seq );
-
+
if( test_seq_block_consistence( i, seq, sseq->count ) < 0 )
return;
-
+
if( sseq->count > 0 )
{
// !!! This is not thread-safe !!!
icvCmpSeqElems2_elem_size = sseq->elem_size;
qsort( sseq->array, sseq->count, sseq->elem_size, icvCmpSeqElems2 );
-
+
if( cvtest::randInt(rng) % 2 == 0 )
{
slice.end_index = cvtest::randInt(rng) % sseq->count + 1;
slice.end_index += slice.start_index;
}
}
-
+
cvCvtSeqToArray( seq, &buffer[0], slice );
CV_TS_SEQ_CHECK_CONDITION( sseq->count == 0 || memcmp( &buffer[0],
sseq->array + slice.start_index*sseq->elem_size,
(slice.end_index - slice.start_index)*sseq->elem_size ) == 0,
"cvSeqSort returned wrong result" );
-
+
for( k = 0; k < (sseq->count > 0 ? 10 : 0); k++ )
{
int idx0 = cvtest::randInt(rng) % sseq->count, idx = 0;
elem0 = cvTsSimpleSeqElem( sseq, idx0 );
elem = cvGetSeqElem( seq, idx0 );
elem2 = cvSeqSearch( seq, elem0, icvCmpSeqElems, 1, &idx, seq );
-
+
CV_TS_SEQ_CHECK_CONDITION( elem != 0 &&
memcmp( elem0, elem, seq->elem_size ) == 0,
"cvSeqSort gives incorrect result" );
"cvSeqSearch failed (binary search)" );
}
}
-
+
cvClearMemStorage( storage );
}
-
+
storage.release();
}
}
Core_SetTest();
void clear();
void run( int );
-
+
protected:
//int test_seq_block_consistence( int struct_idx );
int test_set_ops( int iters );
schar* elem_data = 0;
RNG& rng = ts->get_rng();
//int max_active_count = 0, mean_active_count = 0;
-
+
for( int i = 0; i < struct_count; i++ )
max_elem_size = MAX( max_elem_size, ((CvSeq*)cxcore_struct[i])->elem_size );
-
+
vector<schar> elem_buf(max_elem_size);
Mat elem_mat;
-
+
for( iter = 0; iter < iters; iter++ )
{
struct_idx = cvtest::randInt(rng) % struct_count;
-
+
CvSet* cvset = (CvSet*)cxcore_struct[struct_idx];
CvTsSimpleSet* sset = (CvTsSimpleSet*)simple_struct[struct_idx];
int pure_elem_size = sset->elem_size - 1;
CvSetElem* first_free = cvset->free_elems;
CvSetElem* next_free = first_free ? first_free->next_free : 0;
int pass_data = 0;
-
+
if( iter > iters/10 && cvtest::randInt(rng)%200 == 0 ) // clear set
{
- int prev_count = cvset->total;
+ prev_count = cvset->total;
cvClearSet( cvset );
cvTsClearSimpleSet( sset );
-
+
CV_TS_SEQ_CHECK_CONDITION( cvset->active_count == 0 && cvset->total == 0 &&
cvset->first == 0 && cvset->free_elems == 0 &&
(cvset->free_blocks != 0 || prev_count == 0),
{
if( sset->free_count == 0 )
continue;
-
+
elem_mat = Mat(1, cvset->elem_size, CV_8U, &elem_buf[0]);
cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
elem = (CvSetElem*)&elem_buf[0];
-
+
if( by_ptr )
{
elem2 = cvSetNew( cvset );
CV_TS_SEQ_CHECK_CONDITION( elem2 != 0 && elem2->flags == idx,
"cvSetAdd returned NULL pointer or a wrong index" );
}
-
+
elem_data = (schar*)elem + sizeof(int);
-
+
if( !pass_data )
memcpy( (schar*)elem2 + sizeof(int), elem_data, pure_elem_size );
-
+
idx = elem2->flags;
idx0 = cvTsSimpleSetAdd( sset, elem_data );
elem3 = cvGetSetElem( cvset, idx );
-
+
CV_TS_SEQ_CHECK_CONDITION( CV_IS_SET_ELEM(elem3) &&
idx == idx0 && elem3 == elem2 && (!pass_data ||
memcmp( (char*)elem3 + sizeof(int), elem_data, pure_elem_size) == 0),
"The added element is not correct" );
-
+
CV_TS_SEQ_CHECK_CONDITION( (!first_free || elem3 == first_free) &&
(!next_free || cvset->free_elems == next_free) &&
cvset->active_count == prev_count + 1,
else if( op == 2 || op == 3 ) // remove element
{
idx = cvtest::randInt(rng) % sset->max_count;
-
+
if( sset->free_count == sset->max_count || idx >= sset->count )
continue;
-
+
elem_data = cvTsSimpleSetFind(sset, idx);
if( elem_data == 0 )
continue;
-
+
elem = cvGetSetElem( cvset, idx );
CV_TS_SEQ_CHECK_CONDITION( CV_IS_SET_ELEM(elem) && elem->flags == idx &&
memcmp((char*)elem + sizeof(int), elem_data, pure_elem_size) == 0,
"cvGetSetElem returned wrong element" );
-
+
if( by_ptr )
{
cvSetRemoveByPtr( cvset, elem );
{
cvSetRemove( cvset, idx );
}
-
+
cvTsSimpleSetRemove( sset, idx );
-
+
CV_TS_SEQ_CHECK_CONDITION( !CV_IS_SET_ELEM(elem) && !cvGetSetElem(cvset, idx) &&
(elem->flags & CV_SET_ELEM_IDX_MASK) == idx,
"cvSetRemove[ByPtr] didn't release the element properly" );
-
+
CV_TS_SEQ_CHECK_CONDITION( elem->next_free == first_free &&
cvset->free_elems == elem &&
cvset->active_count == prev_count - 1,
"The free node list has not been updated properly" );
}
-
+
//max_active_count = MAX( max_active_count, cvset->active_count );
//mean_active_count += cvset->active_count;
CV_TS_SEQ_CHECK_CONDITION( cvset->active_count == sset->max_count - sset->free_count &&
cvset->total >= cvset->active_count &&
(cvset->total == 0 || cvset->total >= prev_total),
"The total number of cvset elements is not correct" );
-
+
// CvSet and simple set do not necessarily have the same "total" (active & free) number,
// so pass "set->total" to skip that check
test_seq_block_consistence( struct_idx, (CvSeq*)cvset, cvset->total );
update_progressbar();
}
-
+
return 0;
}
{
RNG& rng = ts->get_rng();
double t;
-
+
clear();
test_progress = -1;
-
+
simple_struct.resize(struct_count, 0);
cxcore_struct.resize(struct_count, 0);
-
+
for( gen = 0; gen < generations; gen++ )
{
struct_idx = iter = -1;
t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size) + min_log_storage_block_size;
storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
-
+
for( int i = 0; i < struct_count; i++ )
{
t = cvtest::randReal(rng)*(max_log_elem_size - min_log_elem_size) + min_log_elem_size;
elem_size = MAX( elem_size, (int)sizeof(CvSetElem) );
elem_size = MIN( elem_size, (int)(storage->block_size - sizeof(void*) - sizeof(CvMemBlock) - sizeof(CvSeqBlock)) );
pure_elem_size = MIN( pure_elem_size, elem_size-(int)sizeof(CvSetElem) );
-
+
cvTsReleaseSimpleSet( (CvTsSimpleSet**)&simple_struct[i] );
simple_struct[i] = cvTsCreateSimpleSet( max_struct_size, pure_elem_size );
cxcore_struct[i] = cvCreateSet( 0, sizeof(CvSet), elem_size, storage );
}
-
+
if( test_set_ops( iterations*100 ) < 0 )
return;
-
+
storage.release();
}
}
Core_GraphTest();
void clear();
void run( int );
-
+
protected:
//int test_seq_block_consistence( int struct_idx );
int test_graph_ops( int iters );
CvGraphEdge* edge = 0, *edge2 = 0;
RNG& rng = ts->get_rng();
//int max_active_count = 0, mean_active_count = 0;
-
+
for( i = 0; i < struct_count; i++ )
{
CvGraph* graph = (CvGraph*)cxcore_struct[i];
max_elem_size = MAX( max_elem_size, graph->elem_size );
max_elem_size = MAX( max_elem_size, graph->edges->elem_size );
}
-
+
vector<schar> elem_buf(max_elem_size);
Mat elem_mat;
-
+
for( iter = 0; iter < iters; iter++ )
{
struct_idx = cvtest::randInt(rng) % struct_count;
int op = cvtest::randInt(rng) % max_op;
int pass_data = 0, vtx_degree0 = 0, vtx_degree = 0;
CvSetElem *first_free, *next_free;
-
+
if( cvtest::randInt(rng) % 200 == 0 ) // clear graph
{
- int prev_vtx_count = graph->total, prev_edge_count = graph->edges->total;
-
+ int prev_vtx_count2 = graph->total, prev_edge_count2 = graph->edges->total;
+
cvClearGraph( graph );
cvTsClearSimpleGraph( sgraph );
-
+
CV_TS_SEQ_CHECK_CONDITION( graph->active_count == 0 && graph->total == 0 &&
graph->first == 0 && graph->free_elems == 0 &&
- (graph->free_blocks != 0 || prev_vtx_count == 0),
+ (graph->free_blocks != 0 || prev_vtx_count2 == 0),
"The graph is not empty after clearing" );
-
+
CV_TS_SEQ_CHECK_CONDITION( edges->active_count == 0 && edges->total == 0 &&
edges->first == 0 && edges->free_elems == 0 &&
- (edges->free_blocks != 0 || prev_edge_count == 0),
+ (edges->free_blocks != 0 || prev_edge_count2 == 0),
"The graph is not empty after clearing" );
}
else if( op == 0 ) // add vertex
{
if( sgraph->vtx->free_count == 0 )
continue;
-
+
first_free = graph->free_elems;
next_free = first_free ? first_free->next_free : 0;
-
+
if( pure_vtx_size )
{
elem_mat = Mat(1, graph->elem_size, CV_8U, &elem_buf[0]);
cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
}
-
+
vtx = (CvGraphVtx*)&elem_buf[0];
idx0 = cvTsSimpleGraphAddVertex( sgraph, vtx + 1 );
-
+
pass_data = cvtest::randInt(rng) % 2;
idx = cvGraphAddVtx( graph, pass_data ? vtx : 0, &vtx2 );
-
+
if( !pass_data && pure_vtx_size > 0 )
memcpy( vtx2 + 1, vtx + 1, pure_vtx_size );
-
+
vtx3 = cvGetGraphVtx( graph, idx );
-
+
CV_TS_SEQ_CHECK_CONDITION( (CV_IS_SET_ELEM(vtx3) && vtx3->flags == idx &&
vtx3->first == 0) || (idx == idx0 && vtx3 == vtx2 &&
(!pass_data || pure_vtx_size == 0 ||
memcmp(vtx3 + 1, vtx + 1, pure_vtx_size) == 0)),
"The added element is not correct" );
-
+
CV_TS_SEQ_CHECK_CONDITION( (!first_free || first_free == (CvSetElem*)vtx3) &&
(!next_free || graph->free_elems == next_free) &&
graph->active_count == prev_vtx_count + 1,
idx = cvtest::randInt(rng) % sgraph->vtx->max_count;
if( sgraph->vtx->free_count == sgraph->vtx->max_count || idx >= sgraph->vtx->count )
continue;
-
+
vtx_data = cvTsSimpleGraphFindVertex(sgraph, idx);
if( vtx_data == 0 )
continue;
-
+
vtx_degree0 = cvTsSimpleGraphVertexDegree( sgraph, idx );
first_free = graph->free_elems;
-
+
vtx = cvGetGraphVtx( graph, idx );
CV_TS_SEQ_CHECK_CONDITION( CV_IS_SET_ELEM(vtx) && vtx->flags == idx &&
(pure_vtx_size == 0 || memcmp( vtx + 1, vtx_data, pure_vtx_size) == 0),
"cvGetGraphVtx returned wrong element" );
-
+
if( cvtest::randInt(rng) % 2 )
{
vtx_degree = cvGraphVtxDegreeByPtr( graph, vtx );
vtx_degree = cvGraphVtxDegree( graph, idx );
cvGraphRemoveVtx( graph, idx );
}
-
+
cvTsSimpleGraphRemoveVertex( sgraph, idx );
-
+
CV_TS_SEQ_CHECK_CONDITION( vtx_degree == vtx_degree0,
"Number of incident edges is different in two graph representations" );
-
+
CV_TS_SEQ_CHECK_CONDITION( !CV_IS_SET_ELEM(vtx) && !cvGetGraphVtx(graph, idx) &&
(vtx->flags & CV_SET_ELEM_IDX_MASK) == idx,
"cvGraphRemoveVtx[ByPtr] didn't release the vertex properly" );
-
+
CV_TS_SEQ_CHECK_CONDITION( graph->edges->active_count == prev_edge_count - vtx_degree,
"cvGraphRemoveVtx[ByPtr] didn't remove all the incident edges "
"(or removed some extra)" );
-
+
CV_TS_SEQ_CHECK_CONDITION( ((CvSetElem*)vtx)->next_free == first_free &&
graph->free_elems == (CvSetElem*)vtx &&
graph->active_count == prev_vtx_count - 1,
{
int v_idx[2] = {0,0}, res = 0;
int v_prev_degree[2] = {0,0}, v_degree[2] = {0,0};
-
+
if( sgraph->vtx->free_count >= sgraph->vtx->max_count-1 )
continue;
-
+
for( i = 0, k = 0; i < 10; i++ )
{
int j = cvtest::randInt(rng) % sgraph->vtx->count;
}
}
}
-
+
if( k < 2 )
continue;
-
+
first_free = graph->edges->free_elems;
next_free = first_free ? first_free->next_free : 0;
-
+
edge = cvFindGraphEdge( graph, v_idx[0], v_idx[1] );
CV_TS_SEQ_CHECK_CONDITION( edge == 0, "Extra edge appeared in the graph" );
-
+
if( pure_edge_size > 0 )
{
elem_mat = Mat(1, graph->edges->elem_size, CV_8U, &elem_buf[0]);
cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
}
edge = (CvGraphEdge*)&elem_buf[0];
-
+
// assign some default weight that is easy to check for
// consistency, because an edge weight is not stored
// in the simple graph
edge->weight = (float)(v_idx[0] + v_idx[1]);
pass_data = cvtest::randInt(rng) % 2;
-
+
vtx = cvGetGraphVtx( graph, v_idx[0] );
vtx2 = cvGetGraphVtx( graph, v_idx[1] );
CV_TS_SEQ_CHECK_CONDITION( vtx != 0 && vtx2 != 0 && vtx->flags == v_idx[0] &&
vtx2->flags == v_idx[1], "Some of the vertices are missing" );
-
+
if( cvtest::randInt(rng) % 2 )
{
v_prev_degree[0] = cvGraphVtxDegreeByPtr( graph, vtx );
v_degree[0] = cvGraphVtxDegree( graph, v_idx[0] );
v_degree[1] = cvGraphVtxDegree( graph, v_idx[1] );
}
-
+
//edge3 = (CvGraphEdge*)cvGetSetElem( graph->edges, idx );
CV_TS_SEQ_CHECK_CONDITION( res == 1 && edge2 != 0 && CV_IS_SET_ELEM(edge2) &&
((edge2->vtx[0] == vtx && edge2->vtx[1] == vtx2) ||
(!CV_IS_GRAPH_ORIENTED(graph) && edge2->vtx[0] == vtx2 && edge2->vtx[1] == vtx)) &&
(!pass_data || pure_edge_size == 0 || memcmp( edge2 + 1, edge + 1, pure_edge_size ) == 0),
"The edge has been added incorrectly" );
-
+
if( !pass_data )
{
if( pure_edge_size > 0 )
memcpy( edge2 + 1, edge + 1, pure_edge_size );
edge2->weight = edge->weight;
}
-
+
CV_TS_SEQ_CHECK_CONDITION( v_degree[0] == v_prev_degree[0] + 1 &&
v_degree[1] == v_prev_degree[1] + 1,
"The vertices lists have not been updated properly" );
-
+
cvTsSimpleGraphAddEdge( sgraph, v_idx[0], v_idx[1], edge + 1 );
-
+
CV_TS_SEQ_CHECK_CONDITION( (!first_free || first_free == (CvSetElem*)edge2) &&
(!next_free || graph->edges->free_elems == next_free) &&
graph->edges->active_count == prev_edge_count + 1,
{
int v_idx[2] = {0,0}, by_ptr;
int v_prev_degree[2] = {0,0}, v_degree[2] = {0,0};
-
+
if( sgraph->vtx->free_count >= sgraph->vtx->max_count-1 )
continue;
-
+
edge_data = 0;
for( i = 0, k = 0; i < 10; i++ )
{
}
}
}
-
+
if( k < 2 )
continue;
-
+
by_ptr = cvtest::randInt(rng) % 2;
first_free = graph->edges->free_elems;
-
+
vtx = cvGetGraphVtx( graph, v_idx[0] );
vtx2 = cvGetGraphVtx( graph, v_idx[1] );
CV_TS_SEQ_CHECK_CONDITION( vtx != 0 && vtx2 != 0 && vtx->flags == v_idx[0] &&
vtx2->flags == v_idx[1], "Some of the vertices are missing" );
-
+
if( by_ptr )
{
edge = cvFindGraphEdgeByPtr( graph, vtx, vtx2 );
v_prev_degree[0] = cvGraphVtxDegree( graph, v_idx[0] );
v_prev_degree[1] = cvGraphVtxDegree( graph, v_idx[1] );
}
-
+
idx = edge->flags;
-
+
CV_TS_SEQ_CHECK_CONDITION( edge != 0 && edge->weight == v_idx[0] + v_idx[1] &&
((edge->vtx[0] == vtx && edge->vtx[1] == vtx2) ||
(!CV_IS_GRAPH_ORIENTED(graph) && edge->vtx[1] == vtx && edge->vtx[0] == vtx2)) &&
(pure_edge_size == 0 || memcmp(edge + 1, edge_data, pure_edge_size) == 0),
"An edge is missing or incorrect" );
-
+
if( by_ptr )
{
cvGraphRemoveEdgeByPtr( graph, vtx, vtx2 );
v_degree[0] = cvGraphVtxDegree( graph, v_idx[0] );
v_degree[1] = cvGraphVtxDegree( graph, v_idx[1] );
}
-
+
CV_TS_SEQ_CHECK_CONDITION( !edge2 && !CV_IS_SET_ELEM(edge),
"The edge has not been removed from the edge set" );
-
+
CV_TS_SEQ_CHECK_CONDITION( v_degree[0] == v_prev_degree[0] - 1 &&
v_degree[1] == v_prev_degree[1] - 1,
"The vertices lists have not been updated properly" );
-
+
cvTsSimpleGraphRemoveEdge( sgraph, v_idx[0], v_idx[1] );
-
+
CV_TS_SEQ_CHECK_CONDITION( graph->edges->free_elems == (CvSetElem*)edge &&
graph->edges->free_elems->next_free == first_free &&
graph->edges->active_count == prev_edge_count - 1,
"The free edge list has not been modified properly" );
}
-
+
//max_active_count = MAX( max_active_count, graph->active_count );
//mean_active_count += graph->active_count;
-
+
CV_TS_SEQ_CHECK_CONDITION( graph->active_count == sgraph->vtx->max_count - sgraph->vtx->free_count &&
graph->total >= graph->active_count &&
(graph->total == 0 || graph->total >= prev_vtx_total),
"The total number of graph vertices is not correct" );
-
+
CV_TS_SEQ_CHECK_CONDITION( graph->edges->total >= graph->edges->active_count &&
(graph->edges->total == 0 || graph->edges->total >= prev_edge_total),
"The total number of graph vertices is not correct" );
-
+
// CvGraph and simple graph do not necessarily have the same "total" (active & free) number,
// so pass "graph->total" (or "graph->edges->total") to skip that check
test_seq_block_consistence( struct_idx, (CvSeq*)graph, graph->total );
test_seq_block_consistence( struct_idx, (CvSeq*)graph->edges, graph->edges->total );
update_progressbar();
}
-
+
return 0;
}
RNG& rng = ts->get_rng();
int i, k;
double t;
-
+
clear();
test_progress = -1;
-
+
simple_struct.resize(struct_count, 0);
cxcore_struct.resize(struct_count, 0);
-
+
for( gen = 0; gen < generations; gen++ )
{
struct_idx = iter = -1;
t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size) + min_log_storage_block_size;
int block_size = cvRound( exp(t * CV_LOG2) );
block_size = MAX(block_size, (int)(sizeof(CvGraph) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
-
+
storage = cvCreateMemStorage(block_size);
-
+
for( i = 0; i < struct_count; i++ )
{
int pure_elem_size[2], elem_size[2];
pure_elem_size[k] = pe;
elem_size[k] = e;
}
-
+
cvTsReleaseSimpleGraph( (CvTsSimpleGraph**)&simple_struct[i] );
simple_struct[i] = cvTsCreateSimpleGraph( max_struct_size/4, pure_elem_size[0],
pure_elem_size[1], is_oriented );
sizeof(CvGraph), elem_size[0], elem_size[1],
storage );
}
-
+
if( test_graph_ops( iterations*10 ) < 0 )
return;
-
+
storage.release();
}
}
public:
Core_GraphScanTest();
void run( int );
-
+
protected:
//int test_seq_block_consistence( int struct_idx );
int create_random_graph( int );
int i, vtx_count = cvtest::randInt(rng) % max_struct_size;
int edge_count = cvtest::randInt(rng) % MAX(vtx_count*20, 1);
CvGraph* graph;
-
+
struct_idx = _struct_idx;
cxcore_struct[_struct_idx] = graph =
cvCreateGraph(is_oriented ? CV_ORIENTED_GRAPH : CV_GRAPH,
sizeof(CvGraph), sizeof(CvGraphVtx),
sizeof(CvGraphEdge), storage );
-
+
for( i = 0; i < vtx_count; i++ )
cvGraphAddVtx( graph );
-
+
assert( graph->active_count == vtx_count );
-
+
for( i = 0; i < edge_count; i++ )
{
int j = cvtest::randInt(rng) % vtx_count;
int k = cvtest::randInt(rng) % vtx_count;
-
+
if( j != k )
cvGraphAddEdge( graph, j, k );
}
-
+
assert( graph->active_count == vtx_count && graph->edges->active_count <= edge_count );
-
+
return 0;
}
vector<uchar> vtx_mask, edge_mask;
double t;
int i;
-
+
clear();
test_progress = -1;
-
+
cxcore_struct.resize(struct_count, 0);
-
+
for( gen = 0; gen < generations; gen++ )
{
struct_idx = iter = -1;
storage_blocksize = MAX(storage_blocksize, (int)(sizeof(CvGraphEdge) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
storage_blocksize = MAX(storage_blocksize, (int)(sizeof(CvGraphVtx) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
storage = cvCreateMemStorage(storage_blocksize);
-
+
if( gen == 0 )
{
// special regression test for one sample graph.
// !!! ATTENTION !!! The test relies on the particular order of the inserted edges
// (LIFO: the edge inserted last goes first in the list of incident edges).
// if it is changed, the test will have to be modified.
-
+
int vtx_count = -1, edge_count = 0, edges[][3] =
{
{0,4,'f'}, {0,1,'t'}, {1,4,'t'}, {1,2,'t'}, {2,3,'t'}, {4,3,'c'}, {3,1,'b'},
{5,7,'t'}, {7,5,'b'}, {5,6,'t'}, {6,0,'c'}, {7,6,'c'}, {6,4,'c'}, {-1,-1,0}
};
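// The third field of each triple presumably encodes the DFS classification the scanner
// is expected to report for the edge {from, to}: 't' = tree, 'b' = back, 'c' = cross,
// 'f' = forward. The code is stored in the edge weight below so that the output of
// cvNextGraphItem() can be checked against it.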
-
+
CvGraph* graph = cvCreateGraph( CV_ORIENTED_GRAPH, sizeof(CvGraph),
sizeof(CvGraphVtx), sizeof(CvGraphEdge), storage );
-
+
for( i = 0; edges[i][0] >= 0; i++ )
{
vtx_count = MAX( vtx_count, edges[i][0] );
vtx_count = MAX( vtx_count, edges[i][1] );
}
vtx_count++;
-
+
for( i = 0; i < vtx_count; i++ )
cvGraphAddVtx( graph );
-
+
for( i = 0; edges[i][0] >= 0; i++ )
{
CvGraphEdge* edge;
cvGraphAddEdge( graph, edges[i][0], edges[i][1], 0, &edge );
edge->weight = (float)edges[i][2];
}
-
+
edge_count = i;
scanner = cvCreateGraphScanner( graph, 0, CV_GRAPH_ALL_ITEMS );
-
+
for(;;)
{
int code, a = -1, b = -1;
const char* event = "";
code = cvNextGraphItem( scanner );
-
+
switch( code )
{
case CV_GRAPH_VERTEX:
event = "End of procedure";
break;
default:
-#if _MSC_VER >= 1200
- #pragma warning( push )
- #pragma warning( disable : 4127 )
-#endif
CV_TS_SEQ_CHECK_CONDITION( 0, "Invalid code appeared during graph scan" );
-#if _MSC_VER >= 1200
- #pragma warning( pop )
-#endif
}
-
+
ts->printf( cvtest::TS::LOG, "%s", event );
if( a >= 0 )
{
else
ts->printf( cvtest::TS::LOG, ": %d", a );
}
-
+
ts->printf( cvtest::TS::LOG, "\n" );
-
+
if( code < 0 )
break;
}
-
+
CV_TS_SEQ_CHECK_CONDITION( vtx_count == 0 && edge_count == 0,
"Not every vertex/edge has been visited" );
update_progressbar();
}
-
+
// for a random graph the test just checks that every graph vertex and
// every edge is visited during the scan
for( iter = 0; iter < iterations; iter++ )
{
create_random_graph(0);
CvGraph* graph = (CvGraph*)cxcore_struct[0];
-
+
// iterate twice to check that scanner doesn't damage the graph
for( i = 0; i < 2; i++ )
{
CvGraphVtx* start_vtx = cvtest::randInt(rng) % 2 || graph->active_count == 0 ? 0 :
cvGetGraphVtx( graph, cvtest::randInt(rng) % graph->active_count );
-
+
scanner = cvCreateGraphScanner( graph, start_vtx, CV_GRAPH_ALL_ITEMS );
-
+
vtx_mask.resize(0);
vtx_mask.resize(graph->active_count, 0);
edge_mask.resize(0);
edge_mask.resize(graph->edges->active_count, 0);
-
+
for(;;)
{
int code = cvNextGraphItem( scanner );
-
+
if( code == CV_GRAPH_OVER )
break;
else if( code & CV_GRAPH_ANY_EDGE )
{
int edge_idx = scanner->edge->flags & CV_SET_ELEM_IDX_MASK;
-
+
CV_TS_SEQ_CHECK_CONDITION( edge_idx < graph->edges->active_count &&
edge_mask[edge_idx] == 0,
"The edge is not found or visited for the second time" );
else if( code & CV_GRAPH_VERTEX )
{
int vtx_idx = scanner->vtx->flags & CV_SET_ELEM_IDX_MASK;
-
+
CV_TS_SEQ_CHECK_CONDITION( vtx_idx < graph->active_count &&
vtx_mask[vtx_idx] == 0,
"The vtx is not found or visited for the second time" );
vtx_mask[vtx_idx] = 1;
}
}
-
+
cvReleaseGraphScanner( &scanner );
-
+
CV_TS_SEQ_CHECK_CONDITION( cvtest::norm(Mat(vtx_mask),CV_L1) == graph->active_count &&
cvtest::norm(Mat(edge_mask),CV_L1) == graph->edges->active_count,
"Some vertices or edges have not been visited" );
}
cvClearMemStorage( storage );
}
-
+
storage.release();
}
}
catch(int)
{
}
-
+
cvReleaseGraphScanner( &scanner );
}
assert( src.channels() == 1 );
if( dim == 0 ) // row
{
- sum.create( 1, src.cols, CV_64FC1 );
+ sum.create( 1, src.cols, CV_64FC1 );
max.create( 1, src.cols, CV_64FC1 );
min.create( 1, src.cols, CV_64FC1 );
}
else
{
- sum.create( src.rows, 1, CV_64FC1 );
+ sum.create( src.rows, 1, CV_64FC1 );
max.create( src.rows, 1, CV_64FC1 );
min.create( src.rows, 1, CV_64FC1 );
}
sum.setTo(Scalar(0));
max.setTo(Scalar(-DBL_MAX));
min.setTo(Scalar(DBL_MAX));
-
+
const Mat_<Type>& src_ = src;
Mat_<double>& sum_ = (Mat_<double>&)sum;
Mat_<double>& min_ = (Mat_<double>&)min;
Mat_<double>& max_ = (Mat_<double>&)max;
-
+
if( dim == 0 )
{
for( int ri = 0; ri < src.rows; ri++ )
else if ( dstType == CV_32S )
eps = 0.6;
}
-
+
assert( opRes.type() == CV_64FC1 );
Mat _dst, dst, diff;
reduce( src, _dst, dim, opType, dstType );
getMatTypeStr( src.type(), srcTypeStr );
getMatTypeStr( dstType, dstTypeStr );
const char* dimStr = dim == 0 ? "ROWS" : "COLS";
-
+
sprintf( msg, "bad accuracy with srcType = %s, dstType = %s, opType = %s, dim = %s",
srcTypeStr.c_str(), dstTypeStr.c_str(), opTypeStr, dimStr );
ts->printf( cvtest::TS::LOG, msg );
{
int code = cvtest::TS::OK, tempCode;
Mat src, sum, avg, max, min;
-
+
src.create( sz, srcType );
randu( src, Scalar(0), Scalar(100) );
-
+
if( srcType == CV_8UC1 )
testReduce<uchar>( src, sum, avg, max, min, dim );
else if( srcType == CV_8SC1 )
testReduce<float>( src, sum, avg, max, min, dim );
else if( srcType == CV_64FC1 )
testReduce<double>( src, sum, avg, max, min, dim );
- else
+ else
assert( 0 );
-
+
// 1. sum
tempCode = checkOp( src, dstType, CV_REDUCE_SUM, sum, dim );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
// 2. avg
tempCode = checkOp( src, dstType, CV_REDUCE_AVG, avg, dim );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
// 3. max
tempCode = checkOp( src, dstType, CV_REDUCE_MAX, max, dim );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
// 4. min
tempCode = checkOp( src, dstType, CV_REDUCE_MIN, min, dim );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
return code;
}
int Core_ReduceTest::checkDim( int dim, Size sz )
{
int code = cvtest::TS::OK, tempCode;
-
+
// CV_8UC1
tempCode = checkCase( CV_8UC1, CV_8UC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkCase( CV_8UC1, CV_32SC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkCase( CV_8UC1, CV_32FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkCase( CV_8UC1, CV_64FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
// CV_16UC1
tempCode = checkCase( CV_16UC1, CV_32FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkCase( CV_16UC1, CV_64FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
// CV_16SC1
tempCode = checkCase( CV_16SC1, CV_32FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkCase( CV_16SC1, CV_64FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
// CV_32FC1
tempCode = checkCase( CV_32FC1, CV_32FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkCase( CV_32FC1, CV_64FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
// CV_64FC1
tempCode = checkCase( CV_64FC1, CV_64FC1, dim, sz );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
return code;
}
int Core_ReduceTest::checkSize( Size sz )
{
int code = cvtest::TS::OK, tempCode;
-
+
tempCode = checkDim( 0, sz ); // rows
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
- tempCode = checkDim( 1, sz ); // cols
+
+ tempCode = checkDim( 1, sz ); // cols
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
return code;
}
void Core_ReduceTest::run( int )
{
int code = cvtest::TS::OK, tempCode;
-
+
tempCode = checkSize( Size(1,1) );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkSize( Size(1,100) );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkSize( Size(100,1) );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
tempCode = checkSize( Size(1000,500) );
code = tempCode != cvtest::TS::OK ? tempCode : code;
-
+
ts->set_failed_test_info( code );
}
#define CHECK_C
-Size sz(200, 500);
-
class Core_PCATest : public cvtest::BaseTest
{
public:
protected:
void run(int)
{
+ const Size sz(200, 500);
+
double diffPrjEps, diffBackPrjEps,
prjEps, backPrjEps,
evalEps, evecEps;
int maxComponents = 100;
Mat rPoints(sz, CV_32FC1), rTestPoints(sz, CV_32FC1);
- RNG& rng = ts->get_rng();
-
+ RNG& rng = ts->get_rng();
+
rng.fill( rPoints, RNG::UNIFORM, Scalar::all(0.0), Scalar::all(1.0) );
rng.fill( rTestPoints, RNG::UNIFORM, Scalar::all(0.0), Scalar::all(1.0) );
-
+
PCA rPCA( rPoints, Mat(), CV_PCA_DATA_AS_ROW, maxComponents ), cPCA;
-
+
// 1. check C++ PCA & ROW
Mat rPrjTestPoints = rPCA.project( rTestPoints );
Mat rBackPrjTestPoints = rPCA.backProject( rPrjTestPoints );
-
+
Mat avg(1, sz.width, CV_32FC1 );
reduce( rPoints, avg, 0, CV_REDUCE_AVG );
Mat Q = rPoints - repeat( avg, rPoints.rows, 1 ), Qt = Q.t(), eval, evec;
Q = Qt * Q;
Q = Q /(float)rPoints.rows;
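// Q now holds the sample covariance of the mean-subtracted rows, normalized by the
// number of samples; its leading eigenvalues/eigenvectors computed below should agree
// with rPCA.eigenvalues/eigenvectors (up to the sign of the eigenvectors).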
-
+
eigen( Q, eval, evec );
/*SVD svd(Q);
evec = svd.vt;
eval = svd.w;*/
-
+
Mat subEval( maxComponents, 1, eval.type(), eval.data ),
subEvec( maxComponents, evec.cols, evec.type(), evec.data );
-
+
#ifdef CHECK_C
Mat prjTestPoints, backPrjTestPoints, cPoints = rPoints.t(), cTestPoints = rTestPoints.t();
CvMat _points, _testPoints, _avg, _eval, _evec, _prjTestPoints, _backPrjTestPoints;
#endif
-
+
// check eigen()
double eigenEps = 1e-6;
double err;
{
Mat v = evec.row(i).t();
Mat Qv = Q * v;
-
+
Mat lv = eval.at<float>(i,0) * v;
err = norm( Qv, lv );
if( err > eigenEps )
absdiff(rPCA.eigenvectors, subEvec, tmp);
double mval = 0; Point mloc;
minMaxLoc(tmp, 0, &mval, 0, &mloc);
-
+
ts->printf( cvtest::TS::LOG, "pca.eigenvectors is incorrect (CV_PCA_DATA_AS_ROW); err = %f\n", err );
ts->printf( cvtest::TS::LOG, "max diff is %g at (i=%d, j=%d) (%g vs %g)\n",
mval, mloc.y, mloc.x, rPCA.eigenvectors.at<float>(mloc.y, mloc.x),
}
}
}
-
+
prjEps = 1.265, backPrjEps = 1.265;
for( int i = 0; i < rTestPoints.rows; i++ )
{
return;
}
}
-
+
// 2. check C++ PCA & COL
cPCA( rPoints.t(), Mat(), CV_PCA_DATA_AS_COL, maxComponents );
diffPrjEps = 1, diffBackPrjEps = 1;
ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
return;
}
-
+
#ifdef CHECK_C
// 3. check C PCA & ROW
_points = rPoints;
backPrjTestPoints.create(rPoints.size(), rPoints.type() );
_prjTestPoints = prjTestPoints;
_backPrjTestPoints = backPrjTestPoints;
-
+
cvCalcPCA( &_points, &_avg, &_eval, &_evec, CV_PCA_DATA_AS_ROW );
cvProjectPCA( &_testPoints, &_avg, &_evec, &_prjTestPoints );
cvBackProjectPCA( &_prjTestPoints, &_avg, &_evec, &_backPrjTestPoints );
-
+
err = norm(prjTestPoints, rPrjTestPoints, CV_RELATIVE_L2);
if( err > diffPrjEps )
{
ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
return;
}
-
+
// 3. check C PCA & COL
_points = cPoints;
_testPoints = cTestPoints;
evec = evec.t(); _evec = evec;
prjTestPoints = prjTestPoints.t(); _prjTestPoints = prjTestPoints;
backPrjTestPoints = backPrjTestPoints.t(); _backPrjTestPoints = backPrjTestPoints;
-
+
cvCalcPCA( &_points, &_avg, &_eval, &_evec, CV_PCA_DATA_AS_COL );
cvProjectPCA( &_testPoints, &_avg, &_evec, &_prjTestPoints );
cvBackProjectPCA( &_prjTestPoints, &_avg, &_evec, &_backPrjTestPoints );
-
+
err = norm(cv::abs(prjTestPoints), cv::abs(rPrjTestPoints.t()), CV_RELATIVE_L2 );
if( err > diffPrjEps )
{
{
public:
Core_ArrayOpTest();
- ~Core_ArrayOpTest();
+ ~Core_ArrayOpTest();
protected:
- void run(int);
+ void run(int);
};
d == 3 ? M.hash(idx[0], idx[1], idx[2]) : M.hash(idx);
phv = &hv;
}
-
+
const uchar* ptr = d == 2 ? M.ptr(idx[0], idx[1], false, phv) :
d == 3 ? M.ptr(idx[0], idx[1], idx[2], false, phv) :
M.ptr(idx, false, phv);
d == 3 ? M.hash(idx[0], idx[1], idx[2]) : M.hash(idx);
phv = &hv;
}
-
+
if( d == 2 )
M.erase(idx[0], idx[1], phv);
else if( d == 3 )
d == 3 ? M.hash(idx[0], idx[1], idx[2]) : M.hash(idx);
phv = &hv;
}
-
+
uchar* ptr = d == 2 ? M.ptr(idx[0], idx[1], true, phv) :
d == 3 ? M.ptr(idx[0], idx[1], idx[2], true, phv) :
M.ptr(idx, true, phv);
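// Both ptr() variants above exercise the optional precomputed-hash argument of
// cv::SparseMat: with createMissing == false a missing element yields a null pointer,
// with createMissing == true it is inserted; phv (when set) carries the hash obtained
// from M.hash() so that both the hashed and the non-hashed code paths are covered.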
void Core_ArrayOpTest::run( int /* start_from */)
{
int errcount = 0;
-
+
// dense matrix operations
{
int sz3[] = {5, 10, 15};
RNG rng;
rng.fill(A, CV_RAND_UNI, Scalar::all(-10), Scalar::all(10));
rng.fill(B, CV_RAND_UNI, Scalar::all(-10), Scalar::all(10));
-
+
int idx0[] = {3,4,5}, idx1[] = {0, 9, 7};
float val0 = 130;
Scalar val1(-1000, 30, 3, 8);
cvSetND(&matB, idx0, val1);
cvSet3D(&matB, idx1[0], idx1[1], idx1[2], -val1);
Ptr<CvMatND> matC = cvCloneMatND(&matB);
-
+
if( A.at<float>(idx0[0], idx0[1], idx0[2]) != val0 ||
A.at<float>(idx1[0], idx1[1], idx1[2]) != -val0 ||
cvGetReal3D(&matA, idx0[0], idx0[1], idx0[2]) != val0 ||
cvGetRealND(&matA, idx1) != -val0 ||
-
+
Scalar(B.at<Vec4s>(idx0[0], idx0[1], idx0[2])) != val1 ||
Scalar(B.at<Vec4s>(idx1[0], idx1[1], idx1[2])) != -val1 ||
Scalar(cvGet3D(matC, idx0[0], idx0[1], idx0[2])) != val1 ||
errcount++;
}
}
-
+
RNG rng;
const int MAX_DIM = 5, MAX_DIM_SZ = 10;
// sparse matrix operations
vector<double> all_vals2;
string sidx, min_sidx, max_sidx;
double min_val=0, max_val=0;
-
+
int p = 1;
for( k = 0; k < dims; k++ )
{
}
SparseMat M( dims, size, depth );
map<string, double> M0;
-
+
int nz0 = (unsigned)rng % max(p/5,10);
nz0 = min(max(nz0, 1), p);
all_vals.resize(nz0);
_all_vals2.convertTo(_all_vals2_f, CV_32F);
_all_vals2_f.convertTo(_all_vals2, CV_64F);
}
-
+
minMaxLoc(_all_vals, &min_val, &max_val);
double _norm0 = norm(_all_vals, CV_C);
double _norm1 = norm(_all_vals, CV_L1);
double _norm2 = norm(_all_vals, CV_L2);
-
+
for( i = 0; i < nz0; i++ )
{
for(;;)
break;
}
}
-
+
Ptr<CvSparseMat> M2 = (CvSparseMat*)M;
MatND Md;
M.copyTo(Md);
SparseMat M3; SparseMat(Md).convertTo(M3, Md.type(), 2);
-
+
int nz1 = (int)M.nzcount(), nz2 = (int)M3.nzcount();
double norm0 = norm(M, CV_C);
double norm1 = norm(M, CV_L1);
double norm2 = norm(M, CV_L2);
double eps = depth == CV_32F ? FLT_EPSILON*100 : DBL_EPSILON*1000;
-
+
if( nz1 != nz0 || nz2 != nz0)
{
errcount++;
si, nz1, nz2, nz0 );
break;
}
-
+
if( fabs(norm0 - _norm0) > fabs(_norm0)*eps ||
fabs(norm1 - _norm1) > fabs(_norm1)*eps ||
fabs(norm2 - _norm2) > fabs(_norm2)*eps )
si, norm0, norm1, norm2, _norm0, _norm1, _norm2 );
break;
}
-
+
int n = (unsigned)rng % max(p/5,10);
n = min(max(n, 1), p) + nz0;
-
+
for( i = 0; i < n; i++ )
{
double val1, val2, val3, val0;
val1 = getValue(M, idx, rng);
val2 = getValue(M2, idx);
val3 = getValue(M3, idx, rng);
-
+
if( val1 != val0 || val2 != val0 || fabs(val3 - val0*2) > fabs(val0*2)*FLT_EPSILON )
{
errcount++;
break;
}
}
-
+
for( i = 0; i < n; i++ )
{
double val1, val2;
errcount++;
ts->printf(cvtest::TS::LOG, "SparseMat: after deleting M[%s], it is =%g/%g (while it should be 0)\n", sidx.c_str(), val1, val2 );
break;
- }
+ }
}
-
+
int nz = (int)M.nzcount();
if( nz != 0 )
{
ts->printf(cvtest::TS::LOG, "The number of non-zero elements after removing all the elements = %d (while it should be 0)\n", nz );
break;
}
-
+
int idx1[MAX_DIM], idx2[MAX_DIM];
double val1 = 0, val2 = 0;
M3 = SparseMat(Md);
min_val, max_val, min_sidx.c_str(), max_sidx.c_str());
break;
}
-
+
minMaxIdx(Md, &val1, &val2, idx1, idx2);
s1 = idx2string(idx1, dims), s2 = idx2string(idx2, dims);
if( (min_val < 0 && (val1 != min_val || s1 != min_sidx)) ||
break;
}
}
-
+
ts->set_failed_test_info(errcount == 0 ? cvtest::TS::OK : cvtest::TS::FAIL_INVALID_OUTPUT);
}
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
36.42f, 37.65f, 38.89f, 40.11f, 41.34f, 42.56f, 43.77f };
static const double xp = 1.64;
CV_Assert(n >= 1);
-
+
if( n <= 30 )
return chi2_tab95[n-1];
return n + sqrt((double)2*n)*xp + 0.6666666666666*(xp*xp - 1);
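// For n > 30 degrees of freedom the 95% chi-square quantile is approximated by the
// standard large-n expansion chi2_0.95(n) ~ n + xp*sqrt(2*n) + (2/3)*(xp*xp - 1),
// where xp ~ 1.64 is the 95% quantile of the standard normal distribution; the
// hard-coded table above covers the small-n cases where this approximation is weak.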
const int* H = (const int*)hist.data;
float* H0 = ((float*)hist0.data);
int i, hsz = hist.cols;
-
+
double sum = 0;
for( i = 0; i < hsz; i++ )
sum += H[i];
CV_Assert( fabs(1./sum - scale) < FLT_EPSILON );
-
+
if( dist_type == CV_RAND_UNI )
{
float scale0 = (float)(1./hsz);
}
else
{
- double sum = 0, r = (hsz-1.)/2;
+ double sum2 = 0, r = (hsz-1.)/2;
double alpha = 2*sqrt(2.)/r, beta = -alpha*r;
for( i = 0; i < hsz; i++ )
{
double x = i*alpha + beta;
H0[i] = (float)exp(-x*x);
- sum += H0[i];
+ sum2 += H0[i];
}
- sum = 1./sum;
+ sum2 = 1./sum2;
for( i = 0; i < hsz; i++ )
- H0[i] = (float)(H0[i]*sum);
+ H0[i] = (float)(H0[i]*sum2);
}
-
+
double chi2 = 0;
for( i = 0; i < hsz; i++ )
{
chi2 += (a - b)*(a - b)/(a + b);
}
realval = chi2;
-
+
double chi2_pval = chi2_p95(hsz - 1 - (dist_type == CV_RAND_NORMAL ? 2 : 0));
refval = chi2_pval*0.01;
return realval <= refval;
static int _ranges[][2] =
{{ 0, 256 }, { -128, 128 }, { 0, 65536 }, { -32768, 32768 },
{ -1000000, 1000000 }, { -1000, 1000 }, { -1000, 1000 }};
-
+
const int MAX_SDIM = 10;
const int N = 2000000;
const int maxSlice = 1000;
const int MAX_HIST_SIZE = 1000;
int progress = 0;
-
+
RNG& rng = ts->get_rng();
RNG tested_rng = theRNG();
test_case_count = 200;
-
+
for( int idx = 0; idx < test_case_count; idx++ )
{
progress = update_progress( progress, idx, test_case_count, 0 );
ts->update_context( this, idx, false );
-
+
int depth = cvtest::randInt(rng) % (CV_64F+1);
int c, cn = (cvtest::randInt(rng) % 4) + 1;
int type = CV_MAKETYPE(depth, cn);
double eps = 1.e-4;
if (depth == CV_64F)
eps = 1.e-7;
-
+
bool do_sphere_test = dist_type == CV_RAND_UNI;
Mat arr[2], hist[4];
int W[] = {0,0,0,0};
-
+
arr[0].create(1, SZ, type);
arr[1].create(1, SZ, type);
bool fast_algo = dist_type == CV_RAND_UNI && depth < CV_32F;
-
+
for( c = 0; c < cn; c++ )
{
int a, b, hsz;
while( abs(a-b) <= 1 );
if( a > b )
std::swap(a, b);
-
+
unsigned r = (unsigned)(b - a);
fast_algo = fast_algo && r <= 256 && (r & (r-1)) == 0;
hsz = min((unsigned)(b - a), (unsigned)MAX_HIST_SIZE);
int meanrange = vrange/16;
int mindiv = MAX(vrange/20, 5);
int maxdiv = MIN(vrange/8, 10000);
-
+
a = cvtest::randInt(rng) % meanrange - meanrange/2 +
(_ranges[depth][0] + _ranges[depth][1])/2;
b = cvtest::randInt(rng) % (maxdiv - mindiv) + mindiv;
}
A[c] = a;
B[c] = b;
- hist[c].create(1, hsz, CV_32S);
+ hist[c].create(1, hsz, CV_32S);
}
-
+
cv::RNG saved_rng = tested_rng;
int maxk = fast_algo ? 0 : 1;
for( k = 0; k <= maxk; k++ )
tested_rng.fill(aslice, dist_type, A, B);
}
}
-
+
if( maxk >= 1 && norm(arr[0], arr[1], NORM_INF) > eps)
{
ts->printf( cvtest::TS::LOG, "RNG output depends on the array lengths (some generated numbers get lost?)" );
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
return;
}
-
+
for( c = 0; c < cn; c++ )
{
const uchar* data = arr[0].data;
double maxVal = dist_type == CV_RAND_UNI ? B[c] : A[c] + B[c]*4;
double scale = HSZ/(maxVal - minVal);
double delta = -minVal*scale;
-
+
hist[c] = Scalar::all(0);
-
+
for( i = c; i < SZ*cn; i += cn )
{
double val = depth == CV_8U ? ((const uchar*)data)[i] :
}
}
}
-
+
if( dist_type == CV_RAND_UNI && W[c] != SZ )
{
ts->printf( cvtest::TS::LOG, "Uniform RNG gave values out of the range [%g,%g) on channel %d/%d\n",
return;
}
double refval = 0, realval = 0;
-
+
if( !check_pdf(hist[c], 1./W[c], dist_type, refval, realval) )
{
ts->printf( cvtest::TS::LOG, "RNG failed Chi-square test "
return;
}
}
-
+
// Monte-Carlo test. Compute volume of SDIM-dimensional sphere
// inscribed in [-1,1]^SDIM cube.
if( do_sphere_test )
{
int SDIM = cvtest::randInt(rng) % (MAX_SDIM-1) + 2;
- int N0 = (SZ*cn/SDIM), N = 0;
+ int N0 = (SZ*cn/SDIM), n = 0;
double r2 = 0;
const uchar* data = arr[0].data;
double scale[4], delta[4];
scale[c] = 2./(B[c] - A[c]);
delta[c] = -A[c]*scale[c] - 1;
}
-
+
for( i = k = c = 0; i <= SZ*cn - SDIM; i++, k++, c++ )
{
double val = depth == CV_8U ? ((const uchar*)data)[i] :
r2 += val*val;
if( k == SDIM-1 )
{
- N += r2 <= 1;
+ n += r2 <= 1;
r2 = 0;
k = -1;
}
}
-
- double V = ((double)N/N0)*(1 << SDIM);
-
+
+ double V = ((double)n/N0)*(1 << SDIM);
+
// the theoretically computed volume
int sdim = SDIM % 2;
double V0 = sdim + 1;
for( sdim += 2; sdim <= SDIM; sdim += 2 )
V0 *= 2*CV_PI/sdim;
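// The loop applies the unit-ball volume recursion V_n = (2*pi/n) * V_{n-2} with
// V_0 = 1 and V_1 = 2, so V0 is the exact volume of the SDIM-dimensional unit ball,
// while V above is its Monte-Carlo estimate: the fraction of random points falling
// inside the ball times 2^SDIM, the volume of the enclosing [-1,1]^SDIM cube.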
-
+
if( fabs(V - V0) > 0.3*fabs(V0) )
{
ts->printf( cvtest::TS::LOG, "RNG failed %d-dim sphere volume test (got %g instead of %g)\n",
{
public:
Core_RandRangeTest() {}
- ~Core_RandRangeTest() {}
+ ~Core_RandRangeTest() {}
protected:
void run(int)
{
theRNG().fill(af, RNG::UNIFORM, -DBL_MAX, DBL_MAX);
int n0 = 0, n255 = 0, nx = 0;
int nfmin = 0, nfmax = 0, nfx = 0;
-
+
for( int i = 0; i < a.rows; i++ )
for( int j = 0; j < a.cols; j++ )
{
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
BOWKMeansTrainer::~BOWKMeansTrainer()
{}
-Mat BOWKMeansTrainer::cluster( const Mat& descriptors ) const
+Mat BOWKMeansTrainer::cluster( const Mat& _descriptors ) const
{
Mat labels, vocabulary;
- kmeans( descriptors, clusterCount, labels, termcrit, attempts, flags, vocabulary );
+ kmeans( _descriptors, clusterCount, labels, termcrit, attempts, flags, vocabulary );
return vocabulary;
}
+ sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
}
-void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
+static void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
{
for (int i = 0; i < (int)keypoints.size(); ++i)
{
}
}
-void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
+static void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
{
for (int i = 0; i < (int)keypoints.size(); ++i)
{
}
}
-void pixelTests64(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
+static void pixelTests64(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
{
for (int i = 0; i < (int)keypoints.size(); ++i)
{
void BriefDescriptorExtractor::read( const FileNode& fn)
{
- int descriptorSize = fn["descriptorSize"];
- switch (descriptorSize)
+ int dSize = fn["descriptorSize"];
+ switch (dSize)
{
case 16:
test_fn_ = pixelTests16;
default:
CV_Error(CV_StsBadArg, "descriptorSize must be 16, 32, or 64");
}
- bytes_ = descriptorSize;
+ bytes_ = dSize;
}
void BriefDescriptorExtractor::write( FileStorage& fs) const
{}
void DescriptorExtractor::compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const
-{
+{
if( image.empty() || keypoints.empty() )
{
descriptors.release();
string type = descriptorExtractorType.substr(pos);
return new OpponentColorDescriptorExtractor(DescriptorExtractor::create(type));
}
-
+
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
}
CV_Assert( !descriptorExtractor.empty() );
}
-void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, vector<Mat>& opponentChannels )
+static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, vector<Mat>& opponentChannels )
{
if( bgrImage.type() != CV_8UC3 )
CV_Error( CV_StsBadArg, "input image must be a BGR image of type CV_8UC3" );
vector<KeyPoint> outKeypoints;
outKeypoints.reserve( keypoints.size() );
- int descriptorSize = descriptorExtractor->descriptorSize();
- Mat mergedDescriptors( maxKeypointsCount, 3*descriptorSize, descriptorExtractor->descriptorType() );
+ int dSize = descriptorExtractor->descriptorSize();
+ Mat mergedDescriptors( maxKeypointsCount, 3*dSize, descriptorExtractor->descriptorType() );
int mergedCount = 0;
// cp - current channel position
- size_t cp[] = {0, 0, 0};
+ size_t cp[] = {0, 0, 0};
while( cp[0] < channelKeypoints[0].size() &&
cp[1] < channelKeypoints[1].size() &&
cp[2] < channelKeypoints[2].size() )
// merge descriptors
for( int ci = 0; ci < N; ci++ )
{
- Mat dst = mergedDescriptors(Range(mergedCount, mergedCount+1), Range(ci*descriptorSize, (ci+1)*descriptorSize));
+ Mat dst = mergedDescriptors(Range(mergedCount, mergedCount+1), Range(ci*dSize, (ci+1)*dSize));
channelDescriptors[ci].row( idxs[ci][cp[ci]] ).copyTo( dst );
cp[ci]++;
}
namespace cv
{
-
+
/*
* FeatureDetector
*/
return new GridAdaptedFeatureDetector(FeatureDetector::create(
detectorType.substr(strlen("Grid"))));
}
-
+
if( detectorType.find("Pyramid") == 0 )
{
return new PyramidAdaptedFeatureDetector(FeatureDetector::create(
detectorType.substr(strlen("Pyramid"))));
}
-
+
if( detectorType.find("Dynamic") == 0 )
{
return new DynamicAdaptedFeatureDetector(AdjusterAdapter::create(
detectorType.substr(strlen("Dynamic"))));
}
-
+
if( detectorType.compare( "HARRIS" ) == 0 )
{
Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
/*
* DenseFeatureDetector
*/
-DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
- float _featureScaleMul, int _initXyStep,
- int _initImgBound, bool _varyXyStepWithScale,
- bool _varyImgBoundWithScale ) :
- initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
- featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
- varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
+DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
+ float _featureScaleMul, int _initXyStep,
+ int _initImgBound, bool _varyXyStepWithScale,
+ bool _varyImgBoundWithScale ) :
+ initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
+ featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
+ varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
{}
}
};
-void keepStrongest( int N, vector<KeyPoint>& keypoints )
+static void keepStrongest( int N, vector<KeyPoint>& keypoints )
{
if( (int)keypoints.size() > N )
{
// draw keypoints
if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
{
- Mat outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
- drawKeypoints( outImg1, keypoints1, outImg1, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
+ Mat _outImg1 = outImg( Rect(0, 0, img1.cols, img1.rows) );
+ drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
- Mat outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
- drawKeypoints( outImg2, keypoints2, outImg2, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
+ Mat _outImg2 = outImg( Rect(img1.cols, 0, img2.cols, img2.rows) );
+ drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags + DrawMatchesFlags::DRAW_OVER_OUTIMG );
}
}
pt2 = kp2.pt,
dpt2 = Point2f( std::min(pt2.x+outImg1.cols, float(outImg.cols-1)), pt2.y );
- line( outImg,
- Point(cvRound(pt1.x*draw_multiplier), cvRound(pt1.y*draw_multiplier)),
- Point(cvRound(dpt2.x*draw_multiplier), cvRound(dpt2.y*draw_multiplier)),
+ line( outImg,
+ Point(cvRound(pt1.x*draw_multiplier), cvRound(pt1.y*draw_multiplier)),
+ Point(cvRound(dpt2.x*draw_multiplier), cvRound(dpt2.y*draw_multiplier)),
color, 1, CV_AA, draw_shift_bits );
}
#include "precomp.hpp"
-namespace cv
-{
+using namespace cv;
/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
CV_INIT_ALGORITHM(BriefDescriptorExtractor, "Feature2D.BRIEF",
obj.info()->addParam(obj, "bytes", obj.bytes_));
-
+
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST",
obj.info()->addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
obj.info()->addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
obj.info()->addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize));
-
+
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(MSER, "Feature2D.MSER",
obj.info()->addParam(obj, "maxEvolution", obj.maxEvolution);
obj.info()->addParam(obj, "areaThreshold", obj.areaThreshold);
obj.info()->addParam(obj, "minMargin", obj.minMargin);
- obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize));
-
+ obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize));
+
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
obj.info()->addParam(obj, "scoreType", obj.scoreType));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
+
CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
obj.info()->addParam(obj, "k", obj.k));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
+
class CV_EXPORTS HarrisDetector : public GFTTDetector
{
public:
HarrisDetector( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
- int blockSize=3, bool useHarrisDetector=true, double k=0.04 )
- : GFTTDetector( maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, k ) {}
+ int blockSize=3, bool useHarrisDetector=true, double k=0.04 );
AlgorithmInfo* info() const;
-};
+};
+
+inline HarrisDetector::HarrisDetector( int _maxCorners, double _qualityLevel, double _minDistance,
+ int _blockSize, bool _useHarrisDetector, double _k )
+ : GFTTDetector( _maxCorners, _qualityLevel, _minDistance, _blockSize, _useHarrisDetector, _k ) {}
CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
obj.info()->addParam(obj, "k", obj.k));
-////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
obj.info()->addParam(obj, "initFeatureScale", obj.initFeatureScale);
obj.info()->addParam(obj, "gridRows", obj.gridRows);
obj.info()->addParam(obj, "gridCols", obj.gridCols));
-bool initModule_features2d(void)
+bool cv::initModule_features2d(void)
{
- Ptr<Algorithm> brief = createBriefDescriptorExtractor(), orb = createORB(),
- star = createStarDetector(), fastd = createFastFeatureDetector(), mser = createMSER(),
- dense = createDenseFeatureDetector(), gftt = createGFTTDetector(),
- harris = createHarrisDetector(), grid = createGridAdaptedFeatureDetector();
-
- return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
- fastd->info() != 0 && mser->info() != 0 && dense->info() != 0 &&
- gftt->info() != 0 && harris->info() != 0 && grid->info() != 0;
+ bool all = true;
+ all &= !BriefDescriptorExtractor_info_auto.name().empty();
+ all &= !FastFeatureDetector_info_auto.name().empty();
+ all &= !StarDetector_info_auto.name().empty();
+ all &= !MSER_info_auto.name().empty();
+ all &= !ORB_info_auto.name().empty();
+ all &= !GFTTDetector_info_auto.name().empty();
+ all &= !HarrisDetector_info_auto.name().empty();
+ all &= !DenseFeatureDetector_info_auto.name().empty();
+ all &= !GridAdaptedFeatureDetector_info_auto.name().empty();
+
+ return all;
}
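// The *_info_auto objects queried above are presumably the static AlgorithmInfo
// instances generated by the CV_INIT_ALGORITHM macros earlier in this file; checking
// that each has a non-empty name confirms that static registration of the
// corresponding algorithm succeeded.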
-
-}
-
private:
const Mat mask;
+ MaskPredicate& operator=(const MaskPredicate&);
};
void KeyPointsFilter::runByPixelsMask( vector<KeyPoint>& keypoints, const Mat& mask )
/*
* DescriptorMatcher
*/
-void convertMatches( const vector<vector<DMatch> >& knnMatches, vector<DMatch>& matches )
+static void convertMatches( const vector<vector<DMatch> >& knnMatches, vector<DMatch>& matches )
{
matches.clear();
matches.reserve( knnMatches.size() );
for(int i = 0; i < (int)ip.size(); ++i)
{
CV_Assert(ip[i].type() == FileNode::MAP);
- std::string name = (std::string)ip[i]["name"];
+ std::string _name = (std::string)ip[i]["name"];
int type = (int)ip[i]["type"];
switch(type)
case CV_16U:
case CV_16S:
case CV_32S:
- indexParams->setInt(name, (int) ip[i]["value"]);
+ indexParams->setInt(_name, (int) ip[i]["value"]);
break;
case CV_32F:
- indexParams->setFloat(name, (float) ip[i]["value"]);
+ indexParams->setFloat(_name, (float) ip[i]["value"]);
break;
case CV_64F:
- indexParams->setDouble(name, (double) ip[i]["value"]);
+ indexParams->setDouble(_name, (double) ip[i]["value"]);
break;
case CV_USRTYPE1:
- indexParams->setString(name, (std::string) ip[i]["value"]);
+ indexParams->setString(_name, (std::string) ip[i]["value"]);
break;
case CV_MAKETYPE(CV_USRTYPE1,2):
- indexParams->setBool(name, (int) ip[i]["value"] != 0);
+ indexParams->setBool(_name, (int) ip[i]["value"] != 0);
break;
case CV_MAKETYPE(CV_USRTYPE1,3):
indexParams->setAlgorithm((int) ip[i]["value"]);
for(int i = 0; i < (int)sp.size(); ++i)
{
CV_Assert(sp[i].type() == FileNode::MAP);
- std::string name = (std::string)sp[i]["name"];
+ std::string _name = (std::string)sp[i]["name"];
int type = (int)sp[i]["type"];
switch(type)
case CV_16U:
case CV_16S:
case CV_32S:
- searchParams->setInt(name, (int) sp[i]["value"]);
+ searchParams->setInt(_name, (int) sp[i]["value"]);
break;
case CV_32F:
- searchParams->setFloat(name, (float) ip[i]["value"]);
+ searchParams->setFloat(_name, (float) ip[i]["value"]);
break;
case CV_64F:
- searchParams->setDouble(name, (double) ip[i]["value"]);
+ searchParams->setDouble(_name, (double) ip[i]["value"]);
break;
case CV_USRTYPE1:
- searchParams->setString(name, (std::string) ip[i]["value"]);
+ searchParams->setString(_name, (std::string) ip[i]["value"]);
break;
case CV_MAKETYPE(CV_USRTYPE1,2):
- searchParams->setBool(name, (int) ip[i]["value"] != 0);
+ searchParams->setBool(_name, (int) ip[i]["value"] != 0);
break;
case CV_MAKETYPE(CV_USRTYPE1,3):
searchParams->setAlgorithm((int) ip[i]["value"]);
}
*imgptr += 0x10000;
}
- int i = (int)(imgptr-ioptr);
- ptsptr->pt = cvPoint( i&stepmask, i>>stepgap );
+ int imsk = (int)(imgptr-ioptr);
+ ptsptr->pt = cvPoint( imsk&stepmask, imsk>>stepgap );
// get the current location
accumulateMSERComp( comptr, ptsptr );
ptsptr++;
* @param detector_params parameters to use
*/
ORB::ORB(int _nfeatures, float _scaleFactor, int _nlevels, int _edgeThreshold,
- int _firstLevel, int WTA_K, int _scoreType, int _patchSize) :
+ int _firstLevel, int _WTA_K, int _scoreType, int _patchSize) :
nfeatures(_nfeatures), scaleFactor(_scaleFactor), nlevels(_nlevels),
- edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(WTA_K),
+ edgeThreshold(_edgeThreshold), firstLevel(_firstLevel), WTA_K(_WTA_K),
scoreType(_scoreType), patchSize(_patchSize)
{}
for (int level = 0; level < nlevels; ++level)
{
- int nfeatures = nfeaturesPerLevel[level];
- allKeypoints[level].reserve(nfeatures*2);
+ int featuresNum = nfeaturesPerLevel[level];
+ allKeypoints[level].reserve(featuresNum*2);
vector<KeyPoint> & keypoints = allKeypoints[level];
if( scoreType == ORB::HARRIS_SCORE )
{
// Keep more points than necessary as FAST does not give amazing corners
- KeyPointsFilter::retainBest(keypoints, 2 * nfeatures);
+ KeyPointsFilter::retainBest(keypoints, 2 * featuresNum);
// Compute the Harris cornerness (better scoring than FAST)
HarrisResponses(imagePyramid[level], keypoints, 7, HARRIS_K);
}
//cull to the final desired level, using the new Harris scores or the original FAST scores.
- KeyPointsFilter::retainBest(keypoints, nfeatures);
+ KeyPointsFilter::retainBest(keypoints, featuresNum);
float sf = getScale(level, firstLevel, scaleFactor);
if( image.type() != CV_8UC1 )
cvtColor(_image, image, CV_BGR2GRAY);
- int nlevels = this->nlevels;
+ int levelsNum = this->nlevels;
if( !do_keypoints )
{
//
// In short, ultimately the descriptor should
// ignore the octave parameter and deal only with the keypoint size.
- nlevels = 0;
+ levelsNum = 0;
for( size_t i = 0; i < _keypoints.size(); i++ )
- nlevels = std::max(nlevels, std::max(_keypoints[i].octave, 0));
- nlevels++;
+ levelsNum = std::max(levelsNum, std::max(_keypoints[i].octave, 0));
+ levelsNum++;
}
// Pre-compute the scale pyramids
- vector<Mat> imagePyramid(nlevels), maskPyramid(nlevels);
- for (int level = 0; level < nlevels; ++level)
+ vector<Mat> imagePyramid(levelsNum), maskPyramid(levelsNum);
+ for (int level = 0; level < levelsNum; ++level)
{
float scale = 1/getScale(level, firstLevel, scaleFactor);
Size sz(cvRound(image.cols*scale), cvRound(image.rows*scale));
KeyPointsFilter::runByImageBorder(_keypoints, image.size(), edgeThreshold);
// Cluster the input keypoints depending on the level they were computed at
- allKeypoints.resize(nlevels);
+ allKeypoints.resize(levelsNum);
for (vector<KeyPoint>::iterator keypoint = _keypoints.begin(),
keypointEnd = _keypoints.end(); keypoint != keypointEnd; ++keypoint)
allKeypoints[keypoint->octave].push_back(*keypoint);
// Make sure we rescale the coordinates
- for (int level = 0; level < nlevels; ++level)
+ for (int level = 0; level < levelsNum; ++level)
{
if (level == firstLevel)
continue;
if( do_descriptors )
{
int nkeypoints = 0;
- for (int level = 0; level < nlevels; ++level)
+ for (int level = 0; level < levelsNum; ++level)
nkeypoints += (int)allKeypoints[level].size();
if( nkeypoints == 0 )
_descriptors.release();
_keypoints.clear();
int offset = 0;
- for (int level = 0; level < nlevels; ++level)
+ for (int level = 0; level < levelsNum; ++level)
{
// Get the features and compute their orientation
vector<KeyPoint>& keypoints = allKeypoints[level];
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
-#endif
-
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
computeIntegralImages( const Mat& matI, Mat& matS, Mat& matT, Mat& _FT )
{
CV_Assert( matI.type() == CV_8U );
-
+
int x, y, rows = matI.rows, cols = matI.cols;
-
+
matS.create(rows + 1, cols + 1, CV_32S);
matT.create(rows + 1, cols + 1, CV_32S);
_FT.create(rows + 1, cols + 1, CV_32S);
-
+
const uchar* I = matI.ptr<uchar>();
int *S = matS.ptr<int>(), *T = matT.ptr<int>(), *FT = _FT.ptr<int>();
int istep = (int)matI.step, step = (int)(matS.step/sizeof(S[0]));
StarFeature f[MAX_PATTERN];
Mat sum, tilted, flatTilted;
- int y, i=0, rows = img.rows, cols = img.cols;
+ int y, rows = img.rows, cols = img.cols;
int border, npatterns=0, maxIdx=0;
CV_Assert( img.type() == CV_8UC1 );
-
+
responses.create( img.size(), CV_32F );
sizes.create( img.size(), CV_16S );
- while( pairs[i][0] >= 0 && !
- ( sizes0[pairs[i][0]] >= maxSize
- || sizes0[pairs[i+1][0]] + sizes0[pairs[i+1][0]]/2 >= std::min(rows, cols) ) )
+ while( pairs[npatterns][0] >= 0 && !
+ ( sizes0[pairs[npatterns][0]] >= maxSize
+ || sizes0[pairs[npatterns+1][0]] + sizes0[pairs[npatterns+1][0]]/2 >= std::min(rows, cols) ) )
{
- ++i;
+ ++npatterns;
}
-
- npatterns = i;
+
npatterns += (pairs[npatterns-1][0] >= 0);
maxIdx = pairs[npatterns-1][0];
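// At this point npatterns appears to count the filter-size pairs small enough for the
// given maxSize and image dimensions, while maxIdx is the largest filter index those
// pairs reference; the loop below precomputes the integral-image pointers for
// filters 0..maxIdx.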
-
+
computeIntegralImages( img, sum, tilted, flatTilted );
int step = (int)(sum.step/sum.elemSize());
- for( i = 0; i <= maxIdx; i++ )
+ for(int i = 0; i <= maxIdx; i++ )
{
int ur_size = sizes0[i], t_size = sizes0[i] + sizes0[i]/2;
int ur_area = (2*ur_size + 1)*(2*ur_size + 1);
sizes1[maxIdx] = -sizes1[maxIdx];
border = sizes0[maxIdx] + sizes0[maxIdx]/2;
- for( i = 0; i < npatterns; i++ )
+ for(int i = 0; i < npatterns; i++ )
{
int innerArea = f[pairs[i][1]].area;
int outerArea = f[pairs[i][0]].area - innerArea;
invSizes[i][0] = 1.f/outerArea;
invSizes[i][1] = 1.f/innerArea;
}
-
+
#if CV_SSE2
if( useSIMD )
{
- for( i = 0; i < npatterns; i++ )
+ for(int i = 0; i < npatterns; i++ )
{
_mm_store_ps((float*)&invSizes4[i][0], _mm_set1_ps(invSizes[i][0]));
_mm_store_ps((float*)&invSizes4[i][1], _mm_set1_ps(invSizes[i][1]));
}
- for( i = 0; i <= maxIdx; i++ )
+ for(int i = 0; i <= maxIdx; i++ )
_mm_store_ps((float*)&sizes1_4[i], _mm_set1_ps((float)sizes1[i]));
}
#endif
float* r_ptr2 = responses.ptr<float>(rows - 1 - y);
short* s_ptr = sizes.ptr<short>(y);
short* s_ptr2 = sizes.ptr<short>(rows - 1 - y);
-
+
memset( r_ptr, 0, cols*sizeof(r_ptr[0]));
memset( r_ptr2, 0, cols*sizeof(r_ptr2[0]));
memset( s_ptr, 0, cols*sizeof(s_ptr[0]));
for( y = border; y < rows - border; y++ )
{
- int x = border, i;
+ int x = border;
float* r_ptr = responses.ptr<float>(y);
short* s_ptr = sizes.ptr<short>(y);
-
+
memset( r_ptr, 0, border*sizeof(r_ptr[0]));
memset( s_ptr, 0, border*sizeof(s_ptr[0]));
memset( r_ptr + cols - border, 0, border*sizeof(r_ptr[0]));
__m128 bestResponse = _mm_setzero_ps();
__m128 bestSize = _mm_setzero_ps();
- for( i = 0; i <= maxIdx; i++ )
+ for(int i = 0; i <= maxIdx; i++ )
{
const int** p = (const int**)&f[i].p[0];
__m128i r0 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[0]+ofs)),
_mm_store_ps((float*)&vals[i], _mm_cvtepi32_ps(r0));
}
- for( i = 0; i < npatterns; i++ )
+ for(int i = 0; i < npatterns; i++ )
{
__m128 inner_sum = vals[pairs[i][1]];
__m128 outer_sum = _mm_sub_ps(vals[pairs[i][0]], inner_sum);
_mm_packs_epi32(_mm_cvtps_epi32(bestSize),_mm_setzero_si128()));
}
}
-#endif
+#endif
for( ; x < cols - border; x++ )
{
int ofs = y*step + x;
float bestResponse = 0;
int bestSize = 0;
- for( i = 0; i <= maxIdx; i++ )
+ for(int i = 0; i <= maxIdx; i++ )
{
const int** p = (const int**)&f[i].p[0];
vals[i] = p[0][ofs] - p[1][ofs] - p[2][ofs] + p[3][ofs] +
p[4][ofs] - p[5][ofs] - p[6][ofs] + p[7][ofs];
}
- for( i = 0; i < npatterns; i++ )
+ for(int i = 0; i < npatterns; i++ )
{
int inner_sum = vals[pairs[i][1]];
int outer_sum = vals[pairs[i][0]] - inner_sum;
int x, y, delta = sz/4, radius = delta*4;
float Lxx = 0, Lyy = 0, Lxy = 0;
int Lxxb = 0, Lyyb = 0, Lxyb = 0;
-
+
for( y = pt.y - radius; y <= pt.y + radius; y += delta )
for( x = pt.x - radius; x <= pt.x + radius; x += delta )
{
float Ly = r_ptr[(y+1)*rstep + x] - r_ptr[(y-1)*rstep + x];
Lxx += Lx*Lx; Lyy += Ly*Ly; Lxy += Lx*Ly;
}
-
+
if( (Lxx + Lyy)*(Lxx + Lyy) >= lineThresholdProjected*(Lxx*Lyy - Lxy*Lxy) )
return true;
;
}
}
-
+
StarDetector::StarDetector(int _maxSize, int _responseThreshold,
int _lineThresholdProjected,
int _lineThresholdBinarized,
{
Mat grayImage = image;
if( image.type() != CV_8U ) cvtColor( image, grayImage, CV_BGR2GRAY );
-
+
(*this)(grayImage, keypoints);
KeyPointsFilter::runByPixelsMask( keypoints, mask );
-}
+}
void StarDetector::operator()(const Mat& img, vector<KeyPoint>& keypoints) const
{
responseThreshold, lineThresholdProjected,
lineThresholdBinarized, suppressNonmaxSize );
}
-
+
}
CV_DescriptorExtractorTest& operator=(const CV_DescriptorExtractorTest&) { return *this; }
};
-/*template<typename T, typename Distance>
-class CV_CalonderDescriptorExtractorTest : public CV_DescriptorExtractorTest<Distance>
-{
-public:
- CV_CalonderDescriptorExtractorTest( const char* testName, float _normDif, float _prevTime ) :
- CV_DescriptorExtractorTest<Distance>( testName, _normDif, Ptr<DescriptorExtractor>(), _prevTime )
- {}
-
-protected:
- virtual void createDescriptorExtractor()
- {
- CV_DescriptorExtractorTest<Distance>::dextractor =
- new CalonderDescriptorExtractor<T>( string(CV_DescriptorExtractorTest<Distance>::ts->get_data_path()) +
- FEATURES2D_DIR + "/calonder_classifier.rtc");
- }
-};*/
-
/****************************************************************************************\
* Algorithmic tests for descriptor matchers *
\****************************************************************************************/
dmatcher->radiusMatch( query, matches, radius, masks );
- int curRes = cvtest::TS::OK;
+ //int curRes = cvtest::TS::OK;
if( (int)matches.size() != queryDescCount )
{
ts->printf(cvtest::TS::LOG, "Incorrect matches count while test radiusMatch() function (1).\n");
}
if( (float)badCount > (float)queryDescCount*badPart )
{
- curRes = cvtest::TS::FAIL_INVALID_OUTPUT;
+ //curRes = cvtest::TS::FAIL_INVALID_OUTPUT;
ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test radiusMatch() function (2).\n",
(float)badCount/(float)queryDescCount );
ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
test.safe_run();
}
-#if CV_SSE2
-TEST( Features2d_DescriptorExtractor_Calonder_uchar, regression )
-{
- CV_CalonderDescriptorExtractorTest<uchar, L2<uchar> > test( "descriptor-calonder-uchar",
- std::numeric_limits<float>::epsilon() + 1,
- 0.0132175f );
- test.safe_run();
-}
-
-TEST( Features2d_DescriptorExtractor_Calonder_float, regression )
-{
- CV_CalonderDescriptorExtractorTest<float, L2<float> > test( "descriptor-calonder-float",
- std::numeric_limits<float>::epsilon(),
- 0.0221308f );
- test.safe_run();
-}
-#endif // CV_SSE2
-
/*
* Matchers
*/
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
/**
Default constructor. Initializes a new pool.
*/
- PooledAllocator(int blocksize = BLOCKSIZE)
+ PooledAllocator(int blockSize = BLOCKSIZE)
{
- this->blocksize = blocksize;
+ blocksize = blockSize;
remaining = 0;
base = NULL;
*/
void* allocateMemory(int size)
{
- int blocksize;
+ int blockSize;
/* Round size up to a multiple of wordsize. The following expression
only works for WORDSIZE that is a power of 2, by masking last bits of
wastedMemory += remaining;
/* Allocate new storage. */
- blocksize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ?
+ blockSize = (size + sizeof(void*) + (WORDSIZE-1) > BLOCKSIZE) ?
size + sizeof(void*) + (WORDSIZE-1) : BLOCKSIZE;
// use the standard C malloc to allocate memory
- void* m = ::malloc(blocksize);
+ void* m = ::malloc(blockSize);
if (!m) {
fprintf(stderr,"Failed to allocate memory.\n");
return NULL;
int shift = 0;
//int shift = (WORDSIZE - ( (((size_t)m) + sizeof(void*)) & (WORDSIZE-1))) & (WORDSIZE-1);
- remaining = blocksize - sizeof(void*) - shift;
+ remaining = blockSize - sizeof(void*) - shift;
loc = ((char*)m + sizeof(void*) + shift);
}
void* rloc = loc;
virtual ::size_t get_size() = 0;
virtual const std::type_info& type() = 0;
virtual void print(std::ostream& out, void* const* src) = 0;
+
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+ virtual ~base_any_policy() {}
+#endif
};
template<typename T>
#undef FLANN_PLATFORM_32_BIT
#undef FLANN_PLATFORM_64_BIT
-#if __amd64__ || __x86_64__ || _WIN64 || _M_X64
+#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64
#define FLANN_PLATFORM_64_BIT
#else
#define FLANN_PLATFORM_32_BIT
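The change above replaces bare macro tests with `defined` checks: an undefined identifier evaluates to 0 inside `#if`, but referencing it directly can trip -Wundef-style warnings. A small stand-alone sketch of the guarded pattern (the DEMO_* name is hypothetical):

#include <cstdio>

#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64
#  define DEMO_PLATFORM_BITS 64
#else
#  define DEMO_PLATFORM_BITS 32
#endif

int main()
{
    std::printf("compiled for a %d-bit target\n", DEMO_PLATFORM_BITS);
    return 0;
}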
#ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_
#define OPENCV_FLANN_DYNAMIC_BITSET_H_
+#ifndef FLANN_USE_BOOST
+# define FLANN_USE_BOOST 0
+#endif
//#define FLANN_USE_BOOST 1
#if FLANN_USE_BOOST
#include <boost/dynamic_bitset.hpp>
/** @param only constructor we use in our code
* @param the size of the bitset (in bits)
*/
- DynamicBitset(size_t size)
+ DynamicBitset(size_t sz)
{
- resize(size);
+ resize(sz);
reset();
}
/** @param resize the bitset so that it contains at least size bits
* @param size
*/
- void resize(size_t size)
+ void resize(size_t sz)
{
- size_ = size;
- bitset_.resize(size / cell_bit_size_ + 1);
+ size_ = sz;
+ bitset_.resize(sz / cell_bit_size_ + 1);
}
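resize() above sizes the storage as `sz / cell_bit_size_ + 1` cells, so a bit with index i lives in cell `i / cell_bit_size_` at position `i % cell_bit_size_`. A stand-alone sketch of that arithmetic, assuming the usual definition of the cell width as the bit count of size_t (all names here are hypothetical):

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    const std::size_t cell_bit_size = 8 * sizeof(std::size_t);   // assumed cell width
    const std::size_t sz = 1000;                                 // requested bit count

    std::vector<std::size_t> cells(sz / cell_bit_size + 1, 0);

    const std::size_t i = 300;                                   // set and test bit 300
    cells[i / cell_bit_size] |= std::size_t(1) << (i % cell_bit_size);
    const bool isSet = (cells[i / cell_bit_size] >> (i % cell_bit_size)) & 1;

    std::printf("bits=%u cells=%u bit300=%d\n",
                (unsigned)sz, (unsigned)cells.size(), (int)isSet);
    return 0;
}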
/** @param set a bit to true
* Constructor.
*
* Params:
- * size = heap size
+ * sz = heap size
*/
- Heap(int size)
+ Heap(int sz)
{
- length = size;
+ length = sz;
heap.reserve(length);
count = 0;
}
* indices_length = length of indices vector
*
*/
- void chooseCentersRandom(int k, int* indices, int indices_length, int* centers, int& centers_length)
+ void chooseCentersRandom(int k, int* dsindices, int indices_length, int* centers, int& centers_length)
{
UniqueRandom r(indices_length);
return;
}
- centers[index] = indices[rnd];
+ centers[index] = dsindices[rnd];
for (int j=0; j<index; ++j) {
DistanceType sq = distance(dataset[centers[index]], dataset[centers[j]], dataset.cols);
* indices = indices in the dataset
* Returns:
*/
- void chooseCentersGonzales(int k, int* indices, int indices_length, int* centers, int& centers_length)
+ void chooseCentersGonzales(int k, int* dsindices, int indices_length, int* centers, int& centers_length)
{
int n = indices_length;
int rnd = rand_int(n);
assert(rnd >=0 && rnd < n);
- centers[0] = indices[rnd];
+ centers[0] = dsindices[rnd];
int index;
for (index=1; index<k; ++index) {
int best_index = -1;
DistanceType best_val = 0;
for (int j=0; j<n; ++j) {
- DistanceType dist = distance(dataset[centers[0]],dataset[indices[j]],dataset.cols);
+ DistanceType dist = distance(dataset[centers[0]],dataset[dsindices[j]],dataset.cols);
for (int i=1; i<index; ++i) {
- DistanceType tmp_dist = distance(dataset[centers[i]],dataset[indices[j]],dataset.cols);
+ DistanceType tmp_dist = distance(dataset[centers[i]],dataset[dsindices[j]],dataset.cols);
if (tmp_dist<dist) {
dist = tmp_dist;
}
}
}
if (best_index!=-1) {
- centers[index] = indices[best_index];
+ centers[index] = dsindices[best_index];
}
else {
break;
* indices = indices in the dataset
* Returns:
*/
- void chooseCentersKMeanspp(int k, int* indices, int indices_length, int* centers, int& centers_length)
+ void chooseCentersKMeanspp(int k, int* dsindices, int indices_length, int* centers, int& centers_length)
{
int n = indices_length;
// Choose one random center and set the closestDistSq values
int index = rand_int(n);
assert(index >=0 && index < n);
- centers[0] = indices[index];
+ centers[0] = dsindices[index];
for (int i = 0; i < n; i++) {
- closestDistSq[i] = distance(dataset[indices[i]], dataset[indices[index]], dataset.cols);
+ closestDistSq[i] = distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols);
currentPot += closestDistSq[i];
}
// Compute the new potential
double newPot = 0;
- for (int i = 0; i < n; i++) newPot += std::min( distance(dataset[indices[i]], dataset[indices[index]], dataset.cols), closestDistSq[i] );
+ for (int i = 0; i < n; i++) newPot += std::min( distance(dataset[dsindices[i]], dataset[dsindices[index]], dataset.cols), closestDistSq[i] );
// Store the best result
if ((bestNewPot < 0)||(newPot < bestNewPot)) {
}
// Add the appropriate center
- centers[centerCount] = indices[bestNewIndex];
+ centers[centerCount] = dsindices[bestNewIndex];
currentPot = bestNewPot;
- for (int i = 0; i < n; i++) closestDistSq[i] = std::min( distance(dataset[indices[i]], dataset[indices[bestNewIndex]], dataset.cols), closestDistSq[i] );
+ for (int i = 0; i < n; i++) closestDistSq[i] = std::min( distance(dataset[dsindices[i]], dataset[dsindices[bestNewIndex]], dataset.cols), closestDistSq[i] );
}
centers_length = centerCount;
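chooseCentersKMeanspp above follows the k-means++ seeding rule: after a uniformly random first center, each subsequent center is drawn with probability proportional to the squared distance ("potential") of a point to its closest center so far, and the closestDistSq/currentPot bookkeeping is refreshed after every pick. A compact stand-alone sketch of that core idea, leaving out the several local trials the code runs per center (all names are hypothetical):

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <vector>

static double sqDist(const std::vector<double>& a, const std::vector<double>& b)
{
    double s = 0;
    for (std::size_t i = 0; i < a.size(); ++i) { const double d = a[i] - b[i]; s += d * d; }
    return s;
}

// Pick k seed indices from pts with D^2 ("potential") sampling.
std::vector<int> kmeansppSeed(const std::vector<std::vector<double> >& pts, int k)
{
    const int n = (int)pts.size();
    std::vector<int> centers(1, std::rand() % n);           // first center: uniform

    std::vector<double> closest(n);
    double pot = 0;
    for (int i = 0; i < n; ++i) { closest[i] = sqDist(pts[i], pts[centers[0]]); pot += closest[i]; }

    while ((int)centers.size() < k)
    {
        // draw an index with probability closest[i] / pot
        const double r = pot * (std::rand() / (RAND_MAX + 1.0));
        int next = 0;
        for (double acc = closest[0]; acc < r && next + 1 < n; ) acc += closest[++next];

        centers.push_back(next);
        pot = 0;
        for (int i = 0; i < n; ++i)                          // refresh potentials
        {
            closest[i] = std::min(closest[i], sqDist(pts[i], pts[next]));
            pot += closest[i];
        }
    }
    return centers;
}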
- void computeLabels(int* indices, int indices_length, int* centers, int centers_length, int* labels, DistanceType& cost)
+ void computeLabels(int* dsindices, int indices_length, int* centers, int centers_length, int* labels, DistanceType& cost)
{
cost = 0;
for (int i=0; i<indices_length; ++i) {
- ElementType* point = dataset[indices[i]];
+ ElementType* point = dataset[dsindices[i]];
DistanceType dist = distance(point, dataset[centers[0]], veclen_);
labels[i] = 0;
for (int j=1; j<centers_length; ++j) {
*
* TODO: for 1-sized clusters don't store a cluster center (it's the same as the single cluster point)
*/
- void computeClustering(NodePtr node, int* indices, int indices_length, int branching, int level)
+ void computeClustering(NodePtr node, int* dsindices, int indices_length, int branching, int level)
{
node->size = indices_length;
node->level = level;
if (indices_length < leaf_size_) { // leaf node
- node->indices = indices;
+ node->indices = dsindices;
std::sort(node->indices,node->indices+indices_length);
node->childs = NULL;
return;
std::vector<int> labels(indices_length);
int centers_length;
- (this->*chooseCenters)(branching, indices, indices_length, ¢ers[0], centers_length);
+ (this->*chooseCenters)(branching, dsindices, indices_length, ¢ers[0], centers_length);
if (centers_length<branching) {
- node->indices = indices;
+ node->indices = dsindices;
std::sort(node->indices,node->indices+indices_length);
node->childs = NULL;
return;
// assign points to clusters
DistanceType cost;
- computeLabels(indices, indices_length, ¢ers[0], centers_length, &labels[0], cost);
+ computeLabels(dsindices, indices_length, ¢ers[0], centers_length, &labels[0], cost);
node->childs = pool.allocate<NodePtr>(branching);
int start = 0;
for (int i=0; i<branching; ++i) {
for (int j=0; j<indices_length; ++j) {
if (labels[j]==i) {
- std::swap(indices[j],indices[end]);
+ std::swap(dsindices[j],dsindices[end]);
std::swap(labels[j],labels[end]);
end++;
}
node->childs[i] = pool.allocate<Node>();
node->childs[i]->pivot = centers[i];
node->childs[i]->indices = NULL;
- computeClustering(node->childs[i],indices+start, end-start, branching, level+1);
+ computeClustering(node->childs[i],dsindices+start, end-start, branching, level+1);
start=end;
}
}
int c2 = 1;
float p2;
int c1 = 1;
- float p1;
+ //float p1;
float time;
DistanceType dist;
while (p2<precision) {
c1 = c2;
- p1 = p2;
+ //p1 = p2;
c2 *=2;
p2 = search_with_ground_truth(index, inputData, testData, matches, nn, c2, time, dist, distance, skipMatches);
}
#include <iomanip>
#include <limits.h>
// TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP
+#ifdef __GXX_EXPERIMENTAL_CXX0X__
+# define USE_UNORDERED_MAP 1
+#else
+# define USE_UNORDERED_MAP 0
+#endif
#if USE_UNORDERED_MAP
#include <unordered_map>
#else
*************************************************************************/
#include "precomp.hpp"
-
-#ifdef _MSC_VER\r
-#pragma warning(disable: 4996)\r
-#endif
#include "opencv2/flann/flann.hpp"
namespace cvflann
#include <cstdarg>\r
#include <sstream>\r
\r
-#ifdef _MSC_VER\r
-#pragma warning(disable: 4996)\r
-#endif\r
-\r
#ifdef HAVE_CVCONFIG_H\r
# include "cvconfig.h"\r
#endif\r
source_group("Device\\Detail" FILES ${lib_device_hdrs_detail})
if (HAVE_CUDA)
- file(GLOB_RECURSE ncv_srcs "src/nvidia/*.cpp")
+ file(GLOB_RECURSE ncv_srcs "src/nvidia/*.cpp")
file(GLOB_RECURSE ncv_cuda "src/nvidia/*.cu")
file(GLOB_RECURSE ncv_hdrs "src/nvidia/*.hpp" "src/nvidia/*.h")
set(ncv_files ${ncv_srcs} ${ncv_hdrs} ${ncv_cuda})
source_group("Src\\NVidia" FILES ${ncv_files})
ocv_include_directories("src/nvidia" "src/nvidia/core" "src/nvidia/NPP_staging" ${CUDA_INCLUDE_DIRS})
+ ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations /wd4211 /wd4201 /wd4100 /wd4505 /wd4408)
#set (CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-keep")
#set (CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler;/EHsc-;")
-
+
if(MSVC)
if(NOT ENABLE_NOISY_WARNINGS)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4211 /wd4201 /wd4100 /wd4505 /wd4408")
-
foreach(var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
string(REPLACE "/W4" "/W3" ${var} "${${var}}")
endforeach()
-
+
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} -Xcompiler /wd4251)
endif()
endif()
- OCV_CUDA_COMPILE(cuda_objs ${lib_cuda} ${ncv_cuda})
+ ocv_cuda_compile(cuda_objs ${lib_cuda} ${ncv_cuda})
#CUDA_BUILD_CLEAN_TARGET()
-
+
set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
else()
set(lib_cuda "")
HEADERS ${lib_hdrs}
SOURCES ${lib_int_hdrs} ${lib_cuda_hdrs} ${lib_device_hdrs} ${lib_device_hdrs_detail} ${lib_srcs} ${lib_cuda} ${ncv_files} ${cuda_objs}
)
-
+
ocv_create_module(${cuda_link_libs})
-
+
if(HAVE_CUDA)
if(HAVE_CUFFT)
CUDA_ADD_CUFFT_TO_TARGET(${the_module})
if(HAVE_CUBLAS)
CUDA_ADD_CUBLAS_TO_TARGET(${the_module})
endif()
-
+
install(FILES src/nvidia/NPP_staging/NPP_staging.hpp src/nvidia/core/NCV.hpp
- DESTINATION ${OPENCV_INCLUDE_PREFIX}/opencv2/${name}
- COMPONENT main)
+ DESTINATION ${OPENCV_INCLUDE_PREFIX}/opencv2/${name}
+ COMPONENT main)
endif()
ocv_add_precompiled_headers(${the_module})
################################################################################################################
file(GLOB test_srcs "test/*.cpp")
file(GLOB test_hdrs "test/*.hpp" "test/*.h")
+
+set(nvidia "")
if(HAVE_CUDA)
- file(GLOB nvidia "test/nvidia/*.cpp" "test/nvidia/*.hpp" "test/nvidia/*.h")
+ file(GLOB nvidia "test/nvidia/*.cpp" "test/nvidia/*.hpp" "test/nvidia/*.h")
set(nvidia FILES "Src\\\\\\\\NVidia" ${nvidia}) # 8 ugly backslashes :'(
-else()
- set(nvidia "")
endif()
ocv_add_accuracy_tests(FILES "Include" ${test_hdrs}
{\r
public:\r
explicit GoodFeaturesToTrackDetector_GPU(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,\r
- int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04)\r
- {\r
- this->maxCorners = maxCorners;\r
- this->qualityLevel = qualityLevel;\r
- this->minDistance = minDistance;\r
- this->blockSize = blockSize;\r
- this->useHarrisDetector = useHarrisDetector;\r
- this->harrisK = harrisK;\r
- }\r
+ int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04);\r
\r
//! return 1 rows matrix with CV_32FC2 type\r
void operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask = GpuMat());\r
GpuMat tmpCorners_;\r
};\r
\r
+inline GoodFeaturesToTrackDetector_GPU::GoodFeaturesToTrackDetector_GPU(int maxCorners_, double qualityLevel_, double minDistance_,\r
+ int blockSize_, bool useHarrisDetector_, double harrisK_)\r
+{\r
+ maxCorners = maxCorners_;\r
+ qualityLevel = qualityLevel_;\r
+ minDistance = minDistance_;\r
+ blockSize = blockSize_;\r
+ useHarrisDetector = useHarrisDetector_;\r
+ harrisK = harrisK_;\r
+}\r
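The constructor body moved out of the class keeps the same defaults (maxCorners = 1000, qualityLevel = 0.01, and so on), so call sites are unchanged. A hedged usage sketch, assuming the opencv2/gpu headers, a CUDA-capable device, and a hypothetical input file name:

#include <opencv2/gpu/gpu.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat host = cv::imread("frame.png", 0);                 // 8-bit, single channel
    cv::gpu::GpuMat image(host), corners;

    // maxCorners, qualityLevel, minDistance; remaining parameters keep their defaults
    cv::gpu::GoodFeaturesToTrackDetector_GPU detector(500, 0.01, 10.0);
    detector(image, corners);                                  // corners: 1 x N, CV_32FC2

    return 0;
}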
+\r
+\r
class CV_EXPORTS PyrLKOpticalFlow\r
{\r
public:\r
+#ifdef __GNUC__\r
+# pragma GCC diagnostic ignored "-Wmissing-declarations"\r
+#endif\r
+\r
#ifndef __OPENCV_PERF_PRECOMP_HPP__\r
#define __OPENCV_PERF_PRECOMP_HPP__\r
\r
#include "opencv2/gpu/gpu.hpp"\r
#include "perf_utility.hpp"\r
\r
-#if GTEST_CREATE_SHARED_LIBRARY\r
+#ifdef GTEST_CREATE_SHARED_LIBRARY\r
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined\r
#endif\r
\r
\r
#else /* !defined (HAVE_CUDA) */\r
\r
-namespace cv { namespace gpu { namespace device\r
-{\r
-#define OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name) \\r
- void name(const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream);\r
-\r
-#define OPENCV_GPU_DECLARE_CVTCOLOR_ALL(name) \\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _16u) \\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f)\r
-\r
-#define OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(name) \\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f) \\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _full_8u) \\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _full_32f)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_bgra)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_rgba)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr_to_bgr555)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr_to_bgr565)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgb_to_bgr555)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgb_to_bgr565)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgra_to_bgr555)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgra_to_bgr565)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgba_to_bgr555)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgba_to_bgr565)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_bgra)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_bgra)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(gray_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(gray_to_bgra)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(gray_to_bgr555)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(gray_to_bgr565)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_gray)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_gray)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_gray)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_gray)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_gray)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_gray)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_yuv)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_yuv)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_yuv4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_yuv4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_yuv)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_yuv)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_yuv4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_yuv4)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_bgra)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_bgra)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_YCrCb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_YCrCb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_YCrCb4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_YCrCb4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_YCrCb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_YCrCb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_YCrCb4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_YCrCb4)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_bgra)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_bgra)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_xyz)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_xyz)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_xyz4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_xyz4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_xyz)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_xyz)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_xyz4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_xyz4)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_bgra)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_bgra)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hsv)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hsv)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hsv4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hsv4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hsv)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hsv)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hsv4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hsv4)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_bgra)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_bgra)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hls)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hls)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hls4)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hls4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hls)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hls)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hls4)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hls4)\r
-\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_rgb)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_rgba)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_bgra)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_bgr)\r
- OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_bgra)\r
-\r
- #undef OPENCV_GPU_DECLARE_CVTCOLOR_ONE\r
- #undef OPENCV_GPU_DECLARE_CVTCOLOR_ALL\r
- #undef OPENCV_GPU_DECLARE_CVTCOLOR_8U32F\r
-}}}\r
-\r
+#include <cvt_colot_internal.h>\r
using namespace ::cv::gpu::device;\r
\r
namespace\r
//\r
//M*/\r
\r
-#include "internal_shared.hpp"\r
-#include "opencv2/gpu/device/transform.hpp"\r
-#include "opencv2/gpu/device/color.hpp"\r
+#include <internal_shared.hpp>\r
+#include <opencv2/gpu/device/transform.hpp>\r
+#include <opencv2/gpu/device/color.hpp>\r
+#include <cvt_colot_internal.h>\r
\r
namespace cv { namespace gpu { namespace device \r
{\r
__device__ __forceinline__ bool operator()(int y, int x) const \r
{ \r
return true; \r
- } \r
+ }\r
+ __device__ __forceinline__ MaskTrue(){}\r
+ __device__ __forceinline__ MaskTrue(const MaskTrue& mask_){}\r
};\r
\r
//////////////////////////////////////////////////////////////////////////////\r
return 0;\r
}\r
\r
+ __device__ __forceinline__ SumReductor(const SumReductor& other){}\r
+ __device__ __forceinline__ SumReductor(){}\r
+\r
__device__ __forceinline__ S operator ()(volatile S a, volatile S b) const\r
{\r
return a + b;\r
return 0;\r
}\r
\r
+ __device__ __forceinline__ AvgReductor(const AvgReductor& other){}\r
+ __device__ __forceinline__ AvgReductor(){}\r
+\r
__device__ __forceinline__ S operator ()(volatile S a, volatile S b) const\r
{\r
return a + b;\r
return numeric_limits<S>::max();\r
}\r
\r
+ __device__ __forceinline__ MinReductor(const MinReductor& other){}\r
+ __device__ __forceinline__ MinReductor(){}\r
+\r
template <typename T> __device__ __forceinline__ T operator ()(volatile T a, volatile T b) const\r
{\r
return saturate_cast<T>(::min(a, b));\r
return numeric_limits<S>::min();\r
}\r
\r
+ __device__ __forceinline__ MaxReductor(const MaxReductor& other){}\r
+ __device__ __forceinline__ MaxReductor(){}\r
+\r
template <typename T> __device__ __forceinline__ int operator ()(volatile T a, volatile T b) const\r
{\r
return ::max(a, b);\r
\r
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)\r
{\r
- #if __CUDA_ARCH__ >= 200\r
+ #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200\r
typedef double real_t;\r
#else\r
typedef float real_t;\r
template <typename Mask>\r
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer, unsigned int* maxCounter)\r
{\r
- #if __CUDA_ARCH__ >= 110\r
+ #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110\r
\r
extern __shared__ float N9[];\r
\r
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,\r
unsigned int* featureCounter)\r
{\r
- #if __CUDA_ARCH__ >= 110\r
+ #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110\r
\r
const int4 maxPos = maxPosBuffer[blockIdx.x];\r
\r
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __cvt_color_internal_h__
+#define __cvt_color_internal_h__
+
+namespace cv { namespace gpu { namespace device
+{
+#define OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name) \
+ void name(const DevMem2Db& src, const DevMem2Db& dst, cudaStream_t stream);
+
+#define OPENCV_GPU_DECLARE_CVTCOLOR_ALL(name) \
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _16u) \
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f)
+
+#define OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(name) \
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _8u) \
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _32f) \
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _full_8u) \
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(name ## _full_32f)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_bgra)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_rgba)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr_to_bgr555)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr_to_bgr565)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgb_to_bgr555)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgb_to_bgr565)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgra_to_bgr555)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgra_to_bgr565)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgba_to_bgr555)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(rgba_to_bgr565)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_bgra)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_bgra)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(gray_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(gray_to_bgra)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(gray_to_bgr555)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(gray_to_bgr565)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr555_to_gray)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ONE(bgr565_to_gray)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_gray)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_gray)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_gray)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_gray)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_yuv)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_yuv)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_yuv4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_yuv4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_yuv)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_yuv)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_yuv4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_yuv4)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv_to_bgra)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(yuv4_to_bgra)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_YCrCb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_YCrCb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_YCrCb4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_YCrCb4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_YCrCb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_YCrCb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_YCrCb4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_YCrCb4)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb_to_bgra)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(YCrCb4_to_bgra)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_xyz)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_xyz)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgb_to_xyz4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(rgba_to_xyz4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_xyz)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_xyz)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_xyz4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgra_to_xyz4)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz_to_bgra)
+ OPENCV_GPU_DECLARE_CVTCOLOR_ALL(xyz4_to_bgra)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hsv)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hsv)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hsv4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hsv4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hsv)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hsv)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hsv4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hsv4)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv_to_bgra)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hsv4_to_bgra)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hls)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hls)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgb_to_hls4)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(rgba_to_hls4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hls)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hls)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgr_to_hls4)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(bgra_to_hls4)
+
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_rgb)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_rgba)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls_to_bgra)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_bgr)
+ OPENCV_GPU_DECLARE_CVTCOLOR_8U32F(hls4_to_bgra)
+
+ #undef OPENCV_GPU_DECLARE_CVTCOLOR_ONE
+ #undef OPENCV_GPU_DECLARE_CVTCOLOR_ALL
+ #undef OPENCV_GPU_DECLARE_CVTCOLOR_8U32F
+}}}
+
+#endif
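The declaration macros in this new header stamp out one function per element type via token pasting; for example OPENCV_GPU_DECLARE_CVTCOLOR_ALL(bgr_to_rgb) yields declarations of bgr_to_rgb_8u, bgr_to_rgb_16u and bgr_to_rgb_32f. A tiny stand-alone sketch of the same ## pattern with hypothetical names:

#include <cstdio>

#define DECLARE_ONE(name) void name(int value);
#define DECLARE_ALL(name) DECLARE_ONE(name ## _8u) DECLARE_ONE(name ## _16u) DECLARE_ONE(name ## _32f)

DECLARE_ALL(demo_convert)   // declares demo_convert_8u, demo_convert_16u, demo_convert_32f

void demo_convert_8u(int v)  { std::printf("8u  path: %d\n", v); }
void demo_convert_16u(int v) { std::printf("16u path: %d\n", v); }
void demo_convert_32f(int v) { std::printf("32f path: %d\n", v); }

int main()
{
    demo_convert_8u(1); demo_convert_16u(2); demo_convert_32f(3);
    return 0;
}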
\r
__device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut)\r
{\r
-#if __CUDA_ARCH__ >= 110\r
+#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110\r
\r
__shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL * 2];\r
__shared__ Ncv32u numPassed;\r
}\r
else\r
{\r
-#if __CUDA_ARCH__ >= 110\r
+#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110\r
if (bPass && !threadIdx.x)\r
{\r
Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1);\r
Ncv8u width;\r
Ncv8u height;\r
__host__ __device__ NcvRect8u() : x(0), y(0), width(0), height(0) {};\r
- __host__ __device__ NcvRect8u(Ncv8u x, Ncv8u y, Ncv8u width, Ncv8u height) : x(x), y(y), width(width), height(height) {}\r
+ __host__ __device__ NcvRect8u(Ncv8u x_, Ncv8u y_, Ncv8u width_, Ncv8u height_) : x(x_), y(y_), width(width_), height(height_) {}\r
};\r
\r
\r
Ncv32s width; ///< Rectangle width.\r
Ncv32s height; ///< Rectangle height.\r
__host__ __device__ NcvRect32s() : x(0), y(0), width(0), height(0) {};\r
- __host__ __device__ NcvRect32s(Ncv32s x, Ncv32s y, Ncv32s width, Ncv32s height) : x(x), y(y), width(width), height(height) {}\r
+ __host__ __device__ NcvRect32s(Ncv32s x_, Ncv32s y_, Ncv32s width_, Ncv32s height_)\r
+ : x(x_), y(y_), width(width_), height(height_) {}\r
};\r
\r
\r
Ncv32u width; ///< Rectangle width.\r
Ncv32u height; ///< Rectangle height.\r
__host__ __device__ NcvRect32u() : x(0), y(0), width(0), height(0) {};\r
- __host__ __device__ NcvRect32u(Ncv32u x, Ncv32u y, Ncv32u width, Ncv32u height) : x(x), y(y), width(width), height(height) {}\r
+ __host__ __device__ NcvRect32u(Ncv32u x_, Ncv32u y_, Ncv32u width_, Ncv32u height_)\r
+ : x(x_), y(y_), width(width_), height(height_) {}\r
};\r
\r
\r
Ncv32s width; ///< Rectangle width.\r
Ncv32s height; ///< Rectangle height.\r
__host__ __device__ NcvSize32s() : width(0), height(0) {};\r
- __host__ __device__ NcvSize32s(Ncv32s width, Ncv32s height) : width(width), height(height) {}\r
+ __host__ __device__ NcvSize32s(Ncv32s width_, Ncv32s height_) : width(width_), height(height_) {}\r
};\r
\r
\r
Ncv32u width; ///< Rectangle width.\r
Ncv32u height; ///< Rectangle height.\r
__host__ __device__ NcvSize32u() : width(0), height(0) {};\r
- __host__ __device__ NcvSize32u(Ncv32u width, Ncv32u height) : width(width), height(height) {}\r
+ __host__ __device__ NcvSize32u(Ncv32u width_, Ncv32u height_) : width(width_), height(height_) {}\r
__host__ __device__ bool operator == (const NcvSize32u &another) const {return this->width == another.width && this->height == another.height;}\r
};\r
\r
Ncv32s x; ///< Point X.\r
Ncv32s y; ///< Point Y.\r
__host__ __device__ NcvPoint2D32s() : x(0), y(0) {};\r
- __host__ __device__ NcvPoint2D32s(Ncv32s x, Ncv32s y) : x(x), y(y) {}\r
+ __host__ __device__ NcvPoint2D32s(Ncv32s x_, Ncv32s y_) : x(x_), y(y_) {}\r
};\r
\r
\r
Ncv32u x; ///< Point X.\r
Ncv32u y; ///< Point Y.\r
__host__ __device__ NcvPoint2D32u() : x(0), y(0) {};\r
- __host__ __device__ NcvPoint2D32u(Ncv32u x, Ncv32u y) : x(x), y(y) {}\r
+ __host__ __device__ NcvPoint2D32u(Ncv32u x_, Ncv32u y_) : x(x_), y(y_) {}\r
};\r
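The constructor changes above are all the same fix: the parameters previously had the same names as the members they initialize, which shadows the members and triggers -Wshadow; the trailing-underscore rename keeps the initializer lists unambiguous. A minimal sketch of the pattern (DemoRect is a hypothetical type):

struct DemoRect
{
    int x, y, width, height;

    DemoRect() : x(0), y(0), width(0), height(0) {}

    // parameters carry a trailing underscore so they no longer shadow the members
    DemoRect(int x_, int y_, int width_, int height_)
        : x(x_), y(y_), width(width_), height(height_) {}
};

int main()
{
    DemoRect r(1, 2, 640, 480);
    return r.width == 640 ? 0 : 1;
}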
\r
\r
\r
public:\r
\r
- NCVVectorAlloc(INCVMemAllocator &allocator, Ncv32u length)\r
+ NCVVectorAlloc(INCVMemAllocator &allocator_, Ncv32u length)\r
:\r
- allocator(allocator)\r
+ allocator(allocator_)\r
{\r
NCVStatus ncvStat;\r
\r
NCVMatrixAlloc& operator=(const NCVMatrixAlloc &);\r
public:\r
\r
- NCVMatrixAlloc(INCVMemAllocator &allocator, Ncv32u width, Ncv32u height, Ncv32u pitch=0)\r
+ NCVMatrixAlloc(INCVMemAllocator &allocator, Ncv32u width, Ncv32u height, Ncv32u _pitch=0)\r
:\r
allocator(allocator)\r
{\r
Ncv32u widthBytes = width * sizeof(T);\r
Ncv32u pitchBytes = alignUp(widthBytes, allocator.alignment());\r
\r
- if (pitch != 0)\r
+ if (_pitch != 0)\r
{\r
- ncvAssertPrintReturn(pitch >= pitchBytes &&\r
- (pitch & (allocator.alignment() - 1)) == 0,\r
+ ncvAssertPrintReturn(_pitch >= pitchBytes &&\r
+ (_pitch & (allocator.alignment() - 1)) == 0,\r
"NCVMatrixAlloc ctor:: incorrect pitch passed", );\r
- pitchBytes = pitch;\r
+ pitchBytes = _pitch;\r
}\r
\r
Ncv32u requiredAllocSize = pitchBytes * height;\r
\r
\r
\r
-#endif // _ncv_hpp_
\ No newline at end of file
+#endif // _ncv_hpp_\r
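NCVMatrixAlloc above derives its row pitch by rounding the row width in bytes up to the allocator alignment, and accepts a caller-supplied pitch only if it is at least that large and itself a multiple of the alignment. A stand-alone sketch of that round-up arithmetic for a power-of-two alignment (alignUpDemo and the sample numbers are hypothetical):

#include <cstdio>

// round v up to the next multiple of alignment (alignment must be a power of two)
static unsigned alignUpDemo(unsigned v, unsigned alignment)
{
    return (v + alignment - 1) & ~(alignment - 1);
}

int main()
{
    const unsigned width = 100, elemSize = 4, alignment = 512;
    const unsigned widthBytes = width * elemSize;                   // 400
    const unsigned pitchBytes = alignUpDemo(widthBytes, alignment); // rounds up to 512
    std::printf("widthBytes=%u pitchBytes=%u bytes per row\n", widthBytes, pitchBytes);
    return 0;
}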
\r
#ifndef _ncvruntimetemplates_hpp_\r
#define _ncvruntimetemplates_hpp_\r
-#if _MSC_VER >= 1200\r
+#if defined _MSC_VER && _MSC_VER >= 1200\r
#pragma warning( disable: 4800 )\r
#endif\r
\r
\r
namespace cv { namespace gpu { namespace device \r
{\r
- #if __CUDA_ARCH__ >= 200\r
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 200\r
\r
// for Fermi memory space is detected automatically\r
template <typename T> struct ForceGlob\r
static __device__ __forceinline__ T max() { return numeric_limits<T>::max(); }\r
static __device__ __forceinline__ T half() { return (T)(max()/2 + 1); }\r
};\r
+\r
template<> struct ColorChannel<float>\r
{\r
typedef float worktype_f;\r
template <typename T> static __device__ __forceinline__ void setAlpha(typename TypeVec<T, 3>::vec_type& vec, T val)\r
{\r
}\r
+\r
template <typename T> static __device__ __forceinline__ void setAlpha(typename TypeVec<T, 4>::vec_type& vec, T val)\r
{\r
vec.w = val;\r
}\r
+\r
template <typename T> static __device__ __forceinline__ T getAlpha(const typename TypeVec<T, 3>::vec_type& vec)\r
{\r
return ColorChannel<T>::max();\r
}\r
+\r
template <typename T> static __device__ __forceinline__ T getAlpha(const typename TypeVec<T, 4>::vec_type& vec)\r
{\r
return vec.w;\r
\r
namespace color_detail\r
{\r
- template <typename T, int scn, int dcn, int bidx> struct RGB2RGB : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx> struct RGB2RGB\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+\r
+ __device__ __forceinline__ RGB2RGB()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+\r
+ __device__ __forceinline__ RGB2RGB(const RGB2RGB& other_)\r
+ :unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
\r
template <> struct RGB2RGB<uchar, 4, 4, 2> : unary_function<uint, uint>\r
\r
return dst;\r
}\r
+\r
+ __device__ __forceinline__ RGB2RGB():unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ RGB2RGB(const RGB2RGB& other_):unary_function<uint, uint>(){}\r
};\r
}\r
\r
{\r
return (ushort)(((&src.x)[bidx] >> 3) | ((src.y & ~3) << 3) | (((&src.x)[bidx^2] & ~7) << 8));\r
}\r
+\r
static __device__ __forceinline__ ushort cvt(uint src)\r
{\r
uint b = 0xffu & (src >> (bidx * 8));\r
return (ushort)((b >> 3) | ((g & ~3) << 3) | ((r & ~7) << 8));\r
}\r
};\r
+\r
template<int bidx> struct RGB2RGB5x5Converter<5, bidx>\r
{\r
static __device__ __forceinline__ ushort cvt(const uchar3& src)\r
{\r
return (ushort)(((&src.x)[bidx] >> 3) | ((src.y & ~7) << 2) | (((&src.x)[bidx^2] & ~7) << 7));\r
}\r
+\r
static __device__ __forceinline__ ushort cvt(uint src)\r
{\r
uint b = 0xffu & (src >> (bidx * 8));\r
};\r
\r
template<int scn, int bidx, int green_bits> struct RGB2RGB5x5;\r
+\r
template<int bidx, int green_bits> struct RGB2RGB5x5<3, bidx,green_bits> : unary_function<uchar3, ushort>\r
{\r
__device__ __forceinline__ ushort operator()(const uchar3& src) const\r
{\r
return RGB2RGB5x5Converter<green_bits, bidx>::cvt(src);\r
}\r
+\r
+ __device__ __forceinline__ RGB2RGB5x5():unary_function<uchar3, ushort>(){}\r
+ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5& other_):unary_function<uchar3, ushort>(){}\r
};\r
+\r
template<int bidx, int green_bits> struct RGB2RGB5x5<4, bidx,green_bits> : unary_function<uint, ushort>\r
{\r
__device__ __forceinline__ ushort operator()(uint src) const\r
{\r
return RGB2RGB5x5Converter<green_bits, bidx>::cvt(src);\r
}\r
+\r
+ __device__ __forceinline__ RGB2RGB5x5():unary_function<uint, ushort>(){}\r
+ __device__ __forceinline__ RGB2RGB5x5(const RGB2RGB5x5& other_):unary_function<uint, ushort>(){}\r
};\r
}\r
\r
namespace color_detail\r
{\r
template <int green_bits, int bidx> struct RGB5x52RGBConverter;\r
+\r
template <int bidx> struct RGB5x52RGBConverter<5, bidx>\r
{\r
static __device__ __forceinline__ void cvt(uint src, uchar3& dst)\r
dst.y = (src >> 2) & ~7;\r
(&dst.x)[bidx ^ 2] = (src >> 7) & ~7;\r
}\r
+\r
static __device__ __forceinline__ void cvt(uint src, uint& dst)\r
{\r
dst = 0;\r
dst |= ((src & 0x8000) * 0xffu) << 24;\r
}\r
};\r
+\r
template <int bidx> struct RGB5x52RGBConverter<6, bidx>\r
{\r
static __device__ __forceinline__ void cvt(uint src, uchar3& dst)\r
dst.y = (src >> 3) & ~3;\r
(&dst.x)[bidx ^ 2] = (src >> 8) & ~7;\r
}\r
+\r
static __device__ __forceinline__ void cvt(uint src, uint& dst)\r
{\r
dst = 0xffu << 24;\r
};\r
\r
template <int dcn, int bidx, int green_bits> struct RGB5x52RGB;\r
+\r
template <int bidx, int green_bits> struct RGB5x52RGB<3, bidx, green_bits> : unary_function<ushort, uchar3>\r
{\r
__device__ __forceinline__ uchar3 operator()(ushort src) const\r
RGB5x52RGBConverter<green_bits, bidx>::cvt(src, dst);\r
return dst;\r
}\r
+ __device__ __forceinline__ RGB5x52RGB():unary_function<ushort, uchar3>(){}\r
+ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB& other_):unary_function<ushort, uchar3>(){}\r
+\r
};\r
+\r
template <int bidx, int green_bits> struct RGB5x52RGB<4, bidx, green_bits> : unary_function<ushort, uint>\r
{\r
__device__ __forceinline__ uint operator()(ushort src) const\r
RGB5x52RGBConverter<green_bits, bidx>::cvt(src, dst);\r
return dst;\r
}\r
+ __device__ __forceinline__ RGB5x52RGB():unary_function<ushort, uint>(){}\r
+ __device__ __forceinline__ RGB5x52RGB(const RGB5x52RGB& other_):unary_function<ushort, uint>(){}\r
};\r
}\r
\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ Gray2RGB():unary_function<T, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ Gray2RGB(const Gray2RGB& other_)\r
+ : unary_function<T, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <> struct Gray2RGB<uchar, 4> : unary_function<uchar, uint>\r
{\r
__device__ __forceinline__ uint operator()(uint src) const\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ Gray2RGB():unary_function<uchar, uint>(){}\r
+ __device__ __forceinline__ Gray2RGB(const Gray2RGB& other_):unary_function<uchar, uint>(){}\r
};\r
}\r
\r
return (ushort)((t >> 3) | ((t & ~3) << 3) | ((t & ~7) << 8));\r
}\r
};\r
+\r
template<> struct Gray2RGB5x5Converter<5>\r
{\r
static __device__ __forceinline__ ushort cvt(uint t)\r
{\r
return Gray2RGB5x5Converter<green_bits>::cvt(src);\r
}\r
+\r
+ __device__ __forceinline__ Gray2RGB5x5():unary_function<uchar, ushort>(){}\r
+ __device__ __forceinline__ Gray2RGB5x5(const Gray2RGB5x5& other_):unary_function<uchar, ushort>(){}\r
};\r
}\r
\r
return (uchar)CV_DESCALE(((t << 3) & 0xf8) * B2Y + ((t >> 3) & 0xfc) * G2Y + ((t >> 8) & 0xf8) * R2Y, yuv_shift);\r
}\r
};\r
+\r
template <> struct RGB5x52GrayConverter<5>\r
{\r
static __device__ __forceinline__ uchar cvt(uint t)\r
{\r
return RGB5x52GrayConverter<green_bits>::cvt(src);\r
}\r
+ __device__ __forceinline__ RGB5x52Gray() : unary_function<ushort, uchar>(){}\r
+ __device__ __forceinline__ RGB5x52Gray(const RGB5x52Gray& other_) : unary_function<ushort, uchar>(){}\r
};\r
}\r
\r
{\r
return (T)CV_DESCALE((unsigned)(src[bidx] * B2Y + src[1] * G2Y + src[bidx^2] * R2Y), yuv_shift);\r
}\r
+\r
template <int bidx> static __device__ __forceinline__ uchar RGB2GrayConvert(uint src)\r
{\r
uint b = 0xffu & (src >> (bidx * 8));\r
uint r = 0xffu & (src >> ((bidx ^ 2) * 8));\r
return CV_DESCALE((uint)(b * B2Y + g * G2Y + r * R2Y), yuv_shift);\r
}\r
+\r
template <int bidx> static __device__ __forceinline__ float RGB2GrayConvert(const float* src)\r
{\r
return src[bidx] * 0.114f + src[1] * 0.587f + src[bidx^2] * 0.299f;\r
{\r
return RGB2GrayConvert<bidx>(&src.x);\r
}\r
+ __device__ __forceinline__ RGB2Gray() : unary_function<typename TypeVec<T, scn>::vec_type, T>(){}\r
+ __device__ __forceinline__ RGB2Gray(const RGB2Gray& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, T>(){}\r
};\r
+\r
template <int bidx> struct RGB2Gray<uchar, 4, bidx> : unary_function<uint, uchar>\r
{\r
__device__ __forceinline__ uchar operator()(uint src) const\r
{\r
return RGB2GrayConvert<bidx>(src);\r
}\r
+ __device__ __forceinline__ RGB2Gray() : unary_function<uint, uchar>(){}\r
+ __device__ __forceinline__ RGB2Gray(const RGB2Gray& other_) : unary_function<uint, uchar>(){}\r
};\r
}\r
\r
dst.z = (src[bidx] - dst.x) * c_RGB2YUVCoeffs_f[4] + ColorChannel<float>::half();\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx> struct RGB2YUV : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx> struct RGB2YUV\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
RGB2YUVConvert<bidx>(&src.x, dst);\r
return dst;\r
}\r
+ __device__ __forceinline__ RGB2YUV()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ RGB2YUV(const RGB2YUV& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
}\r
\r
template <int bidx, typename T, typename D> static __device__ void YUV2RGBConvert(const T& src, D* dst)\r
{\r
const int b = src.x + CV_DESCALE((src.z - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift);\r
- const int g = src.x + CV_DESCALE((src.z - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[2] + (src.y - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift);\r
+\r
+ const int g = src.x + CV_DESCALE((src.z - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[2]\r
+ + (src.y - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift);\r
+\r
const int r = src.x + CV_DESCALE((src.y - ColorChannel<D>::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift);\r
\r
dst[bidx] = saturate_cast<D>(b);\r
dst[1] = saturate_cast<D>(g);\r
dst[bidx^2] = saturate_cast<D>(r);\r
}\r
+\r
template <int bidx> static __device__ uint YUV2RGBConvert(uint src)\r
{\r
const int x = 0xff & (src);\r
const int z = 0xff & (src >> 16);\r
\r
const int b = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[3], yuv_shift);\r
- const int g = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[2] + (y - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift);\r
+\r
+ const int g = x + CV_DESCALE((z - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[2]\r
+ + (y - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[1], yuv_shift);\r
+\r
const int r = x + CV_DESCALE((y - ColorChannel<uchar>::half()) * c_YUV2RGBCoeffs_i[0], yuv_shift);\r
\r
uint dst = 0xffu << 24;\r
\r
return dst;\r
}\r
+\r
template <int bidx, typename T> static __device__ __forceinline__ void YUV2RGBConvert(const T& src, float* dst)\r
{\r
dst[bidx] = src.x + (src.z - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[3];\r
- dst[1] = src.x + (src.z - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[2] + (src.y - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[1];\r
+\r
+ dst[1] = src.x + (src.z - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[2]\r
+ + (src.y - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[1];\r
+\r
dst[bidx^2] = src.x + (src.y - ColorChannel<float>::half()) * c_YUV2RGBCoeffs_f[0];\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx> struct YUV2RGB : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx> struct YUV2RGB\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ YUV2RGB()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ YUV2RGB(const YUV2RGB& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx> struct YUV2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator ()(uint src) const\r
{\r
return YUV2RGBConvert<bidx>(src);\r
}\r
+ __device__ __forceinline__ YUV2RGB() : unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ YUV2RGB(const YUV2RGB& other_) : unary_function<uint, uint>(){}\r
};\r
}\r
\r
dst.y = saturate_cast<T>(Cr);\r
dst.z = saturate_cast<T>(Cb);\r
}\r
+\r
template <int bidx> static __device__ uint RGB2YCrCbConvert(uint src)\r
{\r
const int delta = ColorChannel<uchar>::half() * (1 << yuv_shift);\r
\r
return dst;\r
}\r
+\r
template <int bidx, typename D> static __device__ __forceinline__ void RGB2YCrCbConvert(const float* src, D& dst)\r
{\r
dst.x = src[0] * c_RGB2YCrCbCoeffs_f[bidx^2] + src[1] * c_RGB2YCrCbCoeffs_f[1] + src[2] * c_RGB2YCrCbCoeffs_f[bidx];\r
dst.z = (src[bidx] - dst.x) * c_RGB2YCrCbCoeffs_f[4] + ColorChannel<float>::half();\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx> struct RGB2YCrCb : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx> struct RGB2YCrCb\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
RGB2YCrCbConvert<bidx>(&src.x, dst);\r
return dst;\r
}\r
+ __device__ __forceinline__ RGB2YCrCb()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx> struct RGB2YCrCb<uchar, 4, 4, bidx> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator ()(uint src) const\r
{\r
return RGB2YCrCbConvert<bidx>(src);\r
}\r
+\r
+ __device__ __forceinline__ RGB2YCrCb() : unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ RGB2YCrCb(const RGB2YCrCb& other_) : unary_function<uint, uint>(){}\r
};\r
}\r
\r
dst[1] = saturate_cast<D>(g);\r
dst[bidx^2] = saturate_cast<D>(r);\r
}\r
+\r
template <int bidx> static __device__ uint YCrCb2RGBConvert(uint src)\r
{\r
const int x = 0xff & (src);\r
\r
return dst;\r
}\r
+\r
template <int bidx, typename T> __device__ __forceinline__ void YCrCb2RGBConvert(const T& src, float* dst)\r
{\r
dst[bidx] = src.x + (src.z - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[3];\r
dst[bidx^2] = src.x + (src.y - ColorChannel<float>::half()) * c_YCrCb2RGBCoeffs_f[0];\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx> struct YCrCb2RGB : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx> struct YCrCb2RGB\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator ()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ YCrCb2RGB()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx> struct YCrCb2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator ()(uint src) const\r
{\r
return YCrCb2RGBConvert<bidx>(src);\r
}\r
+ __device__ __forceinline__ YCrCb2RGB() : unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ YCrCb2RGB(const YCrCb2RGB& other_) : unary_function<uint, uint>(){}\r
};\r
}\r
\r
dst.y = saturate_cast<T>(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[3] + src[1] * c_RGB2XYZ_D65i[4] + src[bidx] * c_RGB2XYZ_D65i[5], xyz_shift));\r
dst.z = saturate_cast<T>(CV_DESCALE(src[bidx^2] * c_RGB2XYZ_D65i[6] + src[1] * c_RGB2XYZ_D65i[7] + src[bidx] * c_RGB2XYZ_D65i[8], xyz_shift));\r
}\r
+\r
template <int bidx> static __device__ __forceinline__ uint RGB2XYZConvert(uint src)\r
{\r
const uint b = 0xffu & (src >> (bidx * 8));\r
\r
return dst;\r
}\r
+\r
template <int bidx, typename D> static __device__ __forceinline__ void RGB2XYZConvert(const float* src, D& dst)\r
{\r
dst.x = src[bidx^2] * c_RGB2XYZ_D65f[0] + src[1] * c_RGB2XYZ_D65f[1] + src[bidx] * c_RGB2XYZ_D65f[2];\r
dst.z = src[bidx^2] * c_RGB2XYZ_D65f[6] + src[1] * c_RGB2XYZ_D65f[7] + src[bidx] * c_RGB2XYZ_D65f[8];\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx> struct RGB2XYZ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx> struct RGB2XYZ\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ RGB2XYZ()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx> struct RGB2XYZ<uchar, 4, 4, bidx> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator()(uint src) const\r
{\r
return RGB2XYZConvert<bidx>(src);\r
}\r
+ __device__ __forceinline__ RGB2XYZ() : unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ RGB2XYZ(const RGB2XYZ& other_) : unary_function<uint, uint>(){}\r
};\r
}\r
\r
dst[1] = saturate_cast<D>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[3] + src.y * c_XYZ2sRGB_D65i[4] + src.z * c_XYZ2sRGB_D65i[5], xyz_shift));\r
dst[bidx] = saturate_cast<D>(CV_DESCALE(src.x * c_XYZ2sRGB_D65i[6] + src.y * c_XYZ2sRGB_D65i[7] + src.z * c_XYZ2sRGB_D65i[8], xyz_shift));\r
}\r
+\r
template <int bidx> static __device__ __forceinline__ uint XYZ2RGBConvert(uint src)\r
{\r
const int x = 0xff & src;\r
\r
return dst;\r
}\r
+\r
template <int bidx, typename T> static __device__ __forceinline__ void XYZ2RGBConvert(const T& src, float* dst)\r
{\r
dst[bidx^2] = src.x * c_XYZ2sRGB_D65f[0] + src.y * c_XYZ2sRGB_D65f[1] + src.z * c_XYZ2sRGB_D65f[2];\r
dst[bidx] = src.x * c_XYZ2sRGB_D65f[6] + src.y * c_XYZ2sRGB_D65f[7] + src.z * c_XYZ2sRGB_D65f[8];\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx> struct XYZ2RGB : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx> struct XYZ2RGB\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ XYZ2RGB()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx> struct XYZ2RGB<uchar, 4, 4, bidx> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator()(uint src) const\r
{\r
return XYZ2RGBConvert<bidx>(src);\r
}\r
+ __device__ __forceinline__ XYZ2RGB() : unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ XYZ2RGB(const XYZ2RGB& other_) : unary_function<uint, uint>(){}\r
};\r
}\r
\r
dst.y = (uchar)s;\r
dst.z = (uchar)v;\r
}\r
+\r
template <int bidx, int hr> static __device__ uint RGB2HSVConvert(uint src)\r
{\r
const int hsv_shift = 12;\r
\r
return dst;\r
}\r
+\r
template <int bidx, int hr, typename D> static __device__ void RGB2HSVConvert(const float* src, D& dst)\r
{\r
const float hscale = hr * (1.f / 360.f);\r
dst.z = v;\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx, int hr> struct RGB2HSV : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx, int hr> struct RGB2HSV\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ RGB2HSV()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ RGB2HSV(const RGB2HSV& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx, int hr> struct RGB2HSV<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator()(uint src) const\r
{\r
return RGB2HSVConvert<bidx, hr>(src);\r
}\r
+ __device__ __forceinline__ RGB2HSV():unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ RGB2HSV(const RGB2HSV& other_):unary_function<uint, uint>(){}\r
};\r
}\r
\r
dst[1] = g;\r
dst[bidx^2] = r;\r
}\r
+\r
template <int bidx, int HR, typename T> static __device__ void HSV2RGBConvert(const T& src, uchar* dst)\r
{\r
float3 buf;\r
dst[1] = saturate_cast<uchar>(buf.y * 255.f);\r
dst[2] = saturate_cast<uchar>(buf.z * 255.f);\r
}\r
+\r
template <int bidx, int hr> static __device__ uint HSV2RGBConvert(uint src)\r
{\r
float3 buf;\r
return dst;\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx, int hr> struct HSV2RGB : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx, int hr> struct HSV2RGB\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ HSV2RGB()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ HSV2RGB(const HSV2RGB& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx, int hr> struct HSV2RGB<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator()(uint src) const\r
{\r
return HSV2RGBConvert<bidx, hr>(src);\r
}\r
+ __device__ __forceinline__ HSV2RGB():unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ HSV2RGB(const HSV2RGB& other_):unary_function<uint, uint>(){}\r
};\r
}\r
\r
dst.y = l;\r
dst.z = s;\r
}\r
+\r
template <int bidx, int hr, typename D> static __device__ void RGB2HLSConvert(const uchar* src, D& dst)\r
{\r
float3 buf;\r
dst.y = saturate_cast<uchar>(buf.y*255.f);\r
dst.z = saturate_cast<uchar>(buf.z*255.f);\r
}\r
+\r
template <int bidx, int hr> static __device__ uint RGB2HLSConvert(uint src)\r
{\r
float3 buf;\r
return dst;\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx, int hr> struct RGB2HLS : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx, int hr> struct RGB2HLS\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ RGB2HLS()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ RGB2HLS(const RGB2HLS& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx, int hr> struct RGB2HLS<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator()(uint src) const\r
{\r
return RGB2HLSConvert<bidx, hr>(src);\r
}\r
+ __device__ __forceinline__ RGB2HLS() : unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ RGB2HLS(const RGB2HLS& other_) : unary_function<uint, uint>(){}\r
};\r
}\r
\r
dst[1] = g;\r
dst[bidx^2] = r;\r
}\r
+\r
template <int bidx, int hr, typename T> static __device__ void HLS2RGBConvert(const T& src, uchar* dst)\r
{\r
float3 buf;\r
dst[1] = saturate_cast<uchar>(buf.y * 255.f);\r
dst[2] = saturate_cast<uchar>(buf.z * 255.f);\r
}\r
+\r
template <int bidx, int hr> static __device__ uint HLS2RGBConvert(uint src)\r
{\r
float3 buf;\r
return dst;\r
}\r
\r
- template <typename T, int scn, int dcn, int bidx, int hr> struct HLS2RGB : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
+ template <typename T, int scn, int dcn, int bidx, int hr> struct HLS2RGB\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>\r
{\r
__device__ __forceinline__ typename TypeVec<T, dcn>::vec_type operator()(const typename TypeVec<T, scn>::vec_type& src) const\r
{\r
\r
return dst;\r
}\r
+ __device__ __forceinline__ HLS2RGB()\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
+ __device__ __forceinline__ HLS2RGB(const HLS2RGB& other_)\r
+ : unary_function<typename TypeVec<T, scn>::vec_type, typename TypeVec<T, dcn>::vec_type>(){}\r
};\r
+\r
template <int bidx, int hr> struct HLS2RGB<uchar, 4, 4, bidx, hr> : unary_function<uint, uint>\r
{\r
__device__ __forceinline__ uint operator()(uint src) const\r
{\r
return HLS2RGBConvert<bidx, hr>(src);\r
}\r
+ __device__ __forceinline__ HLS2RGB() : unary_function<uint, uint>(){}\r
+ __device__ __forceinline__ HLS2RGB(const HLS2RGB& other_) : unary_function<uint, uint>(){}\r
};\r
}\r
\r
using thrust::binary_function;\r
\r
// Arithmetic Operations\r
-\r
template <typename T> struct plus : binary_function<T, T, T>\r
{\r
- __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a + b;\r
}\r
+ __device__ __forceinline__ plus(const plus& other):binary_function<T,T,T>(){}\r
+ __device__ __forceinline__ plus():binary_function<T,T,T>(){}\r
};\r
+\r
template <typename T> struct minus : binary_function<T, T, T>\r
{\r
- __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a - b;\r
}\r
+ __device__ __forceinline__ minus(const minus& other):binary_function<T,T,T>(){}\r
+ __device__ __forceinline__ minus():binary_function<T,T,T>(){}\r
};\r
+\r
template <typename T> struct multiplies : binary_function<T, T, T>\r
{\r
- __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a * b;\r
}\r
+ __device__ __forceinline__ multiplies(const multiplies& other):binary_function<T,T,T>(){}\r
+ __device__ __forceinline__ multiplies():binary_function<T,T,T>(){}\r
};\r
+\r
template <typename T> struct divides : binary_function<T, T, T>\r
{\r
- __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a / b;\r
}\r
+ __device__ __forceinline__ divides(const divides& other):binary_function<T,T,T>(){}\r
+ __device__ __forceinline__ divides():binary_function<T,T,T>(){}\r
};\r
+\r
template <typename T> struct modulus : binary_function<T, T, T>\r
{\r
- __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a % b;\r
}\r
+ __device__ __forceinline__ modulus(const modulus& other):binary_function<T,T,T>(){}\r
+ __device__ __forceinline__ modulus():binary_function<T,T,T>(){}\r
};\r
+\r
template <typename T> struct negate : unary_function<T, T>\r
{\r
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a) const\r
{\r
return -a;\r
}\r
+ __device__ __forceinline__ negate(const negate& other):unary_function<T,T>(){}\r
+ __device__ __forceinline__ negate():unary_function<T,T>(){}\r
};\r
\r
// Comparison Operations\r
-\r
template <typename T> struct equal_to : binary_function<T, T, bool>\r
{\r
- __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a == b;\r
}\r
+ __device__ __forceinline__ equal_to(const equal_to& other):binary_function<T,T,bool>(){}\r
+ __device__ __forceinline__ equal_to():binary_function<T,T,bool>(){}\r
};\r
+\r
template <typename T> struct not_equal_to : binary_function<T, T, bool>\r
{\r
- __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a != b;\r
}\r
+ __device__ __forceinline__ not_equal_to(const not_equal_to& other):binary_function<T,T,bool>(){}\r
+ __device__ __forceinline__ not_equal_to():binary_function<T,T,bool>(){}\r
};\r
+\r
template <typename T> struct greater : binary_function<T, T, bool>\r
{\r
- __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a > b;\r
}\r
+ __device__ __forceinline__ greater(const greater& other):binary_function<T,T,bool>(){}\r
+ __device__ __forceinline__ greater():binary_function<T,T,bool>(){}\r
};\r
+\r
template <typename T> struct less : binary_function<T, T, bool>\r
{\r
- __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a < b;\r
}\r
+ __device__ __forceinline__ less(const less& other):binary_function<T,T,bool>(){}\r
+ __device__ __forceinline__ less():binary_function<T,T,bool>(){}\r
};\r
+\r
template <typename T> struct greater_equal : binary_function<T, T, bool>\r
{\r
- __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a >= b;\r
}\r
+ __device__ __forceinline__ greater_equal(const greater_equal& other):binary_function<T,T,bool>(){}\r
+ __device__ __forceinline__ greater_equal():binary_function<T,T,bool>(){}\r
};\r
+\r
template <typename T> struct less_equal : binary_function<T, T, bool>\r
{\r
- __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a <= b;\r
}\r
+ __device__ __forceinline__ less_equal(const less_equal& other):binary_function<T,T,bool>(){}\r
+ __device__ __forceinline__ less_equal():binary_function<T,T,bool>(){}\r
};\r
\r
// Logical Operations\r
-\r
template <typename T> struct logical_and : binary_function<T, T, bool>\r
{\r
- __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a && b;\r
}\r
+ __device__ __forceinline__ logical_and(const logical_and& other):binary_function<T,T,bool>(){}\r
+ __device__ __forceinline__ logical_and():binary_function<T,T,bool>(){}\r
};\r
+\r
template <typename T> struct logical_or : binary_function<T, T, bool>\r
{\r
- __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a || b;\r
}\r
+ __device__ __forceinline__ logical_or(const logical_or& other):binary_function<T,T,bool>(){}\r
+ __device__ __forceinline__ logical_or():binary_function<T,T,bool>(){}\r
};\r
+\r
template <typename T> struct logical_not : unary_function<T, bool>\r
{\r
__device__ __forceinline__ bool operator ()(typename TypeTraits<T>::ParameterType a) const\r
{\r
return !a;\r
}\r
+ __device__ __forceinline__ logical_not(const logical_not& other):unary_function<T,bool>(){}\r
+ __device__ __forceinline__ logical_not():unary_function<T,bool>(){}\r
};\r
\r
// Bitwise Operations\r
-\r
template <typename T> struct bit_and : binary_function<T, T, T>\r
{\r
- __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a & b;\r
}\r
+ __device__ __forceinline__ bit_and(const bit_and& other):binary_function<T,T,T>(){}\r
+ __device__ __forceinline__ bit_and():binary_function<T,T,T>(){}\r
};\r
+\r
template <typename T> struct bit_or : binary_function<T, T, T>\r
{\r
- __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a | b;\r
}\r
+ __device__ __forceinline__ bit_or(const bit_or& other):binary_function<T,T,T>(){}\r
+ __device__ __forceinline__ bit_or():binary_function<T,T,T>(){}\r
};\r
+\r
template <typename T> struct bit_xor : binary_function<T, T, T>\r
{\r
- __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a, typename TypeTraits<T>::ParameterType b) const\r
+ __device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType a,\r
+ typename TypeTraits<T>::ParameterType b) const\r
{\r
return a ^ b;\r
}\r
+ __device__ __forceinline__ bit_xor(const bit_xor& other):binary_function<T,T,T>(){}\r
+ __device__ __forceinline__ bit_xor():binary_function<T,T,T>(){}\r
};\r
+\r
template <typename T> struct bit_not : unary_function<T, T>\r
{\r
__device__ __forceinline__ T operator ()(typename TypeTraits<T>::ParameterType v) const \r
{\r
return ~v;\r
}\r
+ __device__ __forceinline__ bit_not(const bit_not& other):unary_function<T,T>(){}\r
+ __device__ __forceinline__ bit_not():unary_function<T,T>(){}\r
};\r
\r
// Generalized Identity Operations\r
-\r
template <typename T> struct identity : unary_function<T, T>\r
{\r
__device__ __forceinline__ typename TypeTraits<T>::ParameterType operator()(typename TypeTraits<T>::ParameterType x) const \r
{\r
return x;\r
}\r
+ __device__ __forceinline__ identity(const identity& other):unary_function<T,T>(){}\r
+ __device__ __forceinline__ identity():unary_function<T,T>(){}\r
};\r
\r
template <typename T1, typename T2> struct project1st : binary_function<T1, T2, T1>\r
{\r
return lhs;\r
}\r
+ __device__ __forceinline__ project1st(const project1st& other):binary_function<T1,T2,T1>(){}\r
+ __device__ __forceinline__ project1st():binary_function<T1,T2,T1>(){}\r
};\r
+\r
template <typename T1, typename T2> struct project2nd : binary_function<T1, T2, T2>\r
{\r
__device__ __forceinline__ typename TypeTraits<T2>::ParameterType operator()(typename TypeTraits<T1>::ParameterType lhs, typename TypeTraits<T2>::ParameterType rhs) const \r
{\r
return rhs;\r
}\r
+ __device__ __forceinline__ project2nd(const project2nd& other):binary_function<T1,T2,T2>(){}\r
+ __device__ __forceinline__ project2nd():binary_function<T1,T2,T2>(){}\r
};\r
\r
// Min/Max Operations\r
template <> struct name<type> : binary_function<type, type, type> \\r
{ \\r
__device__ __forceinline__ type operator()(type lhs, type rhs) const {return op(lhs, rhs);} \\r
+ __device__ __forceinline__ name(const name& other):binary_function<type, type, type>(){}\\r
+ __device__ __forceinline__ name():binary_function<type, type, type>(){}\\r
};\r
\r
template <typename T> struct maximum : binary_function<T, T, T>\r
{\r
return lhs < rhs ? rhs : lhs;\r
}\r
+ __device__ __forceinline__ maximum(const maximum& other):binary_function<T, T, T>(){}\r
+ __device__ __forceinline__ maximum():binary_function<T, T, T>(){}\r
};\r
\r
OPENCV_GPU_IMPLEMENT_MINMAX(maximum, uchar, ::max)\r
{\r
return lhs < rhs ? lhs : rhs;\r
}\r
+ __device__ __forceinline__ minimum(const minimum& other):binary_function<T, T, T>(){}\r
+ __device__ __forceinline__ minimum():binary_function<T, T, T>(){}\r
};\r
\r
OPENCV_GPU_IMPLEMENT_MINMAX(minimum, uchar, ::min)\r
#undef OPENCV_GPU_IMPLEMENT_MINMAX\r
\r
// Math functions\r
-\r
#define OPENCV_GPU_IMPLEMENT_UN_FUNCTOR(name, func) \\r
template <typename T> struct name ## _func : unary_function<T, float> \\r
{ \\r
};\r
\r
// Saturate Cast Functor\r
-\r
template <typename T, typename D> struct saturate_cast_func : unary_function<T, D>\r
{\r
__device__ __forceinline__ D operator ()(typename TypeTraits<T>::ParameterType v) const\r
{\r
return saturate_cast<D>(v);\r
}\r
+ __device__ __forceinline__ saturate_cast_func(const saturate_cast_func& other):unary_function<T, D>(){}\r
+ __device__ __forceinline__ saturate_cast_func():unary_function<T, D>(){}\r
};\r
\r
// Threshold Functors\r
-\r
template <typename T> struct thresh_binary_func : unary_function<T, T>\r
{\r
__host__ __device__ __forceinline__ thresh_binary_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}\r
{\r
return (src > thresh) * maxVal;\r
}\r
+ __device__ __forceinline__ thresh_binary_func(const thresh_binary_func& other)\r
+ : unary_function<T, T>(), thresh(other.thresh), maxVal(other.maxVal){}\r
+\r
+ __device__ __forceinline__ thresh_binary_func():unary_function<T, T>(){}\r
\r
const T thresh;\r
const T maxVal;\r
};\r
+\r
template <typename T> struct thresh_binary_inv_func : unary_function<T, T>\r
{\r
__host__ __device__ __forceinline__ thresh_binary_inv_func(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}\r
{\r
return (src <= thresh) * maxVal;\r
}\r
+ __device__ __forceinline__ thresh_binary_inv_func(const thresh_binary_inv_func& other)\r
+ : unary_function<T, T>(), thresh(other.thresh), maxVal(other.maxVal){}\r
+\r
+ __device__ __forceinline__ thresh_binary_inv_func():unary_function<T, T>(){}\r
\r
const T thresh;\r
const T maxVal;\r
};\r
+\r
template <typename T> struct thresh_trunc_func : unary_function<T, T>\r
{\r
explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {}\r
return minimum<T>()(src, thresh);\r
}\r
\r
+ __device__ __forceinline__ thresh_trunc_func(const thresh_trunc_func& other)\r
+ : unary_function<T, T>(), thresh(other.thresh){}\r
+\r
+ __device__ __forceinline__ thresh_trunc_func():unary_function<T, T>(){}\r
+\r
const T thresh;\r
};\r
+\r
template <typename T> struct thresh_to_zero_func : unary_function<T, T>\r
{\r
explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {}\r
{\r
return (src > thresh) * src;\r
}\r
+ __device__ __forceinline__ thresh_to_zero_func(const thresh_to_zero_func& other)\r
+ : unary_function<T, T>(), thresh(other.thresh){}\r
+\r
+ __device__ __forceinline__ thresh_to_zero_func():unary_function<T, T>(){}\r
\r
const T thresh;\r
};\r
+\r
template <typename T> struct thresh_to_zero_inv_func : unary_function<T, T>\r
{\r
explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {}\r
{\r
return (src <= thresh) * src;\r
}\r
+ __device__ __forceinline__ thresh_to_zero_inv_func(const thresh_to_zero_inv_func& other)\r
+ : unary_function<T, T>(), thresh(other.thresh){}\r
+\r
+ __device__ __forceinline__ thresh_to_zero_inv_func():unary_function<T, T>(){}\r
\r
const T thresh;\r
};\r
-\r
// Function Object Adaptors\r
-\r
template <typename Predicate> struct unary_negate : unary_function<typename Predicate::argument_type, bool>\r
{\r
explicit __host__ __device__ __forceinline__ unary_negate(const Predicate& p) : pred(p) {}\r
}\r
template<> __device__ __forceinline__ uchar saturate_cast<uchar>(double v)\r
{\r
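+// __CUDA_ARCH__ is only defined during the device compilation pass, so the\r
+// "defined" check below avoids evaluating an undefined macro (and the related\r
+// warnings) when this header is compiled for the host.\r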
- #if __CUDA_ARCH__ >= 130\r
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130\r
int iv = __double2int_rn(v); \r
return saturate_cast<uchar>(iv);\r
#else\r
}\r
template<> __device__ __forceinline__ schar saturate_cast<schar>(double v)\r
{ \r
- #if __CUDA_ARCH__ >= 130\r
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130\r
int iv = __double2int_rn(v); \r
return saturate_cast<schar>(iv);\r
#else\r
}\r
template<> __device__ __forceinline__ ushort saturate_cast<ushort>(double v)\r
{ \r
- #if __CUDA_ARCH__ >= 130\r
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130\r
int iv = __double2int_rn(v); \r
return saturate_cast<ushort>(iv);\r
#else\r
}\r
template<> __device__ __forceinline__ short saturate_cast<short>(double v)\r
{ \r
- #if __CUDA_ARCH__ >= 130\r
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130\r
int iv = __double2int_rn(v); \r
return saturate_cast<short>(iv);\r
#else\r
}\r
template<> __device__ __forceinline__ int saturate_cast<int>(double v) \r
{\r
- #if __CUDA_ARCH__ >= 130 \r
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130\r
return __double2int_rn(v);\r
#else\r
return saturate_cast<int>((float)v);\r
}\r
template<> __device__ __forceinline__ uint saturate_cast<uint>(double v) \r
{ \r
- #if __CUDA_ARCH__ >= 130\r
+ #if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 130\r
return __double2uint_rn(v);\r
#else\r
return saturate_cast<uint>((float)v);\r
}\r
}}}\r
\r
-#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */
\ No newline at end of file
+#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */\r
namespace cv { namespace gpu { namespace device \r
{\r
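+// The mask functor is now passed to transform() by const reference (see the\r
+// signatures below); presumably this avoids copying the mask object on every\r
+// call and plays well with the mask types that define explicit copy constructors.\r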
template <typename T, typename D, typename UnOp, typename Mask>\r
- static inline void transform(DevMem2D_<T> src, DevMem2D_<D> dst, UnOp op, Mask mask, cudaStream_t stream)\r
+ static inline void transform(DevMem2D_<T> src, DevMem2D_<D> dst, UnOp op, const Mask& mask, cudaStream_t stream)\r
{\r
typedef TransformFunctorTraits<UnOp> ft;\r
transform_detail::TransformDispatcher<VecTraits<T>::cn == 1 && VecTraits<D>::cn == 1 && ft::smart_shift != 1>::call(src, dst, op, mask, stream);\r
}\r
\r
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\r
- static inline void transform(DevMem2D_<T1> src1, DevMem2D_<T2> src2, DevMem2D_<D> dst, BinOp op, Mask mask, cudaStream_t stream)\r
+ static inline void transform(DevMem2D_<T1> src1, DevMem2D_<T2> src2, DevMem2D_<D> dst, BinOp op, const Mask& mask, cudaStream_t stream)\r
{\r
typedef TransformFunctorTraits<BinOp> ft;\r
transform_detail::TransformDispatcher<VecTraits<T1>::cn == 1 && VecTraits<T2>::cn == 1 && VecTraits<D>::cn == 1 && ft::smart_shift != 1>::call(src1, src2, dst, op, mask, stream);\r
struct SingleMask\r
{\r
explicit __host__ __device__ __forceinline__ SingleMask(PtrStepb mask_) : mask(mask_) {}\r
+ __host__ __device__ __forceinline__ SingleMask(const SingleMask& mask_): mask(mask_.mask){}\r
\r
__device__ __forceinline__ bool operator()(int y, int x) const\r
{ \r
\r
struct SingleMaskChannels\r
{\r
- __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_) : mask(mask_), channels(channels_) {}\r
+ __host__ __device__ __forceinline__ SingleMaskChannels(PtrStepb mask_, int channels_)\r
+ : mask(mask_), channels(channels_) {}\r
+ __host__ __device__ __forceinline__ SingleMaskChannels(const SingleMaskChannels& mask_)\r
+ :mask(mask_.mask), channels(mask_.channels){}\r
\r
__device__ __forceinline__ bool operator()(int y, int x) const\r
{ \r
\r
struct MaskCollection\r
{\r
- explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_) : maskCollection(maskCollection_) {}\r
+ explicit __host__ __device__ __forceinline__ MaskCollection(PtrStepb* maskCollection_)\r
+ : maskCollection(maskCollection_) {}\r
+\r
+ __device__ __forceinline__ MaskCollection(const MaskCollection& masks_)\r
+ : maskCollection(masks_.maskCollection), curMask(masks_.curMask){}\r
\r
__device__ __forceinline__ void next()\r
{\r
\r
struct WithOutMask\r
{\r
+ __device__ __forceinline__ WithOutMask(){}\r
+ __device__ __forceinline__ WithOutMask(const WithOutMask& mask){}\r
+\r
__device__ __forceinline__ void next() const\r
{\r
}\r
#ifndef __OPENCV_PRECOMP_H__\r
#define __OPENCV_PRECOMP_H__\r
\r
-#if _MSC_VER >= 1200\r
+#if defined _MSC_VER && _MSC_VER >= 1200\r
#pragma warning( disable: 4251 4710 4711 4514 4996 )\r
#endif\r
\r
--- /dev/null
+#ifndef __main_test_nvidia_h__
+#define __main_test_nvidia_h__
+
+#include <string>
+
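+// Verbosity levels for the NVIDIA (NCV/NPPST) test runners declared below.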
+enum OutputLevel
+{
+ OutputLevelNone,
+ OutputLevelCompact,
+ OutputLevelFull
+};
+
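+// Each wrapper runs one group of NVIDIA NCV/NPPST regression tests against the
+// data under test_data_path; presumably it returns true when the whole group passes.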
+bool nvidia_NPPST_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NPPST_Squared_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NPPST_RectStdDev(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NPPST_Resize(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NPPST_Vector_Operations(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NPPST_Transpose(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NCV_Vector_Operations(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NCV_Haar_Cascade_Loader(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NCV_Haar_Cascade_Application(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NCV_Hypotheses_Filtration(const std::string& test_data_path, OutputLevel outputLevel);
+bool nvidia_NCV_Visualization(const std::string& test_data_path, OutputLevel outputLevel);
+
+#endif
#include <vector>\r
\r
#include "NCVTest.hpp"\r
-\r
-enum OutputLevel\r
-{\r
- OutputLevelNone,\r
- OutputLevelCompact,\r
- OutputLevelFull\r
-};\r
+#include <main_test_nvidia.h>\r
\r
class NCVAutoTestLister\r
{\r
#ifndef _ncvtest_hpp_\r
#define _ncvtest_hpp_\r
\r
-#pragma warning( disable : 4201 4408 4127 4100)\r
+#if defined _MSC_VER\r
+# pragma warning( disable : 4201 4408 4127 4100)\r
+#endif\r
\r
#include <string>\r
#include <vector>\r
public:\r
virtual bool executeTest(NCVTestReport &report) = 0;\r
virtual std::string getName() const = 0;\r
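+ // Virtual destructor so concrete tests can be destroyed safely through an\r
+ // INCVTest pointer.\r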
+ virtual ~INCVTest(){}\r
};\r
\r
\r
#include "NCVAutoTestLister.hpp"\r
#include "NCVTestSourceProvider.hpp"\r
\r
+#include <main_test_nvidia.h>\r
+\r
static std::string path;\r
\r
+namespace {\r
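+// Anonymous namespace: the test-generation helpers below get internal linkage.\r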
\r
template <class T_in, class T_out>\r
-void generateIntegralTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T_in> &src,\r
+void generateIntegralTests(NCVAutoTestLister &testLister,\r
+ NCVTestSourceProvider<T_in> &src,\r
Ncv32u maxWidth, Ncv32u maxHeight)\r
{\r
for (Ncv32f _i=1.0; _i<maxWidth; _i*=1.2f)\r
testLister.add(new TestIntegralImage<T_in, T_out>(testName, src, 2, i));\r
}\r
\r
- //test VGA\r
testLister.add(new TestIntegralImage<T_in, T_out>("LinIntImg_VGA", src, 640, 480));\r
-\r
- //TODO: add tests of various resolutions up to 4096x4096\r
}\r
\r
-\r
void generateSquaredIntegralTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv8u> &src,\r
Ncv32u maxWidth, Ncv32u maxHeight)\r
{\r
testLister.add(new TestIntegralImageSquared(testName, src, 32, i));\r
}\r
\r
- //test VGA\r
testLister.add(new TestIntegralImageSquared("SqLinIntImg_VGA", src, 640, 480));\r
-\r
- //TODO: add tests of various resolutions up to 4096x4096\r
}\r
\r
-\r
void generateRectStdDevTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv8u> &src,\r
Ncv32u maxWidth, Ncv32u maxHeight)\r
{\r
testLister.add(new TestRectStdDev(testName, src, i-1, i*2-1, rect, 2.5, true));\r
}\r
\r
- //test VGA\r
testLister.add(new TestRectStdDev("RectStdDev_VGA", src, 640, 480, rect, 1, true));\r
-\r
- //TODO: add tests of various resolutions up to 4096x4096\r
}\r
\r
-\r
template <class T>\r
void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T> &src)\r
{\r
- //test VGA\r
for (Ncv32u i=1; i<480; i+=3)\r
{\r
char testName[80];\r
testLister.add(new TestResize<T>(testName, src, 640, 480, i, false));\r
}\r
\r
- //test HD\r
for (Ncv32u i=1; i<1080; i+=5)\r
{\r
char testName[80];\r
testLister.add(new TestResize<T>(testName, src, 1920, 1080, i, true));\r
testLister.add(new TestResize<T>(testName, src, 1920, 1080, i, false));\r
}\r
-\r
- //TODO: add tests of various resolutions up to 4096x4096\r
}\r
\r
-\r
void generateNPPSTVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)\r
{\r
//compaction\r
testLister.add(new TestTranspose<T>("TestTranspose_reg_0", src, 1072, 375));\r
}\r
\r
-\r
template <class T>\r
-void generateDrawRectsTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T> &src, NCVTestSourceProvider<Ncv32u> &src32u,\r
+void generateDrawRectsTests(NCVAutoTestLister &testLister,\r
+ NCVTestSourceProvider<T> &src,\r
+ NCVTestSourceProvider<Ncv32u> &src32u,\r
Ncv32u maxWidth, Ncv32u maxHeight)\r
{\r
for (Ncv32f _i=16.0; _i<maxWidth; _i*=1.1f)\r
\r
//test VGA\r
testLister.add(new TestDrawRects<T>("DrawRects_VGA", src, src32u, 640, 480, 640*480/1000, (T)0xFF));\r
-\r
- //TODO: add tests of various resolutions up to 4096x4096\r
}\r
\r
-\r
void generateVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)\r
{\r
//growth\r
testLister.add(new TestHypothesesGrow("VectorGrow00b", src, 10, 42, 1.2f, 10, 0, 10, 0));\r
}\r
\r
-\r
void generateHypothesesFiltrationTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)\r
{\r
for (Ncv32f _i=1.0; _i<maxLength; _i*=1.1f)\r
testLister.add(new TestHaarCascadeLoader("haarcascade_eye_tree_eyeglasses.xml", path + "haarcascade_eye_tree_eyeglasses.xml"));\r
}\r
\r
-\r
void generateHaarApplicationTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv8u> &src,\r
Ncv32u maxWidth, Ncv32u maxHeight)\r
{\r
\r
static void devNullOutput(const std::string& msg)\r
{\r
-\r
}\r
\r
bool nvidia_NPPST_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel)\r
return testListerII.invoke();\r
}\r
\r
+}\r
+\r
bool nvidia_NPPST_Squared_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel)\r
{\r
path = test_data_path;\r
//\r
//M*/\r
\r
+#ifdef __GNUC__\r
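+// Presumably needed because the test sources define helper functions without\r
+// prior declarations, which would otherwise trigger this warning on GCC.\r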
+# pragma GCC diagnostic ignored "-Wmissing-declarations"\r
+#endif\r
+\r
#ifndef __OPENCV_TEST_PRECOMP_HPP__\r
#define __OPENCV_TEST_PRECOMP_HPP__\r
\r
bool useRoi;\r
\r
cv::Mat img;\r
- cv::Mat kernel;\r
\r
virtual void SetUp()\r
{\r
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "precomp.hpp"
-
-namespace {
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////
-// Integral
-
-PARAM_TEST_CASE(Integral, cv::gpu::DeviceInfo, cv::Size, UseRoi)
-{
- cv::gpu::DeviceInfo devInfo;
- cv::Size size;
- bool useRoi;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
- useRoi = GET_PARAM(2);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(Integral, Accuracy)
-{
- cv::Mat src = randomMat(size, CV_8UC1);
-
- cv::gpu::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_32SC1, useRoi);
- cv::gpu::integral(loadMat(src, useRoi), dst);
-
- cv::Mat dst_gold;
- cv::integral(src, dst_gold, CV_32S);
-
- EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Integral, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES,
- WHOLE_SUBMAT));
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////
-// HistEven
-
-struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>
-{
- cv::gpu::DeviceInfo devInfo;
-
- virtual void SetUp()
- {
- devInfo = GetParam();
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(HistEven, Accuracy)
-{
- cv::Mat img = readImage("stereobm/aloe-L.png");
- ASSERT_FALSE(img.empty());
-
- cv::Mat hsv;
- cv::cvtColor(img, hsv, CV_BGR2HSV);
-
- int hbins = 30;
- float hranges[] = {0.0f, 180.0f};
-
- std::vector<cv::gpu::GpuMat> srcs;
- cv::gpu::split(loadMat(hsv), srcs);
-
- cv::gpu::GpuMat hist;
- cv::gpu::histEven(srcs[0], hist, hbins, (int)hranges[0], (int)hranges[1]);
-
- cv::MatND histnd;
- int histSize[] = {hbins};
- const float* ranges[] = {hranges};
- int channels[] = {0};
- cv::calcHist(&hsv, 1, channels, cv::Mat(), histnd, 1, histSize, ranges);
-
- cv::Mat hist_gold = histnd;
- hist_gold = hist_gold.t();
- hist_gold.convertTo(hist_gold, CV_32S);
-
- EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, ALL_DEVICES);
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////
-// CalcHist
-
-void calcHistGold(const cv::Mat& src, cv::Mat& hist)
-{
- hist.create(1, 256, CV_32SC1);
- hist.setTo(cv::Scalar::all(0));
-
- int* hist_row = hist.ptr<int>();
- for (int y = 0; y < src.rows; ++y)
- {
- const uchar* src_row = src.ptr(y);
-
- for (int x = 0; x < src.cols; ++x)
- ++hist_row[src_row[x]];
- }
-}
-
-PARAM_TEST_CASE(CalcHist, cv::gpu::DeviceInfo, cv::Size)
-{
- cv::gpu::DeviceInfo devInfo;
-
- cv::Size size;
- cv::Mat src;
- cv::Mat hist_gold;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(CalcHist, Accuracy)
-{
- cv::Mat src = randomMat(size, CV_8UC1);
-
- cv::gpu::GpuMat hist;
- cv::gpu::calcHist(loadMat(src), hist);
-
- cv::Mat hist_gold;
- calcHistGold(src, hist_gold);
-
- EXPECT_MAT_NEAR(hist_gold, hist, 0.0);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CalcHist, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES));
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////
-// EqualizeHist
-
-PARAM_TEST_CASE(EqualizeHist, cv::gpu::DeviceInfo, cv::Size)
-{
- cv::gpu::DeviceInfo devInfo;
- cv::Size size;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(EqualizeHist, Accuracy)
-{
- cv::Mat src = randomMat(size, CV_8UC1);
-
- cv::gpu::GpuMat dst;
- cv::gpu::equalizeHist(loadMat(src), dst);
-
- cv::Mat dst_gold;
- cv::equalizeHist(src, dst_gold);
-
- EXPECT_MAT_NEAR(dst_gold, dst, 3.0);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, EqualizeHist, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES));
-
-////////////////////////////////////////////////////////////////////////
-// ColumnSum
-
-PARAM_TEST_CASE(ColumnSum, cv::gpu::DeviceInfo, cv::Size)
-{
- cv::gpu::DeviceInfo devInfo;
- cv::Size size;
-
- cv::Mat src;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(ColumnSum, Accuracy)
-{
- cv::Mat src = randomMat(size, CV_32FC1);
-
- cv::gpu::GpuMat d_dst;
- cv::gpu::columnSum(loadMat(src), d_dst);
-
- cv::Mat dst(d_dst);
-
- for (int j = 0; j < src.cols; ++j)
- {
- float gold = src.at<float>(0, j);
- float res = dst.at<float>(0, j);
- ASSERT_NEAR(res, gold, 1e-5);
- }
-
- for (int i = 1; i < src.rows; ++i)
- {
- for (int j = 0; j < src.cols; ++j)
- {
- float gold = src.at<float>(i, j) += src.at<float>(i - 1, j);
- float res = dst.at<float>(i, j);
- ASSERT_NEAR(res, gold, 1e-5);
- }
- }
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, ColumnSum, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES));
-
-////////////////////////////////////////////////////////
-// Canny
-
-IMPLEMENT_PARAM_CLASS(AppertureSize, int);
-IMPLEMENT_PARAM_CLASS(L2gradient, bool);
-
-PARAM_TEST_CASE(Canny, cv::gpu::DeviceInfo, AppertureSize, L2gradient, UseRoi)
-{
- cv::gpu::DeviceInfo devInfo;
- int apperture_size;
- bool useL2gradient;
- bool useRoi;
-
- cv::Mat edges_gold;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- apperture_size = GET_PARAM(1);
- useL2gradient = GET_PARAM(2);
- useRoi = GET_PARAM(3);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(Canny, Accuracy)
-{
- cv::Mat img = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);
- ASSERT_FALSE(img.empty());
-
- double low_thresh = 50.0;
- double high_thresh = 100.0;
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "precomp.hpp"\r
+\r
+namespace {\r
+\r
+///////////////////////////////////////////////////////////////////////////////////////////////////////\r
+// Integral\r
+\r
+PARAM_TEST_CASE(Integral, cv::gpu::DeviceInfo, cv::Size, UseRoi)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ cv::Size size;\r
+ bool useRoi;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+ useRoi = GET_PARAM(2);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(Integral, Accuracy)\r
+{\r
+ cv::Mat src = randomMat(size, CV_8UC1);\r
+\r
+ cv::gpu::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_32SC1, useRoi);\r
+ cv::gpu::integral(loadMat(src, useRoi), dst);\r
+\r
+ cv::Mat dst_gold;\r
+ cv::integral(src, dst_gold, CV_32S);\r
+\r
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.0);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Integral, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES,\r
+ WHOLE_SUBMAT));\r
+\r
+///////////////////////////////////////////////////////////////////////////////////////////////////////\r
+// HistEven\r
+\r
+struct HistEven : testing::TestWithParam<cv::gpu::DeviceInfo>\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GetParam();\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(HistEven, Accuracy)\r
+{\r
+ cv::Mat img = readImage("stereobm/aloe-L.png");\r
+ ASSERT_FALSE(img.empty());\r
+\r
+ cv::Mat hsv;\r
+ cv::cvtColor(img, hsv, CV_BGR2HSV);\r
+\r
+ int hbins = 30;\r
+ float hranges[] = {0.0f, 180.0f};\r
+\r
+ std::vector<cv::gpu::GpuMat> srcs;\r
+ cv::gpu::split(loadMat(hsv), srcs);\r
+\r
+ cv::gpu::GpuMat hist;\r
+ cv::gpu::histEven(srcs[0], hist, hbins, (int)hranges[0], (int)hranges[1]);\r
+\r
+ cv::MatND histnd;\r
+ int histSize[] = {hbins};\r
+ const float* ranges[] = {hranges};\r
+ int channels[] = {0};\r
+ cv::calcHist(&hsv, 1, channels, cv::Mat(), histnd, 1, histSize, ranges);\r
+\r
+ cv::Mat hist_gold = histnd;\r
+ hist_gold = hist_gold.t();\r
+ hist_gold.convertTo(hist_gold, CV_32S);\r
+\r
+ EXPECT_MAT_NEAR(hist_gold, hist, 0.0);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, HistEven, ALL_DEVICES);\r
+\r
+///////////////////////////////////////////////////////////////////////////////////////////////////////\r
+// CalcHist\r
+\r
+void calcHistGold(const cv::Mat& src, cv::Mat& hist)\r
+{\r
+ hist.create(1, 256, CV_32SC1);\r
+ hist.setTo(cv::Scalar::all(0));\r
+\r
+ int* hist_row = hist.ptr<int>();\r
+ for (int y = 0; y < src.rows; ++y)\r
+ {\r
+ const uchar* src_row = src.ptr(y);\r
+\r
+ for (int x = 0; x < src.cols; ++x)\r
+ ++hist_row[src_row[x]];\r
+ }\r
+}\r
+\r
+PARAM_TEST_CASE(CalcHist, cv::gpu::DeviceInfo, cv::Size)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+\r
+ cv::Size size;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(CalcHist, Accuracy)\r
+{\r
+ cv::Mat src = randomMat(size, CV_8UC1);\r
+\r
+ cv::gpu::GpuMat hist;\r
+ cv::gpu::calcHist(loadMat(src), hist);\r
+\r
+ cv::Mat hist_gold;\r
+ calcHistGold(src, hist_gold);\r
+\r
+ EXPECT_MAT_NEAR(hist_gold, hist, 0.0);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CalcHist, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES));\r
+\r
+///////////////////////////////////////////////////////////////////////////////////////////////////////\r
+// EqualizeHist\r
+\r
+PARAM_TEST_CASE(EqualizeHist, cv::gpu::DeviceInfo, cv::Size)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ cv::Size size;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(EqualizeHist, Accuracy)\r
+{\r
+ cv::Mat src = randomMat(size, CV_8UC1);\r
+\r
+ cv::gpu::GpuMat dst;\r
+ cv::gpu::equalizeHist(loadMat(src), dst);\r
+\r
+ cv::Mat dst_gold;\r
+ cv::equalizeHist(src, dst_gold);\r
+\r
+ EXPECT_MAT_NEAR(dst_gold, dst, 3.0);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, EqualizeHist, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES));\r
+\r
+////////////////////////////////////////////////////////////////////////\r
+// ColumnSum\r
+\r
+PARAM_TEST_CASE(ColumnSum, cv::gpu::DeviceInfo, cv::Size)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ cv::Size size;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(ColumnSum, Accuracy)\r
+{\r
+ cv::Mat src = randomMat(size, CV_32FC1);\r
+\r
+ cv::gpu::GpuMat d_dst;\r
+ cv::gpu::columnSum(loadMat(src), d_dst);\r
+\r
+ cv::Mat dst(d_dst);\r
+\r
+ for (int j = 0; j < src.cols; ++j)\r
+ {\r
+ float gold = src.at<float>(0, j);\r
+ float res = dst.at<float>(0, j);\r
+ ASSERT_NEAR(res, gold, 1e-5);\r
+ }\r
+\r
+ for (int i = 1; i < src.rows; ++i)\r
+ {\r
+ for (int j = 0; j < src.cols; ++j)\r
+ {\r
+ float gold = src.at<float>(i, j) += src.at<float>(i - 1, j);\r
+ float res = dst.at<float>(i, j);\r
+ ASSERT_NEAR(res, gold, 1e-5);\r
+ }\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, ColumnSum, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES));\r
+\r
+////////////////////////////////////////////////////////\r
+// Canny\r
+\r
+IMPLEMENT_PARAM_CLASS(AppertureSize, int);\r
+IMPLEMENT_PARAM_CLASS(L2gradient, bool);\r
+\r
+PARAM_TEST_CASE(Canny, cv::gpu::DeviceInfo, AppertureSize, L2gradient, UseRoi)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ int apperture_size;\r
+ bool useL2gradient;\r
+ bool useRoi;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ apperture_size = GET_PARAM(1);\r
+ useL2gradient = GET_PARAM(2);\r
+ useRoi = GET_PARAM(3);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(Canny, Accuracy)\r
+{\r
+ cv::Mat img = readImage("stereobm/aloe-L.png", cv::IMREAD_GRAYSCALE);\r
+ ASSERT_FALSE(img.empty());\r
+\r
+ double low_thresh = 50.0;\r
+ double high_thresh = 100.0;\r
\r
if (!supportFeature(devInfo, cv::gpu::SHARED_ATOMICS))\r
{\r
try\r
- {
- cv::gpu::GpuMat edges;
+ {\r
+ cv::gpu::GpuMat edges;\r
cv::gpu::Canny(loadMat(img), edges, low_thresh, high_thresh, apperture_size, useL2gradient);\r
}\r
catch (const cv::Exception& e)\r
}\r
}\r
else\r
- {
- cv::gpu::GpuMat edges;
- cv::gpu::Canny(loadMat(img, useRoi), edges, low_thresh, high_thresh, apperture_size, useL2gradient);
-
- cv::Mat edges_gold;
- cv::Canny(img, edges_gold, low_thresh, high_thresh, apperture_size, useL2gradient);
-
- EXPECT_MAT_SIMILAR(edges_gold, edges, 1e-2);
- }
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Canny, testing::Combine(
- ALL_DEVICES,
- testing::Values(AppertureSize(3), AppertureSize(5)),
- testing::Values(L2gradient(false), L2gradient(true)),
- WHOLE_SUBMAT));
-
-////////////////////////////////////////////////////////////////////////////////
-// MeanShift
-
-struct MeanShift : testing::TestWithParam<cv::gpu::DeviceInfo>
-{
- cv::gpu::DeviceInfo devInfo;
-
- cv::Mat img;
-
- int spatialRad;
- int colorRad;
-
- virtual void SetUp()
- {
- devInfo = GetParam();
-
- cv::gpu::setDevice(devInfo.deviceID());
-
- img = readImageType("meanshift/cones.png", CV_8UC4);
- ASSERT_FALSE(img.empty());
-
- spatialRad = 30;
- colorRad = 30;
- }
-};
-
-TEST_P(MeanShift, Filtering)
-{
- cv::Mat img_template;
- if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
- img_template = readImage("meanshift/con_result.png");
- else
- img_template = readImage("meanshift/con_result_CC1X.png");
- ASSERT_FALSE(img_template.empty());
-
- cv::gpu::GpuMat d_dst;
- cv::gpu::meanShiftFiltering(loadMat(img), d_dst, spatialRad, colorRad);
-
- ASSERT_EQ(CV_8UC4, d_dst.type());
-
- cv::Mat dst(d_dst);
-
- cv::Mat result;
- cv::cvtColor(dst, result, CV_BGRA2BGR);
-
- EXPECT_MAT_NEAR(img_template, result, 0.0);
-}
-
-TEST_P(MeanShift, Proc)
-{
- cv::FileStorage fs;
- if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
- fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap.yaml", cv::FileStorage::READ);
- else
- fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap_CC1X.yaml", cv::FileStorage::READ);
- ASSERT_TRUE(fs.isOpened());
-
- cv::Mat spmap_template;
- fs["spmap"] >> spmap_template;
- ASSERT_FALSE(spmap_template.empty());
-
- cv::gpu::GpuMat rmap_filtered;
- cv::gpu::meanShiftFiltering(loadMat(img), rmap_filtered, spatialRad, colorRad);
-
- cv::gpu::GpuMat rmap;
- cv::gpu::GpuMat spmap;
- cv::gpu::meanShiftProc(loadMat(img), rmap, spmap, spatialRad, colorRad);
-
- ASSERT_EQ(CV_8UC4, rmap.type());
-
- EXPECT_MAT_NEAR(rmap_filtered, rmap, 0.0);
- EXPECT_MAT_NEAR(spmap_template, spmap, 0.0);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShift, ALL_DEVICES);
-
-////////////////////////////////////////////////////////////////////////////////
-// MeanShiftSegmentation
-
-IMPLEMENT_PARAM_CLASS(MinSize, int);
-
-PARAM_TEST_CASE(MeanShiftSegmentation, cv::gpu::DeviceInfo, MinSize)
-{
- cv::gpu::DeviceInfo devInfo;
- int minsize;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- minsize = GET_PARAM(1);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(MeanShiftSegmentation, Regression)
-{
- cv::Mat img = readImageType("meanshift/cones.png", CV_8UC4);
- ASSERT_FALSE(img.empty());
-
- std::ostringstream path;
- path << "meanshift/cones_segmented_sp10_sr10_minsize" << minsize;
- if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))
- path << ".png";
- else
- path << "_CC1X.png";
- cv::Mat dst_gold = readImage(path.str());
- ASSERT_FALSE(dst_gold.empty());
-
- cv::Mat dst;
- cv::gpu::meanShiftSegmentation(loadMat(img), dst, 10, 10, minsize);
-
- cv::Mat dst_rgb;
- cv::cvtColor(dst, dst_rgb, CV_BGRA2BGR);
-
- EXPECT_MAT_SIMILAR(dst_gold, dst_rgb, 1e-3);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShiftSegmentation, testing::Combine(
- ALL_DEVICES,
- testing::Values(MinSize(0), MinSize(4), MinSize(20), MinSize(84), MinSize(340), MinSize(1364))));
-
-////////////////////////////////////////////////////////////////////////////
-// Blend
-
-template <typename T>
-void blendLinearGold(const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& weights1, const cv::Mat& weights2, cv::Mat& result_gold)
-{
- result_gold.create(img1.size(), img1.type());
-
- int cn = img1.channels();
-
- for (int y = 0; y < img1.rows; ++y)
- {
- const float* weights1_row = weights1.ptr<float>(y);
- const float* weights2_row = weights2.ptr<float>(y);
- const T* img1_row = img1.ptr<T>(y);
- const T* img2_row = img2.ptr<T>(y);
- T* result_gold_row = result_gold.ptr<T>(y);
-
- for (int x = 0; x < img1.cols * cn; ++x)
- {
- float w1 = weights1_row[x / cn];
- float w2 = weights2_row[x / cn];
- result_gold_row[x] = static_cast<T>((img1_row[x] * w1 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));
- }
- }
-}
-
-PARAM_TEST_CASE(Blend, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)
-{
- cv::gpu::DeviceInfo devInfo;
- cv::Size size;
- int type;
- bool useRoi;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
- type = GET_PARAM(2);
- useRoi = GET_PARAM(3);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(Blend, Accuracy)
-{
- int depth = CV_MAT_DEPTH(type);
-
- cv::Mat img1 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);
- cv::Mat img2 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);
- cv::Mat weights1 = randomMat(size, CV_32F, 0, 1);
- cv::Mat weights2 = randomMat(size, CV_32F, 0, 1);
-
- cv::gpu::GpuMat result;
- cv::gpu::blendLinear(loadMat(img1, useRoi), loadMat(img2, useRoi), loadMat(weights1, useRoi), loadMat(weights2, useRoi), result);
-
- cv::Mat result_gold;
- if (depth == CV_8U)
- blendLinearGold<uchar>(img1, img2, weights1, weights2, result_gold);
- else
- blendLinearGold<float>(img1, img2, weights1, weights2, result_gold);
-
- EXPECT_MAT_NEAR(result_gold, result, CV_MAT_DEPTH(type) == CV_8U ? 1.0 : 1e-5);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Blend, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES,
- testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),
- WHOLE_SUBMAT));
-
-////////////////////////////////////////////////////////
-// Convolve
-
-void convolveDFT(const cv::Mat& A, const cv::Mat& B, cv::Mat& C, bool ccorr = false)
-{
- // reallocate the output array if needed
- C.create(std::abs(A.rows - B.rows) + 1, std::abs(A.cols - B.cols) + 1, A.type());
- cv::Size dftSize;
-
- // compute the size of DFT transform
- dftSize.width = cv::getOptimalDFTSize(A.cols + B.cols - 1);
- dftSize.height = cv::getOptimalDFTSize(A.rows + B.rows - 1);
-
- // allocate temporary buffers and initialize them with 0s
- cv::Mat tempA(dftSize, A.type(), cv::Scalar::all(0));
- cv::Mat tempB(dftSize, B.type(), cv::Scalar::all(0));
-
- // copy A and B to the top-left corners of tempA and tempB, respectively
- cv::Mat roiA(tempA, cv::Rect(0, 0, A.cols, A.rows));
- A.copyTo(roiA);
- cv::Mat roiB(tempB, cv::Rect(0, 0, B.cols, B.rows));
- B.copyTo(roiB);
-
- // now transform the padded A & B in-place;
- // use "nonzeroRows" hint for faster processing
- cv::dft(tempA, tempA, 0, A.rows);
- cv::dft(tempB, tempB, 0, B.rows);
-
- // multiply the spectrums;
- // the function handles packed spectrum representations well
- cv::mulSpectrums(tempA, tempB, tempA, 0, ccorr);
-
- // transform the product back from the frequency domain.
- // Even though all the result rows will be non-zero,
- // you need only the first C.rows of them, and thus you
- // pass nonzeroRows == C.rows
- cv::dft(tempA, tempA, cv::DFT_INVERSE + cv::DFT_SCALE, C.rows);
-
- // now copy the result back to C.
- tempA(cv::Rect(0, 0, C.cols, C.rows)).copyTo(C);
-}
-
-IMPLEMENT_PARAM_CLASS(KSize, int);
-IMPLEMENT_PARAM_CLASS(Ccorr, bool);
-
-PARAM_TEST_CASE(Convolve, cv::gpu::DeviceInfo, cv::Size, KSize, Ccorr)
-{
- cv::gpu::DeviceInfo devInfo;
- cv::Size size;
- int ksize;
- bool ccorr;
-
- cv::Mat src;
- cv::Mat kernel;
-
- cv::Mat dst_gold;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
- ksize = GET_PARAM(2);
- ccorr = GET_PARAM(3);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(Convolve, Accuracy)
-{
- cv::Mat src = randomMat(size, CV_32FC1, 0.0, 100.0);
- cv::Mat kernel = randomMat(cv::Size(ksize, ksize), CV_32FC1, 0.0, 1.0);
-
- cv::gpu::GpuMat dst;
- cv::gpu::convolve(loadMat(src), loadMat(kernel), dst, ccorr);
-
- cv::Mat dst_gold;
- convolveDFT(src, kernel, dst_gold, ccorr);
-
- EXPECT_MAT_NEAR(dst, dst_gold, 1e-1);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Convolve, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES,
- testing::Values(KSize(3), KSize(7), KSize(11), KSize(17), KSize(19), KSize(23), KSize(45)),
- testing::Values(Ccorr(false), Ccorr(true))));
-
-////////////////////////////////////////////////////////////////////////////////
-// MatchTemplate8U
-
-CV_ENUM(TemplateMethod, cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCORR, cv::TM_CCORR_NORMED, cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)
-#define ALL_TEMPLATE_METHODS testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_SQDIFF_NORMED), TemplateMethod(cv::TM_CCORR), TemplateMethod(cv::TM_CCORR_NORMED), TemplateMethod(cv::TM_CCOEFF), TemplateMethod(cv::TM_CCOEFF_NORMED))
-
-IMPLEMENT_PARAM_CLASS(TemplateSize, cv::Size);
-
-PARAM_TEST_CASE(MatchTemplate8U, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
-{
- cv::gpu::DeviceInfo devInfo;
- cv::Size size;
- cv::Size templ_size;
- int cn;
- int method;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
- templ_size = GET_PARAM(2);
- cn = GET_PARAM(3);
- method = GET_PARAM(4);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(MatchTemplate8U, Accuracy)
-{
- cv::Mat image = randomMat(size, CV_MAKETYPE(CV_8U, cn));
- cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_8U, cn));
-
- cv::gpu::GpuMat dst;
- cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);
-
- cv::Mat dst_gold;
- cv::matchTemplate(image, templ, dst_gold, method);
-
- EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate8U, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES,
- testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
- testing::Values(Channels(1), Channels(3), Channels(4)),
- ALL_TEMPLATE_METHODS));
-
-////////////////////////////////////////////////////////////////////////////////
-// MatchTemplate32F
-
-PARAM_TEST_CASE(MatchTemplate32F, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)
-{
- cv::gpu::DeviceInfo devInfo;
- cv::Size size;
- cv::Size templ_size;
- int cn;
- int method;
-
- int n, m, h, w;
- cv::Mat image, templ;
-
- cv::Mat dst_gold;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
- templ_size = GET_PARAM(2);
- cn = GET_PARAM(3);
- method = GET_PARAM(4);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(MatchTemplate32F, Regression)
-{
- cv::Mat image = randomMat(size, CV_MAKETYPE(CV_32F, cn));
- cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_32F, cn));
-
- cv::gpu::GpuMat dst;
- cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);
-
- cv::Mat dst_gold;
- cv::matchTemplate(image, templ, dst_gold, method);
-
- EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate32F, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES,
- testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),
- testing::Values(Channels(1), Channels(3), Channels(4)),
- testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_CCORR))));
-
-////////////////////////////////////////////////////////////////////////////////
-// MatchTemplateBlackSource
-
-PARAM_TEST_CASE(MatchTemplateBlackSource, cv::gpu::DeviceInfo, TemplateMethod)
-{
- cv::gpu::DeviceInfo devInfo;
- int method;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- method = GET_PARAM(1);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(MatchTemplateBlackSource, Accuracy)
-{
- cv::Mat image = readImage("matchtemplate/black.png");
- ASSERT_FALSE(image.empty());
-
- cv::Mat pattern = readImage("matchtemplate/cat.png");
- ASSERT_FALSE(pattern.empty());
-
- cv::gpu::GpuMat d_dst;
- cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, method);
-
- cv::Mat dst(d_dst);
-
- double maxValue;
- cv::Point maxLoc;
- cv::minMaxLoc(dst, NULL, &maxValue, NULL, &maxLoc);
-
- cv::Point maxLocGold = cv::Point(284, 12);
-
- ASSERT_EQ(maxLocGold, maxLoc);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplateBlackSource, testing::Combine(
- ALL_DEVICES,
- testing::Values(TemplateMethod(cv::TM_CCOEFF_NORMED), TemplateMethod(cv::TM_CCORR_NORMED))));
-
-////////////////////////////////////////////////////////////////////////////////
-// MatchTemplate_CCOEF_NORMED
-
-PARAM_TEST_CASE(MatchTemplate_CCOEF_NORMED, cv::gpu::DeviceInfo, std::pair<std::string, std::string>)
-{
- cv::gpu::DeviceInfo devInfo;
- std::string imageName;
- std::string patternName;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- imageName = GET_PARAM(1).first;
- patternName = GET_PARAM(1).second;
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)
-{
- cv::Mat image = readImage(imageName);
- ASSERT_FALSE(image.empty());
-
- cv::Mat pattern = readImage(patternName);
- ASSERT_FALSE(pattern.empty());
-
- cv::gpu::GpuMat d_dst;
- cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, CV_TM_CCOEFF_NORMED);
-
- cv::Mat dst(d_dst);
-
- cv::Point minLoc, maxLoc;
- double minVal, maxVal;
- cv::minMaxLoc(dst, &minVal, &maxVal, &minLoc, &maxLoc);
-
- cv::Mat dstGold;
- cv::matchTemplate(image, pattern, dstGold, CV_TM_CCOEFF_NORMED);
-
- double minValGold, maxValGold;
- cv::Point minLocGold, maxLocGold;
- cv::minMaxLoc(dstGold, &minValGold, &maxValGold, &minLocGold, &maxLocGold);
-
- ASSERT_EQ(minLocGold, minLoc);
- ASSERT_EQ(maxLocGold, maxLoc);
- ASSERT_LE(maxVal, 1.0);
- ASSERT_GE(minVal, -1.0);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CCOEF_NORMED, testing::Combine(
- ALL_DEVICES,
- testing::Values(std::make_pair(std::string("matchtemplate/source-0.png"), std::string("matchtemplate/target-0.png")))));
-
-////////////////////////////////////////////////////////////////////////////////
-// MatchTemplate_CanFindBigTemplate
-
-struct MatchTemplate_CanFindBigTemplate : testing::TestWithParam<cv::gpu::DeviceInfo>
-{
- cv::gpu::DeviceInfo devInfo;
-
- virtual void SetUp()
- {
- devInfo = GetParam();
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)
-{
- cv::Mat scene = readImage("matchtemplate/scene.jpg");
- ASSERT_FALSE(scene.empty());
-
- cv::Mat templ = readImage("matchtemplate/template.jpg");
- ASSERT_FALSE(templ.empty());
-
- cv::gpu::GpuMat d_result;
- cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, CV_TM_SQDIFF_NORMED);
-
- cv::Mat result(d_result);
-
- double minVal;
- cv::Point minLoc;
- cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);
-
- ASSERT_GE(minVal, 0);
- ASSERT_LT(minVal, 1e-3);
- ASSERT_EQ(344, minLoc.x);
- ASSERT_EQ(0, minLoc.y);
-}
-
-TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)
-{
- cv::Mat scene = readImage("matchtemplate/scene.jpg");
- ASSERT_FALSE(scene.empty());
-
- cv::Mat templ = readImage("matchtemplate/template.jpg");
- ASSERT_FALSE(templ.empty());
-
- cv::gpu::GpuMat d_result;
- cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, CV_TM_SQDIFF);
-
- cv::Mat result(d_result);
-
- double minVal;
- cv::Point minLoc;
- cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);
-
- ASSERT_GE(minVal, 0);
- ASSERT_EQ(344, minLoc.x);
- ASSERT_EQ(0, minLoc.y);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CanFindBigTemplate, ALL_DEVICES);
-
-////////////////////////////////////////////////////////////////////////////
-// MulSpectrums
-
-CV_FLAGS(DftFlags, 0, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX_OUTPUT, cv::DFT_REAL_OUTPUT)
-
-PARAM_TEST_CASE(MulSpectrums, cv::gpu::DeviceInfo, cv::Size, DftFlags)
-{
- cv::gpu::DeviceInfo devInfo;
- cv::Size size;
- int flag;
-
- cv::Mat a, b;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- size = GET_PARAM(1);
- flag = GET_PARAM(2);
-
- cv::gpu::setDevice(devInfo.deviceID());
-
- a = randomMat(size, CV_32FC2);
- b = randomMat(size, CV_32FC2);
- }
-};
-
-TEST_P(MulSpectrums, Simple)
-{
- cv::gpu::GpuMat c;
- cv::gpu::mulSpectrums(loadMat(a), loadMat(b), c, flag, false);
-
- cv::Mat c_gold;
- cv::mulSpectrums(a, b, c_gold, flag, false);
-
- EXPECT_MAT_NEAR(c_gold, c, 1e-2);
-}
-
-TEST_P(MulSpectrums, Scaled)
-{
- float scale = 1.f / size.area();
-
- cv::gpu::GpuMat c;
- cv::gpu::mulAndScaleSpectrums(loadMat(a), loadMat(b), c, flag, scale, false);
-
- cv::Mat c_gold;
- cv::mulSpectrums(a, b, c_gold, flag, false);
- c_gold.convertTo(c_gold, c_gold.type(), scale);
-
- EXPECT_MAT_NEAR(c_gold, c, 1e-2);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MulSpectrums, testing::Combine(
- ALL_DEVICES,
- DIFFERENT_SIZES,
- testing::Values(DftFlags(0), DftFlags(cv::DFT_ROWS))));
-
-////////////////////////////////////////////////////////////////////////////
-// Dft
-
-struct Dft : testing::TestWithParam<cv::gpu::DeviceInfo>
-{
- cv::gpu::DeviceInfo devInfo;
-
- virtual void SetUp()
- {
- devInfo = GetParam();
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-void testC2C(const std::string& hint, int cols, int rows, int flags, bool inplace)
-{
- SCOPED_TRACE(hint);
-
- cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC2, 0.0, 10.0);
-
- cv::Mat b_gold;
- cv::dft(a, b_gold, flags);
-
- cv::gpu::GpuMat d_b;
- cv::gpu::GpuMat d_b_data;
- if (inplace)
- {
- d_b_data.create(1, a.size().area(), CV_32FC2);
- d_b = cv::gpu::GpuMat(a.rows, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());
- }
- cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), flags);
-
- EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());
- ASSERT_EQ(CV_32F, d_b.depth());
- ASSERT_EQ(2, d_b.channels());
- EXPECT_MAT_NEAR(b_gold, cv::Mat(d_b), rows * cols * 1e-4);
-}
-
-TEST_P(Dft, C2C)
-{
- int cols = randomInt(2, 100);
- int rows = randomInt(2, 100);
-
- for (int i = 0; i < 2; ++i)
- {
- bool inplace = i != 0;
-
- testC2C("no flags", cols, rows, 0, inplace);
- testC2C("no flags 0 1", cols, rows + 1, 0, inplace);
- testC2C("no flags 1 0", cols, rows + 1, 0, inplace);
- testC2C("no flags 1 1", cols + 1, rows, 0, inplace);
- testC2C("DFT_INVERSE", cols, rows, cv::DFT_INVERSE, inplace);
- testC2C("DFT_ROWS", cols, rows, cv::DFT_ROWS, inplace);
- testC2C("single col", 1, rows, 0, inplace);
- testC2C("single row", cols, 1, 0, inplace);
- testC2C("single col inversed", 1, rows, cv::DFT_INVERSE, inplace);
- testC2C("single row inversed", cols, 1, cv::DFT_INVERSE, inplace);
- testC2C("single row DFT_ROWS", cols, 1, cv::DFT_ROWS, inplace);
- testC2C("size 1 2", 1, 2, 0, inplace);
- testC2C("size 2 1", 2, 1, 0, inplace);
- }
-}
-
-void testR2CThenC2R(const std::string& hint, int cols, int rows, bool inplace)
-{
- SCOPED_TRACE(hint);
-
- cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC1, 0.0, 10.0);
-
- cv::gpu::GpuMat d_b, d_c;
- cv::gpu::GpuMat d_b_data, d_c_data;
- if (inplace)
- {
- if (a.cols == 1)
- {
- d_b_data.create(1, (a.rows / 2 + 1) * a.cols, CV_32FC2);
- d_b = cv::gpu::GpuMat(a.rows / 2 + 1, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());
- }
- else
- {
- d_b_data.create(1, a.rows * (a.cols / 2 + 1), CV_32FC2);
- d_b = cv::gpu::GpuMat(a.rows, a.cols / 2 + 1, CV_32FC2, d_b_data.ptr(), (a.cols / 2 + 1) * d_b_data.elemSize());
- }
- d_c_data.create(1, a.size().area(), CV_32F);
- d_c = cv::gpu::GpuMat(a.rows, a.cols, CV_32F, d_c_data.ptr(), a.cols * d_c_data.elemSize());
- }
-
- cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), 0);
- cv::gpu::dft(d_b, d_c, cv::Size(cols, rows), cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
-
- EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());
- EXPECT_TRUE(!inplace || d_c.ptr() == d_c_data.ptr());
- ASSERT_EQ(CV_32F, d_c.depth());
- ASSERT_EQ(1, d_c.channels());
-
- cv::Mat c(d_c);
- EXPECT_MAT_NEAR(a, c, rows * cols * 1e-5);
-}
-
-TEST_P(Dft, R2CThenC2R)
-{
- int cols = randomInt(2, 100);
- int rows = randomInt(2, 100);
-
- testR2CThenC2R("sanity", cols, rows, false);
- testR2CThenC2R("sanity 0 1", cols, rows + 1, false);
- testR2CThenC2R("sanity 1 0", cols + 1, rows, false);
- testR2CThenC2R("sanity 1 1", cols + 1, rows + 1, false);
- testR2CThenC2R("single col", 1, rows, false);
- testR2CThenC2R("single col 1", 1, rows + 1, false);
- testR2CThenC2R("single row", cols, 1, false);
- testR2CThenC2R("single row 1", cols + 1, 1, false);
-
- testR2CThenC2R("sanity", cols, rows, true);
- testR2CThenC2R("sanity 0 1", cols, rows + 1, true);
- testR2CThenC2R("sanity 1 0", cols + 1, rows, true);
- testR2CThenC2R("sanity 1 1", cols + 1, rows + 1, true);
- testR2CThenC2R("single row", cols, 1, true);
- testR2CThenC2R("single row 1", cols + 1, 1, true);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Dft, ALL_DEVICES);
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////
-// CornerHarris
-
-IMPLEMENT_PARAM_CLASS(BlockSize, int);
-IMPLEMENT_PARAM_CLASS(ApertureSize, int);
-
-PARAM_TEST_CASE(CornerHarris, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
-{
- cv::gpu::DeviceInfo devInfo;
- int type;
- int borderType;
- int blockSize;
- int apertureSize;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- type = GET_PARAM(1);
- borderType = GET_PARAM(2);
- blockSize = GET_PARAM(3);
- apertureSize = GET_PARAM(4);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(CornerHarris, Accuracy)
-{
- cv::Mat src = readImageType("stereobm/aloe-L.png", type);
- ASSERT_FALSE(src.empty());
-
- double k = randomDouble(0.1, 0.9);
-
- cv::gpu::GpuMat dst;
- cv::gpu::cornerHarris(loadMat(src), dst, blockSize, apertureSize, k, borderType);
-
- cv::Mat dst_gold;
- cv::cornerHarris(src, dst_gold, blockSize, apertureSize, k, borderType);
-
- EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerHarris, testing::Combine(
- ALL_DEVICES,
- testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
- testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
- testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
- testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////
-// cornerMinEigen
-
-PARAM_TEST_CASE(CornerMinEigen, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)
-{
- cv::gpu::DeviceInfo devInfo;
- int type;
- int borderType;
- int blockSize;
- int apertureSize;
-
- virtual void SetUp()
- {
- devInfo = GET_PARAM(0);
- type = GET_PARAM(1);
- borderType = GET_PARAM(2);
- blockSize = GET_PARAM(3);
- apertureSize = GET_PARAM(4);
-
- cv::gpu::setDevice(devInfo.deviceID());
- }
-};
-
-TEST_P(CornerMinEigen, Accuracy)
-{
- cv::Mat src = readImageType("stereobm/aloe-L.png", type);
- ASSERT_FALSE(src.empty());
-
- cv::gpu::GpuMat dst;
- cv::gpu::cornerMinEigenVal(loadMat(src), dst, blockSize, apertureSize, borderType);
-
- cv::Mat dst_gold;
- cv::cornerMinEigenVal(src, dst_gold, blockSize, apertureSize, borderType);
-
- EXPECT_MAT_NEAR(dst_gold, dst, 0.02);
-}
-
-INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerMinEigen, testing::Combine(
- ALL_DEVICES,
- testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),
- testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),
- testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),
- testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));
-
-} // namespace
+ {\r
+ cv::gpu::GpuMat edges;\r
+ cv::gpu::Canny(loadMat(img, useRoi), edges, low_thresh, high_thresh, apperture_size, useL2gradient);\r
+\r
+ cv::Mat edges_gold;\r
+ cv::Canny(img, edges_gold, low_thresh, high_thresh, apperture_size, useL2gradient);\r
+\r
+ EXPECT_MAT_SIMILAR(edges_gold, edges, 1e-2);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Canny, testing::Combine(\r
+ ALL_DEVICES,\r
+ testing::Values(AppertureSize(3), AppertureSize(5)),\r
+ testing::Values(L2gradient(false), L2gradient(true)),\r
+ WHOLE_SUBMAT));\r
+\r
+////////////////////////////////////////////////////////////////////////////////\r
+// MeanShift\r
+\r
+struct MeanShift : testing::TestWithParam<cv::gpu::DeviceInfo>\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+\r
+ cv::Mat img;\r
+\r
+ int spatialRad;\r
+ int colorRad;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GetParam();\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ img = readImageType("meanshift/cones.png", CV_8UC4);\r
+ ASSERT_FALSE(img.empty());\r
+\r
+ spatialRad = 30;\r
+ colorRad = 30;\r
+ }\r
+};\r
+\r
+TEST_P(MeanShift, Filtering)\r
+{\r
+ cv::Mat img_template;\r
+ if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))\r
+ img_template = readImage("meanshift/con_result.png");\r
+ else\r
+ img_template = readImage("meanshift/con_result_CC1X.png");\r
+ ASSERT_FALSE(img_template.empty());\r
+\r
+ cv::gpu::GpuMat d_dst;\r
+ cv::gpu::meanShiftFiltering(loadMat(img), d_dst, spatialRad, colorRad);\r
+\r
+ ASSERT_EQ(CV_8UC4, d_dst.type());\r
+\r
+ cv::Mat dst(d_dst);\r
+\r
+ cv::Mat result;\r
+ cv::cvtColor(dst, result, CV_BGRA2BGR);\r
+\r
+ EXPECT_MAT_NEAR(img_template, result, 0.0);\r
+}\r
+\r
+TEST_P(MeanShift, Proc)\r
+{\r
+ cv::FileStorage fs;\r
+ if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))\r
+ fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap.yaml", cv::FileStorage::READ);\r
+ else\r
+ fs.open(std::string(cvtest::TS::ptr()->get_data_path()) + "meanshift/spmap_CC1X.yaml", cv::FileStorage::READ);\r
+ ASSERT_TRUE(fs.isOpened());\r
+\r
+ cv::Mat spmap_template;\r
+ fs["spmap"] >> spmap_template;\r
+ ASSERT_FALSE(spmap_template.empty());\r
+\r
+ cv::gpu::GpuMat rmap_filtered;\r
+ cv::gpu::meanShiftFiltering(loadMat(img), rmap_filtered, spatialRad, colorRad);\r
+\r
+ cv::gpu::GpuMat rmap;\r
+ cv::gpu::GpuMat spmap;\r
+ cv::gpu::meanShiftProc(loadMat(img), rmap, spmap, spatialRad, colorRad);\r
+\r
+ ASSERT_EQ(CV_8UC4, rmap.type());\r
+\r
+ EXPECT_MAT_NEAR(rmap_filtered, rmap, 0.0);\r
+ EXPECT_MAT_NEAR(spmap_template, spmap, 0.0);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShift, ALL_DEVICES);\r
+\r
+////////////////////////////////////////////////////////////////////////////////\r
+// MeanShiftSegmentation\r
+\r
+IMPLEMENT_PARAM_CLASS(MinSize, int);\r
+\r
+PARAM_TEST_CASE(MeanShiftSegmentation, cv::gpu::DeviceInfo, MinSize)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ int minsize;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ minsize = GET_PARAM(1);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(MeanShiftSegmentation, Regression)\r
+{\r
+ cv::Mat img = readImageType("meanshift/cones.png", CV_8UC4);\r
+ ASSERT_FALSE(img.empty());\r
+\r
+ std::ostringstream path;\r
+ path << "meanshift/cones_segmented_sp10_sr10_minsize" << minsize;\r
+ if (supportFeature(devInfo, cv::gpu::FEATURE_SET_COMPUTE_20))\r
+ path << ".png";\r
+ else\r
+ path << "_CC1X.png";\r
+ cv::Mat dst_gold = readImage(path.str());\r
+ ASSERT_FALSE(dst_gold.empty());\r
+\r
+ cv::Mat dst;\r
+ cv::gpu::meanShiftSegmentation(loadMat(img), dst, 10, 10, minsize);\r
+\r
+ cv::Mat dst_rgb;\r
+ cv::cvtColor(dst, dst_rgb, CV_BGRA2BGR);\r
+\r
+ EXPECT_MAT_SIMILAR(dst_gold, dst_rgb, 1e-3);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MeanShiftSegmentation, testing::Combine(\r
+ ALL_DEVICES,\r
+ testing::Values(MinSize(0), MinSize(4), MinSize(20), MinSize(84), MinSize(340), MinSize(1364))));\r
+\r
+////////////////////////////////////////////////////////////////////////////\r
+// Blend\r
+\r
+template <typename T>\r
+void blendLinearGold(const cv::Mat& img1, const cv::Mat& img2, const cv::Mat& weights1, const cv::Mat& weights2, cv::Mat& result_gold)\r
+{\r
+ result_gold.create(img1.size(), img1.type());\r
+\r
+ int cn = img1.channels();\r
+\r
+ for (int y = 0; y < img1.rows; ++y)\r
+ {\r
+ const float* weights1_row = weights1.ptr<float>(y);\r
+ const float* weights2_row = weights2.ptr<float>(y);\r
+ const T* img1_row = img1.ptr<T>(y);\r
+ const T* img2_row = img2.ptr<T>(y);\r
+ T* result_gold_row = result_gold.ptr<T>(y);\r
+\r
+ for (int x = 0; x < img1.cols * cn; ++x)\r
+ {\r
+ float w1 = weights1_row[x / cn];\r
+ float w2 = weights2_row[x / cn];\r
+ result_gold_row[x] = static_cast<T>((img1_row[x] * w1 + img2_row[x] * w2) / (w1 + w2 + 1e-5f));\r
+ }\r
+ }\r
+}\r
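As an aside, the per-pixel arithmetic in blendLinearGold can also be written with whole-image cv::Mat operations. The sketch below is an illustrative, CPU-only equivalent for the single-channel CV_32FC1 case only; the helper name blendLinearGoldMatOps is invented here and is not part of the test suite.

#include <opencv2/core/core.hpp>

static cv::Mat blendLinearGoldMatOps(const cv::Mat& img1, const cv::Mat& img2,
                                     const cv::Mat& weights1, const cv::Mat& weights2)
{
    // Sketch restricted to single-channel float images and weights.
    CV_Assert(img1.type() == CV_32FC1 && img2.type() == CV_32FC1);
    CV_Assert(weights1.type() == CV_32FC1 && weights2.type() == CV_32FC1);

    // (img1 .* w1 + img2 .* w2) ./ (w1 + w2 + eps), evaluated element-wise,
    // matching the formula in the loop above.
    cv::Mat num = img1.mul(weights1) + img2.mul(weights2);
    cv::Mat den = weights1 + weights2 + 1e-5f;

    cv::Mat result;
    cv::divide(num, den, result);
    return result;
}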
+\r
+PARAM_TEST_CASE(Blend, cv::gpu::DeviceInfo, cv::Size, MatType, UseRoi)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ cv::Size size;\r
+ int type;\r
+ bool useRoi;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+ type = GET_PARAM(2);\r
+ useRoi = GET_PARAM(3);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(Blend, Accuracy)\r
+{\r
+ int depth = CV_MAT_DEPTH(type);\r
+\r
+ cv::Mat img1 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);\r
+ cv::Mat img2 = randomMat(size, type, 0.0, depth == CV_8U ? 255.0 : 1.0);\r
+ cv::Mat weights1 = randomMat(size, CV_32F, 0, 1);\r
+ cv::Mat weights2 = randomMat(size, CV_32F, 0, 1);\r
+\r
+ cv::gpu::GpuMat result;\r
+ cv::gpu::blendLinear(loadMat(img1, useRoi), loadMat(img2, useRoi), loadMat(weights1, useRoi), loadMat(weights2, useRoi), result);\r
+\r
+ cv::Mat result_gold;\r
+ if (depth == CV_8U)\r
+ blendLinearGold<uchar>(img1, img2, weights1, weights2, result_gold);\r
+ else\r
+ blendLinearGold<float>(img1, img2, weights1, weights2, result_gold);\r
+\r
+ EXPECT_MAT_NEAR(result_gold, result, CV_MAT_DEPTH(type) == CV_8U ? 1.0 : 1e-5);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Blend, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES,\r
+ testing::Values(MatType(CV_8UC1), MatType(CV_8UC3), MatType(CV_8UC4), MatType(CV_32FC1), MatType(CV_32FC3), MatType(CV_32FC4)),\r
+ WHOLE_SUBMAT));\r
+\r
+////////////////////////////////////////////////////////\r
+// Convolve\r
+\r
+void convolveDFT(const cv::Mat& A, const cv::Mat& B, cv::Mat& C, bool ccorr = false)\r
+{\r
+ // reallocate the output array if needed\r
+ C.create(std::abs(A.rows - B.rows) + 1, std::abs(A.cols - B.cols) + 1, A.type());\r
+ cv::Size dftSize;\r
+\r
+ // compute the size of DFT transform\r
+ dftSize.width = cv::getOptimalDFTSize(A.cols + B.cols - 1);\r
+ dftSize.height = cv::getOptimalDFTSize(A.rows + B.rows - 1);\r
+\r
+ // allocate temporary buffers and initialize them with 0s\r
+ cv::Mat tempA(dftSize, A.type(), cv::Scalar::all(0));\r
+ cv::Mat tempB(dftSize, B.type(), cv::Scalar::all(0));\r
+\r
+ // copy A and B to the top-left corners of tempA and tempB, respectively\r
+ cv::Mat roiA(tempA, cv::Rect(0, 0, A.cols, A.rows));\r
+ A.copyTo(roiA);\r
+ cv::Mat roiB(tempB, cv::Rect(0, 0, B.cols, B.rows));\r
+ B.copyTo(roiB);\r
+\r
+ // now transform the padded A & B in-place;\r
+ // use "nonzeroRows" hint for faster processing\r
+ cv::dft(tempA, tempA, 0, A.rows);\r
+ cv::dft(tempB, tempB, 0, B.rows);\r
+\r
+ // multiply the spectrums;\r
+ // the function handles packed spectrum representations well\r
+ cv::mulSpectrums(tempA, tempB, tempA, 0, ccorr);\r
+\r
+ // transform the product back from the frequency domain.\r
+ // Even though all the result rows will be non-zero,\r
+ // you need only the first C.rows of them, and thus you\r
+ // pass nonzeroRows == C.rows\r
+ cv::dft(tempA, tempA, cv::DFT_INVERSE + cv::DFT_SCALE, C.rows);\r
+\r
+ // now copy the result back to C.\r
+ tempA(cv::Rect(0, 0, C.cols, C.rows)).copyTo(C);\r
+}\r
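For reference, the frequency-domain helper above produces the same values as a brute-force spatial-domain loop. The sketch below is an illustrative O(rows*cols*k^2) equivalent for single-channel CV_32FC1 inputs with A at least as large as B; the name convolveDirect is invented for this sketch and is not used by the tests.

#include <opencv2/core/core.hpp>

static void convolveDirect(const cv::Mat& A, const cv::Mat& B, cv::Mat& C, bool ccorr = false)
{
    // Same output size as convolveDFT (assuming A is the larger array).
    C.create(A.rows - B.rows + 1, A.cols - B.cols + 1, CV_32FC1);

    for (int y = 0; y < C.rows; ++y)
    {
        for (int x = 0; x < C.cols; ++x)
        {
            float sum = 0.f;

            for (int i = 0; i < B.rows; ++i)
            {
                for (int j = 0; j < B.cols; ++j)
                {
                    if (ccorr)
                    {
                        // cross-correlation: slide B over A without flipping
                        sum += A.at<float>(y + i, x + j) * B.at<float>(i, j);
                    }
                    else
                    {
                        // convolution: value of the zero-padded linear convolution at (y, x)
                        int ay = y - i, ax = x - j;
                        if (ay >= 0 && ay < A.rows && ax >= 0 && ax < A.cols)
                            sum += A.at<float>(ay, ax) * B.at<float>(i, j);
                    }
                }
            }

            C.at<float>(y, x) = sum;
        }
    }
}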
+\r
+IMPLEMENT_PARAM_CLASS(KSize, int);\r
+IMPLEMENT_PARAM_CLASS(Ccorr, bool);\r
+\r
+PARAM_TEST_CASE(Convolve, cv::gpu::DeviceInfo, cv::Size, KSize, Ccorr)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ cv::Size size;\r
+ int ksize;\r
+ bool ccorr;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+ ksize = GET_PARAM(2);\r
+ ccorr = GET_PARAM(3);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(Convolve, Accuracy)\r
+{\r
+ cv::Mat src = randomMat(size, CV_32FC1, 0.0, 100.0);\r
+ cv::Mat kernel = randomMat(cv::Size(ksize, ksize), CV_32FC1, 0.0, 1.0);\r
+\r
+ cv::gpu::GpuMat dst;\r
+ cv::gpu::convolve(loadMat(src), loadMat(kernel), dst, ccorr);\r
+\r
+ cv::Mat dst_gold;\r
+ convolveDFT(src, kernel, dst_gold, ccorr);\r
+\r
+ EXPECT_MAT_NEAR(dst, dst_gold, 1e-1);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Convolve, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES,\r
+ testing::Values(KSize(3), KSize(7), KSize(11), KSize(17), KSize(19), KSize(23), KSize(45)),\r
+ testing::Values(Ccorr(false), Ccorr(true))));\r
+\r
+////////////////////////////////////////////////////////////////////////////////\r
+// MatchTemplate8U\r
+\r
+CV_ENUM(TemplateMethod, cv::TM_SQDIFF, cv::TM_SQDIFF_NORMED, cv::TM_CCORR, cv::TM_CCORR_NORMED, cv::TM_CCOEFF, cv::TM_CCOEFF_NORMED)\r
+#define ALL_TEMPLATE_METHODS testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_SQDIFF_NORMED), TemplateMethod(cv::TM_CCORR), TemplateMethod(cv::TM_CCORR_NORMED), TemplateMethod(cv::TM_CCOEFF), TemplateMethod(cv::TM_CCOEFF_NORMED))\r
+\r
+IMPLEMENT_PARAM_CLASS(TemplateSize, cv::Size);\r
+\r
+PARAM_TEST_CASE(MatchTemplate8U, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ cv::Size size;\r
+ cv::Size templ_size;\r
+ int cn;\r
+ int method;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+ templ_size = GET_PARAM(2);\r
+ cn = GET_PARAM(3);\r
+ method = GET_PARAM(4);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(MatchTemplate8U, Accuracy)\r
+{\r
+ cv::Mat image = randomMat(size, CV_MAKETYPE(CV_8U, cn));\r
+ cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_8U, cn));\r
+\r
+ cv::gpu::GpuMat dst;\r
+ cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);\r
+\r
+ cv::Mat dst_gold;\r
+ cv::matchTemplate(image, templ, dst_gold, method);\r
+\r
+ EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate8U, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES,\r
+ testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),\r
+ testing::Values(Channels(1), Channels(3), Channels(4)),\r
+ ALL_TEMPLATE_METHODS));\r
+\r
+////////////////////////////////////////////////////////////////////////////////\r
+// MatchTemplate32F\r
+\r
+PARAM_TEST_CASE(MatchTemplate32F, cv::gpu::DeviceInfo, cv::Size, TemplateSize, Channels, TemplateMethod)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ cv::Size size;\r
+ cv::Size templ_size;\r
+ int cn;\r
+ int method;\r
+\r
+ int n, m, h, w;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+ templ_size = GET_PARAM(2);\r
+ cn = GET_PARAM(3);\r
+ method = GET_PARAM(4);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(MatchTemplate32F, Regression)\r
+{\r
+ cv::Mat image = randomMat(size, CV_MAKETYPE(CV_32F, cn));\r
+ cv::Mat templ = randomMat(templ_size, CV_MAKETYPE(CV_32F, cn));\r
+\r
+ cv::gpu::GpuMat dst;\r
+ cv::gpu::matchTemplate(loadMat(image), loadMat(templ), dst, method);\r
+\r
+ cv::Mat dst_gold;\r
+ cv::matchTemplate(image, templ, dst_gold, method);\r
+\r
+ EXPECT_MAT_NEAR(dst_gold, dst, templ_size.area() * 1e-1);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate32F, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES,\r
+ testing::Values(TemplateSize(cv::Size(5, 5)), TemplateSize(cv::Size(16, 16)), TemplateSize(cv::Size(30, 30))),\r
+ testing::Values(Channels(1), Channels(3), Channels(4)),\r
+ testing::Values(TemplateMethod(cv::TM_SQDIFF), TemplateMethod(cv::TM_CCORR))));\r
+\r
+////////////////////////////////////////////////////////////////////////////////\r
+// MatchTemplateBlackSource\r
+\r
+PARAM_TEST_CASE(MatchTemplateBlackSource, cv::gpu::DeviceInfo, TemplateMethod)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ int method;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ method = GET_PARAM(1);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(MatchTemplateBlackSource, Accuracy)\r
+{\r
+ cv::Mat image = readImage("matchtemplate/black.png");\r
+ ASSERT_FALSE(image.empty());\r
+\r
+ cv::Mat pattern = readImage("matchtemplate/cat.png");\r
+ ASSERT_FALSE(pattern.empty());\r
+\r
+ cv::gpu::GpuMat d_dst;\r
+ cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, method);\r
+\r
+ cv::Mat dst(d_dst);\r
+\r
+ double maxValue;\r
+ cv::Point maxLoc;\r
+ cv::minMaxLoc(dst, NULL, &maxValue, NULL, &maxLoc);\r
+\r
+ cv::Point maxLocGold = cv::Point(284, 12);\r
+\r
+ ASSERT_EQ(maxLocGold, maxLoc);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplateBlackSource, testing::Combine(\r
+ ALL_DEVICES,\r
+ testing::Values(TemplateMethod(cv::TM_CCOEFF_NORMED), TemplateMethod(cv::TM_CCORR_NORMED))));\r
+\r
+////////////////////////////////////////////////////////////////////////////////\r
+// MatchTemplate_CCOEF_NORMED\r
+\r
+PARAM_TEST_CASE(MatchTemplate_CCOEF_NORMED, cv::gpu::DeviceInfo, std::pair<std::string, std::string>)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ std::string imageName;\r
+ std::string patternName;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ imageName = GET_PARAM(1).first;\r
+ patternName = GET_PARAM(1).second;\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(MatchTemplate_CCOEF_NORMED, Accuracy)\r
+{\r
+ cv::Mat image = readImage(imageName);\r
+ ASSERT_FALSE(image.empty());\r
+\r
+ cv::Mat pattern = readImage(patternName);\r
+ ASSERT_FALSE(pattern.empty());\r
+\r
+ cv::gpu::GpuMat d_dst;\r
+ cv::gpu::matchTemplate(loadMat(image), loadMat(pattern), d_dst, CV_TM_CCOEFF_NORMED);\r
+\r
+ cv::Mat dst(d_dst);\r
+\r
+ cv::Point minLoc, maxLoc;\r
+ double minVal, maxVal;\r
+ cv::minMaxLoc(dst, &minVal, &maxVal, &minLoc, &maxLoc);\r
+\r
+ cv::Mat dstGold;\r
+ cv::matchTemplate(image, pattern, dstGold, CV_TM_CCOEFF_NORMED);\r
+\r
+ double minValGold, maxValGold;\r
+ cv::Point minLocGold, maxLocGold;\r
+ cv::minMaxLoc(dstGold, &minValGold, &maxValGold, &minLocGold, &maxLocGold);\r
+\r
+ ASSERT_EQ(minLocGold, minLoc);\r
+ ASSERT_EQ(maxLocGold, maxLoc);\r
+ ASSERT_LE(maxVal, 1.0);\r
+ ASSERT_GE(minVal, -1.0);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CCOEF_NORMED, testing::Combine(\r
+ ALL_DEVICES,\r
+ testing::Values(std::make_pair(std::string("matchtemplate/source-0.png"), std::string("matchtemplate/target-0.png")))));\r
+\r
+////////////////////////////////////////////////////////////////////////////////\r
+// MatchTemplate_CanFindBigTemplate\r
+\r
+struct MatchTemplate_CanFindBigTemplate : testing::TestWithParam<cv::gpu::DeviceInfo>\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GetParam();\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF_NORMED)\r
+{\r
+ cv::Mat scene = readImage("matchtemplate/scene.jpg");\r
+ ASSERT_FALSE(scene.empty());\r
+\r
+ cv::Mat templ = readImage("matchtemplate/template.jpg");\r
+ ASSERT_FALSE(templ.empty());\r
+\r
+ cv::gpu::GpuMat d_result;\r
+ cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, CV_TM_SQDIFF_NORMED);\r
+\r
+ cv::Mat result(d_result);\r
+\r
+ double minVal;\r
+ cv::Point minLoc;\r
+ cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);\r
+\r
+ ASSERT_GE(minVal, 0);\r
+ ASSERT_LT(minVal, 1e-3);\r
+ ASSERT_EQ(344, minLoc.x);\r
+ ASSERT_EQ(0, minLoc.y);\r
+}\r
+\r
+TEST_P(MatchTemplate_CanFindBigTemplate, SQDIFF)\r
+{\r
+ cv::Mat scene = readImage("matchtemplate/scene.jpg");\r
+ ASSERT_FALSE(scene.empty());\r
+\r
+ cv::Mat templ = readImage("matchtemplate/template.jpg");\r
+ ASSERT_FALSE(templ.empty());\r
+\r
+ cv::gpu::GpuMat d_result;\r
+ cv::gpu::matchTemplate(loadMat(scene), loadMat(templ), d_result, CV_TM_SQDIFF);\r
+\r
+ cv::Mat result(d_result);\r
+\r
+ double minVal;\r
+ cv::Point minLoc;\r
+ cv::minMaxLoc(result, &minVal, 0, &minLoc, 0);\r
+\r
+ ASSERT_GE(minVal, 0);\r
+ ASSERT_EQ(344, minLoc.x);\r
+ ASSERT_EQ(0, minLoc.y);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MatchTemplate_CanFindBigTemplate, ALL_DEVICES);\r
+\r
+////////////////////////////////////////////////////////////////////////////\r
+// MulSpectrums\r
+\r
+CV_FLAGS(DftFlags, 0, cv::DFT_INVERSE, cv::DFT_SCALE, cv::DFT_ROWS, cv::DFT_COMPLEX_OUTPUT, cv::DFT_REAL_OUTPUT)\r
+\r
+PARAM_TEST_CASE(MulSpectrums, cv::gpu::DeviceInfo, cv::Size, DftFlags)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ cv::Size size;\r
+ int flag;\r
+\r
+ cv::Mat a, b;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ size = GET_PARAM(1);\r
+ flag = GET_PARAM(2);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ a = randomMat(size, CV_32FC2);\r
+ b = randomMat(size, CV_32FC2);\r
+ }\r
+};\r
+\r
+TEST_P(MulSpectrums, Simple)\r
+{\r
+ cv::gpu::GpuMat c;\r
+ cv::gpu::mulSpectrums(loadMat(a), loadMat(b), c, flag, false);\r
+\r
+ cv::Mat c_gold;\r
+ cv::mulSpectrums(a, b, c_gold, flag, false);\r
+\r
+ EXPECT_MAT_NEAR(c_gold, c, 1e-2);\r
+}\r
+\r
+TEST_P(MulSpectrums, Scaled)\r
+{\r
+ float scale = 1.f / size.area();\r
+\r
+ cv::gpu::GpuMat c;\r
+ cv::gpu::mulAndScaleSpectrums(loadMat(a), loadMat(b), c, flag, scale, false);\r
+\r
+ cv::Mat c_gold;\r
+ cv::mulSpectrums(a, b, c_gold, flag, false);\r
+ c_gold.convertTo(c_gold, c_gold.type(), scale);\r
+\r
+ EXPECT_MAT_NEAR(c_gold, c, 1e-2);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, MulSpectrums, testing::Combine(\r
+ ALL_DEVICES,\r
+ DIFFERENT_SIZES,\r
+ testing::Values(DftFlags(0), DftFlags(cv::DFT_ROWS))));\r
+\r
+////////////////////////////////////////////////////////////////////////////\r
+// Dft\r
+\r
+struct Dft : testing::TestWithParam<cv::gpu::DeviceInfo>\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GetParam();\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+void testC2C(const std::string& hint, int cols, int rows, int flags, bool inplace)\r
+{\r
+ SCOPED_TRACE(hint);\r
+\r
+ cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC2, 0.0, 10.0);\r
+\r
+ cv::Mat b_gold;\r
+ cv::dft(a, b_gold, flags);\r
+\r
+ cv::gpu::GpuMat d_b;\r
+ cv::gpu::GpuMat d_b_data;\r
+ if (inplace)\r
+ {\r
+ d_b_data.create(1, a.size().area(), CV_32FC2);\r
+ d_b = cv::gpu::GpuMat(a.rows, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());\r
+ }\r
+ cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), flags);\r
+\r
+ EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());\r
+ ASSERT_EQ(CV_32F, d_b.depth());\r
+ ASSERT_EQ(2, d_b.channels());\r
+ EXPECT_MAT_NEAR(b_gold, cv::Mat(d_b), rows * cols * 1e-4);\r
+}\r
+\r
+TEST_P(Dft, C2C)\r
+{\r
+ int cols = randomInt(2, 100);\r
+ int rows = randomInt(2, 100);\r
+\r
+ for (int i = 0; i < 2; ++i)\r
+ {\r
+ bool inplace = i != 0;\r
+\r
+ testC2C("no flags", cols, rows, 0, inplace);\r
+ testC2C("no flags 0 1", cols, rows + 1, 0, inplace);\r
+ testC2C("no flags 1 0", cols, rows + 1, 0, inplace);\r
+ testC2C("no flags 1 1", cols + 1, rows, 0, inplace);\r
+ testC2C("DFT_INVERSE", cols, rows, cv::DFT_INVERSE, inplace);\r
+ testC2C("DFT_ROWS", cols, rows, cv::DFT_ROWS, inplace);\r
+ testC2C("single col", 1, rows, 0, inplace);\r
+ testC2C("single row", cols, 1, 0, inplace);\r
+ testC2C("single col inversed", 1, rows, cv::DFT_INVERSE, inplace);\r
+ testC2C("single row inversed", cols, 1, cv::DFT_INVERSE, inplace);\r
+ testC2C("single row DFT_ROWS", cols, 1, cv::DFT_ROWS, inplace);\r
+ testC2C("size 1 2", 1, 2, 0, inplace);\r
+ testC2C("size 2 1", 2, 1, 0, inplace);\r
+ }\r
+}\r
+\r
+void testR2CThenC2R(const std::string& hint, int cols, int rows, bool inplace)\r
+{\r
+ SCOPED_TRACE(hint);\r
+\r
+ cv::Mat a = randomMat(cv::Size(cols, rows), CV_32FC1, 0.0, 10.0);\r
+\r
+ cv::gpu::GpuMat d_b, d_c;\r
+ cv::gpu::GpuMat d_b_data, d_c_data;\r
+ if (inplace)\r
+ {\r
+ if (a.cols == 1)\r
+ {\r
+ d_b_data.create(1, (a.rows / 2 + 1) * a.cols, CV_32FC2);\r
+ d_b = cv::gpu::GpuMat(a.rows / 2 + 1, a.cols, CV_32FC2, d_b_data.ptr(), a.cols * d_b_data.elemSize());\r
+ }\r
+ else\r
+ {\r
+ d_b_data.create(1, a.rows * (a.cols / 2 + 1), CV_32FC2);\r
+ d_b = cv::gpu::GpuMat(a.rows, a.cols / 2 + 1, CV_32FC2, d_b_data.ptr(), (a.cols / 2 + 1) * d_b_data.elemSize());\r
+ }\r
+ d_c_data.create(1, a.size().area(), CV_32F);\r
+ d_c = cv::gpu::GpuMat(a.rows, a.cols, CV_32F, d_c_data.ptr(), a.cols * d_c_data.elemSize());\r
+ }\r
+\r
+ cv::gpu::dft(loadMat(a), d_b, cv::Size(cols, rows), 0);\r
+ cv::gpu::dft(d_b, d_c, cv::Size(cols, rows), cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);\r
+\r
+ EXPECT_TRUE(!inplace || d_b.ptr() == d_b_data.ptr());\r
+ EXPECT_TRUE(!inplace || d_c.ptr() == d_c_data.ptr());\r
+ ASSERT_EQ(CV_32F, d_c.depth());\r
+ ASSERT_EQ(1, d_c.channels());\r
+\r
+ cv::Mat c(d_c);\r
+ EXPECT_MAT_NEAR(a, c, rows * cols * 1e-5);\r
+}\r
+\r
+TEST_P(Dft, R2CThenC2R)\r
+{\r
+ int cols = randomInt(2, 100);\r
+ int rows = randomInt(2, 100);\r
+\r
+ testR2CThenC2R("sanity", cols, rows, false);\r
+ testR2CThenC2R("sanity 0 1", cols, rows + 1, false);\r
+ testR2CThenC2R("sanity 1 0", cols + 1, rows, false);\r
+ testR2CThenC2R("sanity 1 1", cols + 1, rows + 1, false);\r
+ testR2CThenC2R("single col", 1, rows, false);\r
+ testR2CThenC2R("single col 1", 1, rows + 1, false);\r
+ testR2CThenC2R("single row", cols, 1, false);\r
+ testR2CThenC2R("single row 1", cols + 1, 1, false);\r
+\r
+ testR2CThenC2R("sanity", cols, rows, true);\r
+ testR2CThenC2R("sanity 0 1", cols, rows + 1, true);\r
+ testR2CThenC2R("sanity 1 0", cols + 1, rows, true);\r
+ testR2CThenC2R("sanity 1 1", cols + 1, rows + 1, true);\r
+ testR2CThenC2R("single row", cols, 1, true);\r
+ testR2CThenC2R("single row 1", cols + 1, 1, true);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, Dft, ALL_DEVICES);\r
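The R2C/C2R test above is essentially a round-trip check. The same property can be demonstrated on the CPU with cv::dft alone, as in this small illustrative sketch; the size and value range are arbitrary choices, not taken from the test.

#include <opencv2/core/core.hpp>
#include <cstdio>

int main()
{
    // Arbitrary real single-channel input.
    cv::Mat a(8, 8, CV_32FC1);
    cv::randu(a, cv::Scalar::all(0), cv::Scalar::all(10));

    // Forward DFT to a complex spectrum, then inverse with scaling back to a
    // real image: the same forward/inverse round trip the GPU test verifies.
    cv::Mat spectrum, restored;
    cv::dft(a, spectrum, cv::DFT_COMPLEX_OUTPUT);
    cv::dft(spectrum, restored, cv::DFT_INVERSE | cv::DFT_SCALE | cv::DFT_REAL_OUTPUT);

    double err = cv::norm(a, restored, cv::NORM_INF);
    std::printf("max abs error after round trip: %g\n", err); // expected to be tiny (~1e-6)
    return 0;
}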
+\r
+///////////////////////////////////////////////////////////////////////////////////////////////////////\r
+// CornerHarris\r
+\r
+IMPLEMENT_PARAM_CLASS(BlockSize, int);\r
+IMPLEMENT_PARAM_CLASS(ApertureSize, int);\r
+\r
+PARAM_TEST_CASE(CornerHarris, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ int type;\r
+ int borderType;\r
+ int blockSize;\r
+ int apertureSize;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ type = GET_PARAM(1);\r
+ borderType = GET_PARAM(2);\r
+ blockSize = GET_PARAM(3);\r
+ apertureSize = GET_PARAM(4);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(CornerHarris, Accuracy)\r
+{\r
+ cv::Mat src = readImageType("stereobm/aloe-L.png", type);\r
+ ASSERT_FALSE(src.empty());\r
+\r
+ double k = randomDouble(0.1, 0.9);\r
+\r
+ cv::gpu::GpuMat dst;\r
+ cv::gpu::cornerHarris(loadMat(src), dst, blockSize, apertureSize, k, borderType);\r
+\r
+ cv::Mat dst_gold;\r
+ cv::cornerHarris(src, dst_gold, blockSize, apertureSize, k, borderType);\r
+\r
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.02);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerHarris, testing::Combine(\r
+ ALL_DEVICES,\r
+ testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),\r
+ testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),\r
+ testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),\r
+ testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));\r
+\r
+///////////////////////////////////////////////////////////////////////////////////////////////////////\r
+// cornerMinEigen\r
+\r
+PARAM_TEST_CASE(CornerMinEigen, cv::gpu::DeviceInfo, MatType, BorderType, BlockSize, ApertureSize)\r
+{\r
+ cv::gpu::DeviceInfo devInfo;\r
+ int type;\r
+ int borderType;\r
+ int blockSize;\r
+ int apertureSize;\r
+\r
+ virtual void SetUp()\r
+ {\r
+ devInfo = GET_PARAM(0);\r
+ type = GET_PARAM(1);\r
+ borderType = GET_PARAM(2);\r
+ blockSize = GET_PARAM(3);\r
+ apertureSize = GET_PARAM(4);\r
+\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+ }\r
+};\r
+\r
+TEST_P(CornerMinEigen, Accuracy)\r
+{\r
+ cv::Mat src = readImageType("stereobm/aloe-L.png", type);\r
+ ASSERT_FALSE(src.empty());\r
+\r
+ cv::gpu::GpuMat dst;\r
+ cv::gpu::cornerMinEigenVal(loadMat(src), dst, blockSize, apertureSize, borderType);\r
+\r
+ cv::Mat dst_gold;\r
+ cv::cornerMinEigenVal(src, dst_gold, blockSize, apertureSize, borderType);\r
+\r
+ EXPECT_MAT_NEAR(dst_gold, dst, 0.02);\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(GPU_ImgProc, CornerMinEigen, testing::Combine(\r
+ ALL_DEVICES,\r
+ testing::Values(MatType(CV_8UC1), MatType(CV_32FC1)),\r
+ testing::Values(BorderType(cv::BORDER_REFLECT101), BorderType(cv::BORDER_REPLICATE), BorderType(cv::BORDER_REFLECT)),\r
+ testing::Values(BlockSize(3), BlockSize(5), BlockSize(7)),\r
+ testing::Values(ApertureSize(0), ApertureSize(3), ApertureSize(5), ApertureSize(7))));\r
+\r
+} // namespace\r
//\r
//M*/\r
\r
+#include <main_test_nvidia.h>\r
#include "precomp.hpp"\r
\r
#ifdef HAVE_CUDA\r
using namespace cvtest;\r
using namespace testing;\r
\r
-enum OutputLevel\r
-{\r
- OutputLevelNone,\r
- OutputLevelCompact,\r
- OutputLevelFull\r
-};\r
-\r
-bool nvidia_NPPST_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NPPST_Squared_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NPPST_RectStdDev(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NPPST_Resize(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NPPST_Vector_Operations(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NPPST_Transpose(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NCV_Vector_Operations(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NCV_Haar_Cascade_Loader(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NCV_Haar_Cascade_Application(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NCV_Hypotheses_Filtration(const std::string& test_data_path, OutputLevel outputLevel);\r
-bool nvidia_NCV_Visualization(const std::string& test_data_path, OutputLevel outputLevel);\r
+//enum OutputLevel\r
+//{\r
+// OutputLevelNone,\r
+// OutputLevelCompact,\r
+// OutputLevelFull\r
+//};\r
\r
struct NVidiaTest : TestWithParam<cv::gpu::DeviceInfo>\r
{\r
\r
OutputLevel nvidiaTestOutputLevel = OutputLevelCompact;\r
\r
-TEST_P(NPPST, Integral)\r
-{\r
- bool res = nvidia_NPPST_Integral_Image(path, nvidiaTestOutputLevel);\r
+//TEST_P(NPPST, Integral)\r
+//{\r
+// bool res = nvidia_NPPST_Integral_Image(path, nvidiaTestOutputLevel);\r
\r
- ASSERT_TRUE(res);\r
-}\r
+// ASSERT_TRUE(res);\r
+//}\r
\r
TEST_P(NPPST, SquaredIntegral)\r
{\r
}\r
\r
#ifdef DUMP\r
- void dump(const cv::Mat& block_hists, const std::vector<cv::Point>& locations)\r
+ void dump(const cv::Mat& blockHists, const std::vector<cv::Point>& locations)\r
{\r
- f.write((char*)&block_hists.rows, sizeof(block_hists.rows));\r
- f.write((char*)&block_hists.cols, sizeof(block_hists.cols));\r
+ f.write((char*)&blockHists.rows, sizeof(blockHists.rows));\r
+ f.write((char*)&blockHists.cols, sizeof(blockHists.cols));\r
\r
- for (int i = 0; i < block_hists.rows; ++i)\r
+ for (int i = 0; i < blockHists.rows; ++i)\r
{\r
- for (int j = 0; j < block_hists.cols; ++j)\r
+ for (int j = 0; j < blockHists.cols; ++j)\r
{\r
- float val = block_hists.at<float>(i, j);\r
+ float val = blockHists.at<float>(i, j);\r
f.write((char*)&val, sizeof(val));\r
}\r
}\r
f.write((char*)&locations[i], sizeof(locations[i]));\r
}\r
#else\r
- void compare(const cv::Mat& block_hists, const std::vector<cv::Point>& locations)\r
+ void compare(const cv::Mat& blockHists, const std::vector<cv::Point>& locations)\r
{\r
int rows, cols;\r
f.read((char*)&rows, sizeof(rows));\r
f.read((char*)&cols, sizeof(cols));\r
- ASSERT_EQ(rows, block_hists.rows);\r
- ASSERT_EQ(cols, block_hists.cols);\r
+ ASSERT_EQ(rows, blockHists.rows);\r
+ ASSERT_EQ(cols, blockHists.cols);\r
\r
- for (int i = 0; i < block_hists.rows; ++i)\r
+ for (int i = 0; i < blockHists.rows; ++i)\r
{\r
- for (int j = 0; j < block_hists.cols; ++j)\r
+ for (int j = 0; j < blockHists.cols; ++j)\r
{\r
float val;\r
f.read((char*)&val, sizeof(val));\r
- ASSERT_NEAR(val, block_hists.at<float>(i, j), 1e-3);\r
+ ASSERT_NEAR(val, blockHists.at<float>(i, j), 1e-3);\r
}\r
}\r
\r
endif()
include(${QT_USE_FILE})
+ if(QT_INCLUDE_DIR)
+ ocv_include_directories(${QT_INCLUDE_DIR})
+ endif()
+
QT4_ADD_RESOURCES(_RCC_OUTFILES src/window_QT.qrc)
QT4_WRAP_CPP(_MOC_OUTFILES src/window_QT.h)
list(APPEND HIGHGUI_LIBRARIES ${QT_LIBRARIES} ${QT_QTTEST_LIBRARY})
list(APPEND highgui_srcs src/window_QT.cpp ${_MOC_OUTFILES} ${_RCC_OUTFILES} )
+
+ ocv_check_flag_support(CXX -Wno-missing-declarations HAVE_CXX_WNO_MISSING_DECLARATIONS)
+ if(HAVE_CXX_WNO_MISSING_DECLARATIONS)
+ set_source_files_properties(${_RCC_OUTFILES} PROPERTIES COMPILE_FLAGS -Wno-missing-declarations)
+ endif()
elseif(WIN32)
list(APPEND highgui_srcs src/window_w32.cpp)
elseif(HAVE_GTK)
if(IOS)
list(APPEND HIGHGUI_LIBRARIES "-framework ImageIO")
endif()
- #TODO: check if need to link with some framework on OS X: -framework ApplicationServices ??
endif(WITH_IMAGEIO)
if(WITH_AVFOUNDATION)
if(WIN32)
link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only
- include_directories(AFTER "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
+ include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
endif()
if(UNIX)
set_target_properties(${the_module} PROPERTIES LINK_INTERFACE_LIBRARIES "")
ocv_add_precompiled_headers(${the_module})
-
-if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated-declarations")
-endif()
+ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated-declarations)
if(WIN32 AND WITH_FFMPEG)
#copy ffmpeg dll to the output folder
CVAPI(void) cvSaveWindowParameters(const char* name);
CVAPI(void) cvLoadWindowParameters(const char* name);
CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
-CVAPI(void) cvStopLoop();
+CVAPI(void) cvStopLoop( void );
typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata);
enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2};
/* this function is used to set some external parameters in case of X Window */
CVAPI(int) cvInitSystem( int argc, char** argv );
-CVAPI(int) cvStartWindowThread();
+CVAPI(int) cvStartWindowThread( void );
// --------- YV ---------
enum
CV_WND_PROP_AUTOSIZE = 1, //to change/get window's autosize property
CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property
CV_WND_PROP_OPENGL = 3, //to change/get window's opengl support
-
+
//These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty
 CV_WINDOW_NORMAL = 0x00000000, //the user can resize the window (no constraint) / also used to switch a fullscreen window to a normal size
 CV_WINDOW_AUTOSIZE = 0x00000001, //the user cannot resize the window, the size is constrained by the image displayed
CV_WINDOW_OPENGL = 0x00001000, //window with opengl support
-
+
//Those flags are only for Qt
CV_GUI_EXPANDED = 0x00000000, //status bar and tool bar
 CV_GUI_NORMAL = 0x00000010, //old-fashioned way
-
+
//These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty
CV_WINDOW_FULLSCREEN = 1,//change the window to fullscreen
 CV_WINDOW_FREERATIO = 0x00000100,//the image expands as much as it can (no ratio constraint)
CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
CV_CAP_ANDROID =1000, // Android
-
+
CV_CAP_XIAPI =1100, // XIMEA Camera API
-
- CV_CAP_AVFOUNDATION = 1200 // AVFoundation framework for iOS (OS X Lion will have the same API)
+
+ CV_CAP_AVFOUNDATION = 1200 // AVFoundation framework for iOS (OS X Lion will have the same API)
};
/* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
CV_CAP_PROP_TRIGGER_DELAY =25,
CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
CV_CAP_PROP_ZOOM =27,
- CV_CAP_PROP_FOCUS =28,
- CV_CAP_PROP_GUID =29,
- CV_CAP_PROP_ISO_SPEED =30,
+ CV_CAP_PROP_FOCUS =28,
+ CV_CAP_PROP_GUID =29,
+ CV_CAP_PROP_ISO_SPEED =30,
CV_CAP_PROP_MAX_DC1394 =31,
- CV_CAP_PROP_BACKLIGHT =32,
- CV_CAP_PROP_PAN =33,
- CV_CAP_PROP_TILT =34,
- CV_CAP_PROP_ROLL =35,
- CV_CAP_PROP_IRIS =36,
+ CV_CAP_PROP_BACKLIGHT =32,
+ CV_CAP_PROP_PAN =33,
+ CV_CAP_PROP_TILT =34,
+ CV_CAP_PROP_ROLL =35,
+ CV_CAP_PROP_IRIS =36,
CV_CAP_PROP_SETTINGS =37,
CV_CAP_PROP_AUTOGRAB =1024, // property for highgui class CvCapture_Android only
CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
-
+
// Properties of cameras available through GStreamer interface
CV_CAP_GSTREAMER_QUEUE_LENGTH = 200, // default is 1
 CV_CAP_PROP_PVAPI_MULTICASTIP = 300, // IP address to enable multicast master mode; 0 disables multicast
-
+
// Properties of cameras available through XIMEA SDK interface
- CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
+ CV_CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
CV_CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
CV_CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
CV_CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
CV_CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
- CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
+ CV_CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
CV_CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
CV_CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
- CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
+ CV_CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
CV_CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
- CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
+ CV_CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
CV_CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
CV_CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
CV_CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
CV_CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
CV_CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
CV_CAP_PROP_XI_TIMEOUT = 420, // Image capture timeout in milliseconds
-
+
// Properties for Android cameras
CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
CVAPI(int) cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
// Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
-CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
+CVAPI(int) cvGetCaptureDomain( CvCapture* capture);
/* "black box" video file writer structure */
typedef struct CvVideoWriter CvVideoWriter;
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts/ts.hpp"
#include "opencv2/highgui/highgui.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
#include "precomp.hpp"
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4711 )
-#endif
-
#if defined _M_X64 && defined _MSC_VER && !defined CV_ICC
#pragma optimize("",off)
-#pragma warning( disable: 4748 )
+#pragma warning(disable: 4748)
#endif
namespace cv
return capture;
break;
#endif
-
+
#ifdef HAVE_PVAPI
case CV_CAP_PVAPI:
capture = cvCreateCameraCapture_PvAPI (index);
return capture;
break;
#endif
-
+
#ifdef HAVE_XIMEA
case CV_CAP_XIAPI:
capture = cvCreateCameraCapture_XIMEA (index);
if (! result)
result = cvCreateFileCapture_QT (filename);
#endif
-
+
#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateFileCapture_AVFoundation (filename);
if (! result)
result = cvCreateFileCapture_OpenNI (filename);
#endif
-
+
if (! result)
result = cvCreateFileCapture_Images (filename);
CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color )
{
- //CV_FUNCNAME( "cvCreateVideoWriter" );
+ //CV_FUNCNAME( "cvCreateVideoWriter" );
- CvVideoWriter *result = 0;
+ CvVideoWriter *result = 0;
- if(!fourcc || !fps)
- result = cvCreateVideoWriter_Images(filename);
+ if(!fourcc || !fps)
+ result = cvCreateVideoWriter_Images(filename);
- if(!result)
- result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
+ if(!result)
+ result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
-/* #ifdef HAVE_XINE
- if(!result)
- result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
- #endif
+/* #ifdef HAVE_XINE
+ if(!result)
+ result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
+ #endif
*/
-#ifdef HAVE_AVFOUNDATION
+#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateVideoWriter_AVFoundation(filename, fourcc, fps, frameSize, is_color);
#endif
#ifdef HAVE_QUICKTIME
- if(!result)
- result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
+ if(!result)
+ result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
#endif
#ifdef HAVE_GSTREAMER
result = cvCreateVideoWriter_GStreamer(filename, fourcc, fps, frameSize, is_color);
#endif
- if(!result)
- result = cvCreateVideoWriter_Images(filename);
+ if(!result)
+ result = cvCreateVideoWriter_Images(filename);
- return result;
+ return result;
}
CV_IMPL int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
VideoCapture::VideoCapture()
{}
-
+
VideoCapture::VideoCapture(const string& filename)
{
open(filename);
}
-
+
VideoCapture::VideoCapture(int device)
{
open(device);
{
cap.release();
}
-
+
bool VideoCapture::open(const string& filename)
{
cap = cvCreateFileCapture(filename.c_str());
return isOpened();
}
-
+
bool VideoCapture::open(int device)
{
cap = cvCreateCameraCapture(device);
return isOpened();
}
-
+
bool VideoCapture::isOpened() const { return !cap.empty(); }
-
+
void VideoCapture::release()
{
cap.release();
{
return cvGrabFrame(cap) != 0;
}
-
+
bool VideoCapture::retrieve(Mat& image, int channel)
{
IplImage* _img = cvRetrieveFrame(cap, channel);
image.release();
return !image.empty();
}
-
+
VideoCapture& VideoCapture::operator >> (Mat& image)
{
read(image);
return *this;
}
-
+
bool VideoCapture::set(int propId, double value)
{
return cvSetCaptureProperty(cap, propId, value) != 0;
}
-
+
double VideoCapture::get(int propId)
{
return cvGetCaptureProperty(cap, propId);
VideoWriter::VideoWriter()
{}
-
+
VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size frameSize, bool isColor)
{
open(filename, fourcc, fps, frameSize, isColor);
void VideoWriter::release()
{
writer.release();
-}
-
+}
+
VideoWriter::~VideoWriter()
{
release();
}
-
+
bool VideoWriter::open(const string& filename, int fourcc, double fps, Size frameSize, bool isColor)
{
writer = cvCreateVideoWriter(filename.c_str(), fourcc, fps, frameSize, isColor);
bool VideoWriter::isOpened() const
{
return !writer.empty();
-}
+}
void VideoWriter::write(const Mat& image)
{
IplImage _img = image;
cvWriteFrame(writer, &_img);
}
-
+
VideoWriter& VideoWriter::operator << (const Mat& image)
{
write(image);
- return *this;
+ return *this;
}
}
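A minimal usage sketch of the VideoCapture/VideoWriter wrappers implemented above, assuming a camera is available at index 0 and that "out.avi" is writable; the codec, frame count, and file name are arbitrary choices for illustration, not part of the library code.

#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::VideoCapture cap(0);                 // open the default camera
    if (!cap.isOpened())
        return -1;

    cv::Mat frame;
    cap >> frame;                            // grab + retrieve one frame
    if (frame.empty())
        return -1;

    // Write a short clip with the motion-JPEG codec at 25 fps.
    cv::VideoWriter writer("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), 25.0, frame.size());
    if (!writer.isOpened())
        return -1;

    for (int i = 0; i < 25 && !frame.empty(); ++i)
    {
        writer << frame;                     // VideoWriter::operator<<
        cap >> frame;                        // VideoCapture::operator>>
    }
    return 0;
}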
/****************** Capturing video from camera via CMU lib *******************/
-#if HAVE_CMU1394
+#ifdef HAVE_CMU1394
// This firewire capability added by Philip Gruebele (pgruebele@cox.net).
// For this to work you need to install the CMU firewire DCAM drivers,
static dc1394error_t dc1394_deinterlace_stereo_frames_fixed(dc1394video_frame_t *in,
dc1394video_frame_t *out, dc1394stereo_method_t method)
{
- dc1394error_t err;
-
if((in->color_coding == DC1394_COLOR_CODING_RAW16) ||
(in->color_coding == DC1394_COLOR_CODING_MONO16) ||
(in->color_coding == DC1394_COLOR_CODING_YUV422))
{
case DC1394_STEREO_METHOD_INTERLACED:
- err = adaptBufferStereoLocal(in, out);
+ adaptBufferStereoLocal(in, out);
//FIXED by AB:
// dc1394_deinterlace_stereo(in->image, out->image, in->size[0], in->size[1]);
dc1394_deinterlace_stereo(in->image, out->image, out->size[0], out->size[1]);
break;
case DC1394_STEREO_METHOD_FIELD:
- err = adaptBufferStereoLocal(in, out);
+ adaptBufferStereoLocal(in, out);
memcpy(out->image, in->image, out->image_bytes);
break;
}
#include "precomp.hpp"
-#if _MSC_VER >= 100
+#if defined _MSC_VER && _MSC_VER >= 100
+//'sprintf': name was marked as #pragma deprecated
#pragma warning(disable: 4995)
#endif
#include <vector>
//Include Directshow stuff here so we don't worry about needing all the h files.
-#if _MSC_VER >= 1500
-#include "DShow.h"
-#include "strmif.h"
-#include "Aviriff.h"
-#include "dvdmedia.h"
-#include "bdaiface.h"
+#if defined _MSC_VER && _MSC_VER >= 1500
+# include "DShow.h"
+# include "strmif.h"
+# include "Aviriff.h"
+# include "dvdmedia.h"
+# include "bdaiface.h"
#else
-#ifdef _MSC_VER
-#define __extension__
-typedef BOOL WINBOOL;
+# ifdef _MSC_VER
+# define __extension__
+ typedef BOOL WINBOOL;
#endif
+
#include "dshow/dshow.h"
#include "dshow/dvdmedia.h"
#include "dshow/bdatypes.h"
virtual HRESULT STDMETHODCALLTYPE Clone(
/* [out] */ IEnumPIDMap **ppIEnumPIDMap) = 0;
+
+ virtual ~IEnumPIDMap() {}
};
interface IMPEG2PIDMap : public IUnknown
virtual HRESULT STDMETHODCALLTYPE EnumPIDMap(
/* [out] */ IEnumPIDMap **pIEnumPIDMap) = 0;
+
+ virtual ~IMPEG2PIDMap() {}
};
#endif
BYTE *pBuffer,
LONG BufferLen) = 0;
+ virtual ~ISampleGrabberCB() {}
};
interface ISampleGrabber : public IUnknown
ISampleGrabberCB *pCallback,
LONG WhichMethodToCallback) = 0;
+ virtual ~ISampleGrabber() {}
};
#ifndef HEADER
//Manual control over settings thanks.....
//These are experimental for now.
- bool setVideoSettingFilter(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
- bool setVideoSettingFilterPct(int deviceID, long Property, float pctValue, long Flags = NULL);
+ bool setVideoSettingFilter(int deviceID, long Property, long lValue, long Flags = 0, bool useDefaultValue = false);
+ bool setVideoSettingFilterPct(int deviceID, long Property, float pctValue, long Flags = 0);
    bool getVideoSettingFilter(int deviceID, long Property, long &min, long &max, long &SteppingDelta, long &currentValue, long &flags, long &defaultValue);
- bool setVideoSettingCamera(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
- bool setVideoSettingCameraPct(int deviceID, long Property, float pctValue, long Flags = NULL);
+ bool setVideoSettingCamera(int deviceID, long Property, long lValue, long Flags = 0, bool useDefaultValue = false);
+ bool setVideoSettingCameraPct(int deviceID, long Property, float pctValue, long Flags = 0);
    bool getVideoSettingCamera(int deviceID, long Property, long &min, long &max, long &SteppingDelta, long &currentValue, long &flags, long &defaultValue);
//bool setVideoSettingCam(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false);
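
The declarations above switch the long Flags defaults from NULL to 0: NULL is a pointer-style constant, so using it as the default for an integral parameter draws conversion warnings on some compilers and misstates the intent. A small compilable illustration under that assumption; setVideoSetting is a made-up stand-in, not the videoInput API.

    #include <cstdio>

    // Plain 0 is the correct default for an integral parameter; NULL is reserved for pointers.
    static bool setVideoSetting(int deviceID, long property, long value, long flags = 0)
    {
        std::printf("dev=%d prop=%ld val=%ld flags=%ld\n", deviceID, property, value, flags);
        return true;
    }

    int main()
    {
        setVideoSetting(0, 1L, 42L);        // flags defaults to 0
        setVideoSetting(0, 1L, 42L, 4L);    // explicit flags
        return 0;
    }
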
/////////////////////////// HANDY FUNCTIONS /////////////////////////////
-void MyFreeMediaType(AM_MEDIA_TYPE& mt){
+static void MyFreeMediaType(AM_MEDIA_TYPE& mt){
if (mt.cbFormat != 0)
{
CoTaskMemFree((PVOID)mt.pbFormat);
}
}
-void MyDeleteMediaType(AM_MEDIA_TYPE *pmt)
+static void MyDeleteMediaType(AM_MEDIA_TYPE *pmt)
{
if (pmt != NULL)
{
//------------------------------------------------
- ~SampleGrabberCallback(){
+ virtual ~SampleGrabberCallback(){
ptrBuffer = NULL;
DeleteCriticalSection(&critSection);
CloseHandle(hEvent);
// ----------------------------------------------------------------------
void videoDevice::destroyGraph(){
- HRESULT hr = NULL;
+ HRESULT hr = 0;
//int FuncRetval=0;
//int NumFilters=0;
IBaseFilter * pFilter = NULL;
if (pEnum->Next(1, &pFilter, &cFetched) == S_OK)
{
- FILTER_INFO FilterInfo={0};
+ FILTER_INFO FilterInfo;
memset(&FilterInfo, 0, sizeof(FilterInfo));
hr = pFilter->QueryFilterInfo(&FilterInfo);
FilterInfo.pGraph->Release();
//
// ----------------------------------------------------------------------
-bool videoInput::setupDevice(int deviceNumber, int connection){
+bool videoInput::setupDevice(int deviceNumber, int _connection){
if(deviceNumber >= VI_MAX_CAMERAS || VDList[deviceNumber]->readyToCapture) return false;
- setPhyCon(deviceNumber, connection);
+ setPhyCon(deviceNumber, _connection);
if(setup(deviceNumber))return true;
return false;
}
//
// ----------------------------------------------------------------------
-bool videoInput::setupDevice(int deviceNumber, int w, int h, int connection){
+bool videoInput::setupDevice(int deviceNumber, int w, int h, int _connection){
if(deviceNumber >= VI_MAX_CAMERAS || VDList[deviceNumber]->readyToCapture) return false;
setAttemptCaptureSize(deviceNumber,w,h);
- setPhyCon(deviceNumber, connection);
+ setPhyCon(deviceNumber, _connection);
if(setup(deviceNumber))return true;
return false;
}
void videoInput::showSettingsWindow(int id){
if(isDeviceSetup(id)){
- HANDLE myTempThread;
+ //HANDLE myTempThread;
//we reconnect to the device as we have freed our reference to it
//why have we freed our reference? because there seemed to be an issue
//with some mpeg devices if we didn't
HRESULT hr = getDevice(&VDList[id]->pVideoInputFilter, id, VDList[id]->wDeviceName, VDList[id]->nDeviceName);
if(hr == S_OK){
- myTempThread = (HANDLE)_beginthread(basicThread, 0, (void *)&VDList[id]);
+ //myTempThread = (HANDLE)
+ _beginthread(basicThread, 0, (void *)&VDList[id]);
}
}
}
float halfStep = (float)stepAmnt * 0.5f;
if( mod < halfStep ) rasterValue -= mod;
else rasterValue += stepAmnt - mod;
- printf("RASTER - pctValue is %f - value is %i - step is %i - mod is %i - rasterValue is %i\n", pctValue, value, stepAmnt, mod, rasterValue);
+ printf("RASTER - pctValue is %f - value is %li - step is %li - mod is %li - rasterValue is %li\n", pctValue, value, stepAmnt, mod, rasterValue);
}
return setVideoSettingFilter(deviceID, Property, rasterValue, Flags, false);
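
The printf fix above swaps %i for %li because value, stepAmnt, mod and rasterValue are long; the length modifier has to match the argument type or -Wformat warns and the output is unreliable. A standalone illustration with hypothetical values:

    #include <cstdio>

    int main()
    {
        long stepAmnt = 5, value = 42;   // hypothetical values
        // "%li" (or "%ld") consumes a long; plain "%i" expects int, which differs in size on LP64.
        std::printf("step is %li - value is %li\n", stepAmnt, value);
        return 0;
    }
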
float halfStep = (float)stepAmnt * 0.5f;
if( mod < halfStep ) rasterValue -= mod;
else rasterValue += stepAmnt - mod;
- printf("RASTER - pctValue is %f - value is %i - step is %i - mod is %i - rasterValue is %i\n", pctValue, value, stepAmnt, mod, rasterValue);
+ printf("RASTER - pctValue is %f - value is %li - step is %li - mod is %li - rasterValue is %li\n", pctValue, value, stepAmnt, mod, rasterValue);
}
return setVideoSettingCamera(deviceID, Property, rasterValue, Flags, false);
stopDevice(id);
//set our fps if needed
- if( avgFrameTime != -1){
+ if( avgFrameTime != (unsigned long)-1){
VDList[id]->requestedFrameTime = avgFrameTime;
}
//find perfect match or closest size
int nearW = 9999999;
int nearH = 9999999;
- bool foundClosestMatch = true;
+ //bool foundClosestMatch = true;
int iCount = 0;
int iSize = 0;
//see if we have an exact match!
if(exactMatchX && exactMatchY){
- foundClosestMatch = false;
+ //foundClosestMatch = false;
exactMatch = true;
widthOut = widthIn;
return hr;
}
-HRESULT videoInput::ShowStreamPropertyPages(IAMStreamConfig *pStream){
+HRESULT videoInput::ShowStreamPropertyPages(IAMStreamConfig * /*pStream*/){
HRESULT hr = NOERROR;
return hr;
LONG lInpin, lOutpin;
hr = Crossbar->get_PinCounts(&lOutpin , &lInpin);
- BOOL IPin=TRUE; LONG pIndex=0 , pRIndex=0 , pType=0;
+ BOOL iPin=TRUE; LONG pIndex=0 , pRIndex=0 , pType=0;
while( pIndex < lInpin)
{
- hr = Crossbar->get_CrossbarPinInfo( IPin , pIndex , &pRIndex , &pType);
+ hr = Crossbar->get_CrossbarPinInfo( iPin , pIndex , &pRIndex , &pType);
if( pType == conType){
if(verbose)printf("SETUP: Found Physical Interface");
icvReleaseVideoWriter_FFMPEG_p = (CvReleaseVideoWriter_Plugin)cvReleaseVideoWriter_FFMPEG;
icvWriteFrame_FFMPEG_p = (CvWriteFrame_Plugin)cvWriteFrame_FFMPEG;
#endif
-
+
ffmpegInitialized = 1;
}
}
{
unsigned char* data = 0;
int step=0, width=0, height=0, cn=0;
-
+
if(!ffmpegCapture ||
!icvRetrieveFrame_FFMPEG_p(ffmpegCapture,&data,&step,&width,&height,&cn))
return 0;
return cvCreateFileCapture_VFW(filename);
#else
return 0;
-#endif
+#endif
}
return cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, isColor);
#else
return 0;
-#endif
+#endif
}
#ifndef HAVE_FFMPEG_SWSCALE
#error "libswscale is necessary to build the newer OpenCV ffmpeg wrapper"
#endif
-
+
// if the header path is not specified explicitly, let's deduce it
#if !defined HAVE_FFMPEG_AVCODEC_H && !defined HAVE_LIBAVCODEC_AVCODEC_H
#define AV_NOPTS_VALUE_ ((int64_t)AV_NOPTS_VALUE)
#endif
-int get_number_of_cpus(void)
+static int get_number_of_cpus(void)
{
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(52, 111, 0)
return 1;
void seek(int64_t frame_number);
void seek(double sec);
- bool slowSeek( int framenumber );
+ bool slowSeek( int framenumber );
int64_t get_total_frames();
double get_duration_sec();
AVCodec * avcodec;
int video_stream;
AVStream * video_st;
- AVFrame * picture;
- AVFrame rgb_picture;
+ AVFrame * picture;
+ AVFrame rgb_picture;
int64_t picture_pts;
AVPacket packet;
sws_freeContext(img_convert_ctx);
img_convert_ctx = 0;
}
-
+
if( picture )
av_free(picture);
if( ic )
{
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 24, 2)
- av_close_input_file(ic);
+ av_close_input_file(ic);
#else
- avformat_close_input(&ic);
+ avformat_close_input(&ic);
#endif
ic = NULL;
av_register_all();
av_log_set_level(AV_LOG_ERROR);
-
+
initialized = true;
}
}
bool CvCapture_FFMPEG::open( const char* _filename )
{
icvInitFFMPEG_internal();
-
+
unsigned i;
bool valid = false;
close();
-
+
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
int err = avformat_open_input(&ic, _filename, NULL, NULL);
#else
int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
-#endif
-
+#endif
+
if (err < 0) {
CV_WARN("Error opening file");
goto exit_func;
const int max_number_of_attempts = 1 << 16;
if( !ic || !video_st ) return false;
-
+
if( ic->streams[video_stream]->nb_frames > 0 &&
frame_number > ic->streams[video_stream]->nb_frames )
return false;
av_free_packet (&packet);
-
+
picture_pts = AV_NOPTS_VALUE_;
// get the next frame
break;
continue;
}
-
+
// Decode video frame
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
if( valid && first_frame_number < 0 )
first_frame_number = dts_to_frame_number(picture_pts);
-
+
// return if we have a new picture or not
return valid;
}
{
if( img_convert_ctx )
sws_freeContext(img_convert_ctx);
-
+
frame.width = video_st->codec->width;
frame.height = video_st->codec->height;
{
fps = r2d(ic->streams[video_stream]->avg_frame_rate);
}
-#endif
+#endif
if (fps < eps_zero)
{
{
_frame_number = std::min(_frame_number, get_total_frames());
int delta = 16;
-
+
// if we have not grabbed a single frame before first seek, let's read the first frame
// and get some valuable information during the process
if( first_frame_number < 0 )
grabFrame();
-
+
for(;;)
{
int64_t _frame_number_temp = std::max(_frame_number-delta, (int64_t)0);
if( _frame_number > 0 )
{
grabFrame();
-
+
if( _frame_number > 1 )
{
frame_number = dts_to_frame_number(picture_pts) - first_frame_number;
//printf("_frame_number = %d, frame_number = %d, delta = %d\n",
// (int)_frame_number, (int)frame_number, delta);
-
+
if( frame_number < 0 || frame_number > _frame_number-1 )
{
if( _frame_number_temp == 0 || delta >= INT_MAX/4 )
void init();
- AVOutputFormat * fmt;
+ AVOutputFormat * fmt;
AVFormatContext * oc;
uint8_t * outbuf;
uint32_t outbuf_size;
static const int OPENCV_NO_FRAMES_WRITTEN_CODE = 1000;
-int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
+static int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
{
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext * c = video_st->codec;
#if LIBAVFORMAT_BUILD > 4752
if(c->coded_frame->pts != (int64_t)AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
+ pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
#else
pkt.pts = c->coded_frame->pts;
#endif
bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin )
{
bool ret = false;
-
+
if( (width & -2) != frame_width || (height & -2) != frame_height || !data )
return false;
width = frame_width;
// nothing to do if already released
if ( !picture )
return;
-
+
/* no more frame to compress. The codec has a latency of a few
frames if using B frames, so we get the last frames by
passing the same picture again */
}
av_write_trailer(oc);
}
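
The comment above the trailer-writing block explains why the writer keeps calling the encoder without fresh input before av_write_trailer: codecs that use B-frames hold a few frames back, and those are only emitted once the encoder is asked again with no new picture. The toy model below mimics that drain loop; ToyEncoder is an invented stand-in, not the FFmpeg API.

    #include <cstdio>

    // Toy codec with a two-frame internal delay, standing in for a B-frame encoder.
    // encode(NULL) returns 1 while delayed frames remain, then 0 once fully drained.
    struct ToyEncoder
    {
        int delayed;
        explicit ToyEncoder(int d) : delayed(d) {}
        int encode(const void* picture)
        {
            if (picture) return 1;                 // normal path: frame in, packet out
            if (delayed) { --delayed; return 1; }  // no input: flush one delayed frame
            return 0;                              // nothing left to emit
        }
    };

    int main()
    {
        ToyEncoder enc(2);
        int n = 0;
        while (enc.encode(0) > 0)                  // keep passing "no picture" until drained
            std::printf("flushed delayed packet %d\n", ++n);
        // only after this point is it safe to write the container trailer
        return 0;
    }
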
-
+
if( img_convert_ctx )
{
sws_freeContext(img_convert_ctx);
double fps, int width, int height, bool is_color )
{
icvInitFFMPEG_internal();
-
+
CodecID codec_id = CODEC_ID_NONE;
int err, codec_pix_fmt;
double bitrate_scale = 1;
return false;
if(fps <= 0)
return false;
-
+
// we allow frames of odd width or height, but in this case we truncate
// the rightmost column/the bottom row. Probably, this should be handled more elegantly,
// but some internal functions inside FFMPEG swscale require even width/height.
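
The comment above describes truncating odd frame dimensions because swscale wants even sizes; the writer does this with width & -2 (seen earlier in writeFrame), which clears the lowest bit and rounds an odd value down to the nearest even one. A quick check:

    #include <cstdio>

    int main()
    {
        int dims[] = { 641, 640, 479 };
        for (int i = 0; i < 3; ++i)
            std::printf("%d & -2 -> %d\n", dims[i], dims[i] & -2);   // 641->640, 640->640, 479->478
        return 0;
    }
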
codec_pix_fmt = PIX_FMT_YUV420P;
break;
}
-
+
double bitrate = MIN(bitrate_scale*fps*width*height, (double)INT_MAX/2);
// TODO -- safe to ignore output audio stream?
err=avformat_write_header(oc, NULL);
#else
err=av_write_header( oc );
-#endif
-
+#endif
+
if(err < 0)
{
close();
sinkpad = gst_element_get_static_pad (color, "sink");
-
+
// printf("linking dynamic pad to colourconverter %p %p\n", uridecodebin, pad);
gst_pad_link (pad, sinkpad);
if(manualpipeline) {
GstIterator *it = gst_bin_iterate_sinks(GST_BIN(uridecodebin));
if(gst_iterator_next(it, (gpointer *)&sink) != GST_ITERATOR_OK) {
- CV_ERROR(CV_StsError, "GStreamer: cannot find appsink in manual pipeline\n");
- return false;
+ CV_ERROR(CV_StsError, "GStreamer: cannot find appsink in manual pipeline\n");
+ return false;
}
- pipeline = uridecodebin;
+ pipeline = uridecodebin;
} else {
- pipeline = gst_pipeline_new (NULL);
+ pipeline = gst_pipeline_new (NULL);
color = gst_element_factory_make("ffmpegcolorspace", NULL);
sink = gst_element_factory_make("appsink", NULL);
gst_app_sink_set_max_buffers (GST_APP_SINK(sink), 1);
gst_app_sink_set_drop (GST_APP_SINK(sink), stream);
- {
- GstCaps* caps;
- caps = gst_caps_new_simple("video/x-raw-rgb",
- "red_mask", G_TYPE_INT, 0x0000FF,
- "green_mask", G_TYPE_INT, 0x00FF00,
- "blue_mask", G_TYPE_INT, 0xFF0000,
- NULL);
- gst_app_sink_set_caps(GST_APP_SINK(sink), caps);
+        GstCaps* caps = gst_caps_new_simple("video/x-raw-rgb",
+                                            "red_mask",   G_TYPE_INT, 0x0000FF,
+                                            "green_mask", G_TYPE_INT, 0x00FF00,
+                                            "blue_mask",  G_TYPE_INT, 0xFF0000,
+                                            NULL);
+        gst_app_sink_set_caps(GST_APP_SINK(sink), caps);
gst_caps_unref(caps);
- }
if(gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_READY) ==
GST_STATE_CHANGE_FAILURE) {
return -1;
} else {
buffer_number--;
- fprintf (stderr, "Insufficient buffer memory on %s -- decreaseing buffers\n", deviceName);
+            fprintf (stderr, "Insufficient buffer memory on %s -- decreasing buffers\n", deviceName);
- goto try_again;
+ goto try_again;
}
}
if (capture->buffers[MAX_V4L_BUFFERS].start) {
free(capture->buffers[MAX_V4L_BUFFERS].start);
capture->buffers[MAX_V4L_BUFFERS].start = NULL;
- }
-
+ }
+
capture->buffers[MAX_V4L_BUFFERS].start = malloc(buf.length);
capture->buffers[MAX_V4L_BUFFERS].length = buf.length;
};
#ifdef USE_TEMP_BUFFER
memcpy(capture->buffers[MAX_V4L_BUFFERS].start,
- capture->buffers[buf.index].start,
- capture->buffers[MAX_V4L_BUFFERS].length );
+ capture->buffers[buf.index].start,
+ capture->buffers[MAX_V4L_BUFFERS].length );
capture->bufferIndex = MAX_V4L_BUFFERS;
//printf("got data in buff %d, len=%d, flags=0x%X, seq=%d, used=%d)\n",
- // buf.index, buf.length, buf.flags, buf.sequence, buf.bytesused);
+ // buf.index, buf.length, buf.flags, buf.sequence, buf.bytesused);
#else
capture->bufferIndex = buf.index;
#endif
capture->mmaps[capture->bufferIndex].format = capture->imageProperties.palette;
if (v4l1_ioctl (capture->deviceHandle, VIDIOCMCAPTURE,
- &capture->mmaps[capture->bufferIndex]) == -1) {
- /* capture is on the way, so just exit */
- return 1;
+ &capture->mmaps[capture->bufferIndex]) == -1) {
+ /* capture is on the way, so just exit */
+ return 1;
}
++capture->bufferIndex;
if (capture->is_v4l2_device == 1)
{
- if(capture->buffers[capture->bufferIndex].start){
- memcpy((char *)capture->frame.imageData,
- (char *)capture->buffers[capture->bufferIndex].start,
- capture->frame.imageSize);
- }
+ if(capture->buffers[capture->bufferIndex].start){
+ memcpy((char *)capture->frame.imageData,
+ (char *)capture->buffers[capture->bufferIndex].start,
+ capture->frame.imageSize);
+ }
} else
#endif /* HAVE_CAMV4L2 */
sprintf(name, "<unknown property string>");
capture->control.id = property_id;
}
-
+
if(v4l2_ioctl(capture->deviceHandle, VIDIOC_G_CTRL, &capture->control) == 0) {
/* all went well */
is_v4l2_device = 1;
CLEAR (capture->control);
CLEAR (capture->queryctrl);
-
+
/* get current values */
switch (property_id) {
case CV_CAP_PROP_BRIGHTNESS:
if (xioctl(capture->deviceHandle, VIDIOC_STREAMOFF, &capture->type) < 0) {
perror ("Unable to stop the stream.");
}
- for (unsigned int n_buffers = 0; n_buffers < capture->req.count; ++n_buffers) {
- if (-1 == v4l2_munmap (capture->buffers[n_buffers].start, capture->buffers[n_buffers].length)) {
+ for (unsigned int n_buffers2 = 0; n_buffers2 < capture->req.count; ++n_buffers2) {
+ if (-1 == v4l2_munmap (capture->buffers[n_buffers2].start, capture->buffers[n_buffers2].length)) {
perror ("munmap");
}
}
#include <iostream>
#include <queue>
+
+#ifndef i386
+# define i386 0
+#endif
+#ifndef __arm__
+# define __arm__ 0
+#endif
+#ifndef _ARC
+# define _ARC 0
+#endif
+#ifndef __APPLE__
+# define __APPLE__ 0
+#endif
+
#include "XnCppWrapper.h"
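
The block added just before XnCppWrapper.h pre-defines i386, __arm__, _ARC and __APPLE__ to 0 when they are absent, presumably so that headers (or code reached through them) which test these macros with #if rather than #ifdef compile cleanly and without -Wundef noise. The same guard pattern in isolation, with SOME_PLATFORM as a made-up macro name:

    // Give an otherwise-undefined platform macro an explicit 0 so "#if" tests are well-defined.
    #ifndef SOME_PLATFORM
    #  define SOME_PLATFORM 0
    #endif

    #include <cstdio>

    int main()
    {
    #if SOME_PLATFORM
        std::printf("platform-specific path\n");
    #else
        std::printf("generic path\n");
    #endif
        return 0;
    }
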
const std::string XMLConfig =
class ApproximateSyncGrabber
{
public:
- ApproximateSyncGrabber( xn::Context &context,
- xn::DepthGenerator &depthGenerator,
- xn::ImageGenerator &imageGenerator,
- int maxBufferSize, bool isCircleBuffer, int maxTimeDuration ) :
- context(context), depthGenerator(depthGenerator), imageGenerator(imageGenerator),
- maxBufferSize(maxBufferSize), isCircleBuffer(isCircleBuffer), maxTimeDuration(maxTimeDuration)
+ ApproximateSyncGrabber( xn::Context &_context,
+ xn::DepthGenerator &_depthGenerator,
+ xn::ImageGenerator &_imageGenerator,
+ int _maxBufferSize, bool _isCircleBuffer, int _maxTimeDuration ) :
+ context(_context), depthGenerator(_depthGenerator), imageGenerator(_imageGenerator),
+ maxBufferSize(_maxBufferSize), isCircleBuffer(_isCircleBuffer), maxTimeDuration(_maxTimeDuration)
{
task = 0;
class ApproximateSynchronizerBase
{
public:
- ApproximateSynchronizerBase( ApproximateSyncGrabber& approxSyncGrabber ) :
- approxSyncGrabber(approxSyncGrabber), isDepthFilled(false), isImageFilled(false)
+ ApproximateSynchronizerBase( ApproximateSyncGrabber& _approxSyncGrabber ) :
+ approxSyncGrabber(_approxSyncGrabber), isDepthFilled(false), isImageFilled(false)
{}
+ virtual ~ApproximateSynchronizerBase() {}
+
virtual bool isSpinContinue() const = 0;
virtual void pushDepthMetaData( xn::DepthMetaData& depthMetaData ) = 0;
virtual void pushImageMetaData( xn::ImageMetaData& imageMetaData ) = 0;
if( status != XN_STATUS_OK )
continue;
- xn::DepthMetaData depth;
- xn::ImageMetaData image;
+ //xn::DepthMetaData depth;
+ //xn::ImageMetaData image;
approxSyncGrabber.depthGenerator.GetMetaData(depth);
approxSyncGrabber.imageGenerator.GetMetaData(image);
class ApproximateSynchronizer: public ApproximateSynchronizerBase
{
public:
- ApproximateSynchronizer( ApproximateSyncGrabber& approxSyncGrabber ) :
- ApproximateSynchronizerBase(approxSyncGrabber)
+ ApproximateSynchronizer( ApproximateSyncGrabber& _approxSyncGrabber ) :
+ ApproximateSynchronizerBase(_approxSyncGrabber)
{}
virtual bool isSpinContinue() const
{
public:
enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_MAX=1 };
-
+
static const int INVALID_PIXEL_VAL = 0;
static const int INVALID_COORDINATE_VAL = 0;
static const int outputMapsTypesCount = 7;
+ static XnMapOutputMode defaultMapOutputMode();
+
IplImage* retrieveDepthMap();
IplImage* retrievePointCloudMap();
IplImage* retrieveDisparityMap();
return isContextOpened;
}
-XnMapOutputMode defaultMapOutputMode()
+XnMapOutputMode CvCapture_OpenNI::defaultMapOutputMode()
{
XnMapOutputMode mode;
mode.nXRes = XN_VGA_X_RES;
return mode;
}
-
CvCapture_OpenNI::CvCapture_OpenNI( int index )
{
int deviceType = DEVICE_DEFAULT;
XnStatus status;
-
+
isContextOpened = false;
maxBufferSize = DEFAULT_MAX_BUFFER_SIZE;
isCircleBuffer = DEFAULT_IS_CIRCLE_BUFFER;
maxTimeDuration = DEFAULT_MAX_TIME_DURATION;
-
+
if( index >= 10 )
{
deviceType = index / 10;
return outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].getIplImagePtr();
}
-void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
+static void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
XnUInt64 noSampleValue, XnUInt64 shadowValue )
{
cv::Mat depth;
*
*/
-void bayer2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst)
+static void bayer2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst)
{
long int i;
unsigned char *rawpt, *scanpt;
// at least for 046d:092f Logitech, Inc. QuickCam Express Plus to work
//see: http://www.siliconimaging.com/RGB%20Bayer.htm
//and 4.6 at http://tldp.org/HOWTO/html_single/libdc1394-HOWTO/
-void sgbrg2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst)
+static void sgbrg2rgb24(long int WIDTH, long int HEIGHT, unsigned char *src, unsigned char *dst)
{
long int i;
unsigned char *rawpt, *scanpt;
present at the MSB of byte x.
*/
-void sonix_decompress_init(void)
+static void sonix_decompress_init(void)
{
int i;
int is_abs, val, len;
Returns <0 if operation failed.
*/
-int sonix_decompress(int width, int height, unsigned char *inp, unsigned char *outp)
+static int sonix_decompress(int width, int height, unsigned char *inp, unsigned char *outp)
{
int row, col;
int val;
perror ("Unable to stop the stream.");
}
- for (unsigned int n_buffers = 0; n_buffers < capture->req.count; ++n_buffers)
+ for (unsigned int n_buffers_ = 0; n_buffers_ < capture->req.count; ++n_buffers_)
{
- if (-1 == munmap (capture->buffers[n_buffers].start, capture->buffers[n_buffers].length)) {
+ if (-1 == munmap (capture->buffers[n_buffers_].start, capture->buffers[n_buffers_].length)) {
perror ("munmap");
}
}
#include <vfw.h>
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4711 )
-#endif
-
#ifdef __GNUC__
#define WM_CAP_FIRSTA (WM_USER)
#define capSendMessage(hwnd,m,w,l) (IsWindow(hwnd)?SendMessage(hwnd,m,w,l):0)
#endif
-#if defined _M_X64
+#if defined _M_X64 && defined _MSC_VER
#pragma optimize("",off)
#pragma warning(disable: 4748)
#endif
{
size.width = aviinfo.rcFrame.right - aviinfo.rcFrame.left;
size.height = aviinfo.rcFrame.bottom - aviinfo.rcFrame.top;
- BITMAPINFOHEADER bmih = icvBitmapHeader( size.width, size.height, 24 );
+ BITMAPINFOHEADER bmihdr = icvBitmapHeader( size.width, size.height, 24 );
film_range.start_index = (int)aviinfo.dwStart;
film_range.end_index = film_range.start_index + (int)aviinfo.dwLength;
fps = (double)aviinfo.dwRate/aviinfo.dwScale;
pos = film_range.start_index;
- getframe = AVIStreamGetFrameOpen( avistream, &bmih );
+ getframe = AVIStreamGetFrameOpen( avistream, &bmihdr );
if( getframe != 0 )
return true;
}
#ifdef HAVE_JPEG
#ifdef _MSC_VER
-#pragma warning(disable: 4324 4611)
+//interaction between '_setjmp' and C++ object destruction is non-portable
+#pragma warning(disable: 4611)
#endif
#include <stdio.h>
namespace cv
{
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable:4324) //structure was padded due to __declspec(align())
+#endif
struct JpegErrorMgr
{
struct jpeg_error_mgr pub;
jmp_buf setjmp_buffer;
};
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif
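
The push/disable/pop added around JpegErrorMgr limits the C4324 ("structure was padded due to alignment") suppression to the one struct that needs it, instead of disabling the warning file-wide as before. The same scoping idiom in standalone form; ErrorMgr is an illustrative name only.

    #include <setjmp.h>

    #ifdef _MSC_VER
    #  pragma warning(push)
    #  pragma warning(disable: 4324)   // structure was padded due to alignment of a member
    #endif
    struct ErrorMgr
    {
        jmp_buf setjmp_buffer;          // jmp_buf's alignment is what typically triggers C4324 on x64
    };
    #ifdef _MSC_VER
    #  pragma warning(pop)
    #endif

    int main()
    {
        ErrorMgr mgr;
        (void)mgr;
        return 0;
    }
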
struct JpegSource
{
}
-GLOBAL(void)
-jpeg_buffer_src(j_decompress_ptr cinfo, JpegSource* source)
+static void jpeg_buffer_src(j_decompress_ptr cinfo, JpegSource* source)
{
cinfo->src = &source->pub;
return TRUE;
}
-GLOBAL(void)
-jpeg_buffer_dest(j_compress_ptr cinfo, JpegDestination* destination)
+static void jpeg_buffer_dest(j_compress_ptr cinfo, JpegDestination* destination)
{
cinfo->dest = &destination->pub;
#include "grfmt_png.hpp"
#if defined _MSC_VER && _MSC_VER >= 1200
- // disable warnings related to _setjmp
+ // interaction between '_setjmp' and C++ object destruction is non-portable
#pragma warning( disable: 4611 )
#endif
if( !m_buf.empty() || m_f )
{
- png_uint_32 width, height;
+ png_uint_32 wdth, hght;
int bit_depth, color_type;
png_read_info( png_ptr, info_ptr );
- png_get_IHDR( png_ptr, info_ptr, &width, &height,
+ png_get_IHDR( png_ptr, info_ptr, &wdth, &hght,
&bit_depth, &color_type, 0, 0, 0 );
- m_width = (int)width;
- m_height = (int)height;
+ m_width = (int)wdth;
+ m_height = (int)hght;
m_color_type = color_type;
m_bit_depth = bit_depth;
if( bit_depth <= 8 || bit_depth == 16 )
{
- switch(color_type)
+ switch(color_type)
{
case PNG_COLOR_TYPE_RGB:
case PNG_COLOR_TYPE_PALETTE:
else if( !isBigEndian() )
png_set_swap( png_ptr );
- if(img.channels() < 4)
+ if(img.channels() < 4)
{
/* observation: png_read_image() writes 400 bytes beyond
* end of data when reading a 400x118 color png
#else
png_set_gray_1_2_4_to_8( png_ptr );
#endif
-
+
if( CV_MAT_CN(m_type) > 1 && color )
png_set_bgr( png_ptr ); // convert RGB to BGR
else if( color )
if( params[i] == CV_IMWRITE_PNG_STRATEGY )
{
compression_strategy = params[i+1];
- compression_strategy = MIN(MAX(compression_strategy, 0), Z_FIXED);
+ compression_strategy = MIN(MAX(compression_strategy, 0), Z_FIXED);
}
}
if( tif )
{
- int width = 0, height = 0, photometric = 0;
+ int wdth = 0, hght = 0, photometric = 0;
m_tif = tif;
- if( TIFFGetField( tif, TIFFTAG_IMAGEWIDTH, &width ) &&
- TIFFGetField( tif, TIFFTAG_IMAGELENGTH, &height ) &&
+ if( TIFFGetField( tif, TIFFTAG_IMAGEWIDTH, &wdth ) &&
+ TIFFGetField( tif, TIFFTAG_IMAGELENGTH, &hght ) &&
TIFFGetField( tif, TIFFTAG_PHOTOMETRIC, &photometric ))
{
int bpp=8, ncn = photometric > 1 ? 3 : 1;
TIFFGetField( tif, TIFFTAG_BITSPERSAMPLE, &bpp );
TIFFGetField( tif, TIFFTAG_SAMPLESPERPIXEL, &ncn );
-
- m_width = width;
- m_height = height;
+
+ m_width = wdth;
+ m_height = hght;
if( bpp > 8 &&
((photometric != 2 && photometric != 1) ||
(ncn != 1 && ncn != 3 && ncn != 4)))
bool color = img.channels() > 1;
uchar* data = img.data;
int step = (int)img.step;
-
+
if( img.depth() != CV_8U && img.depth() != CV_16U && img.depth() != CV_32F && img.depth() != CV_64F )
return false;
default:
{
return false;
- }
+ }
}
-
+
const int bitsPerByte = 8;
size_t fileStep = (width * channels * bitsPerChannel) / bitsPerByte;
int rowsPerStrip = (int)((1 << 13)/fileStep);
{
return false;
}
-
+
// defaults for now, maybe base them on params in the future
int compression = COMPRESSION_LZW;
int predictor = PREDICTOR_HORIZONTAL;
return false;
}
}
-
+
TIFFClose(pTiffHandle);
return true;
}
if( !strm.open(*m_buf) )
return false;
}
- else
+ else
{
#ifdef HAVE_TIFF
return writeLibTiff(img, params);
static vector<ImageDecoder> decoders;
static vector<ImageEncoder> encoders;
-ImageDecoder findDecoder( const string& filename )
+static ImageDecoder findDecoder( const string& filename )
{
size_t i, maxlen = 0;
for( i = 0; i < decoders.size(); i++ )
return ImageDecoder();
}
-ImageDecoder findDecoder( const Mat& buf )
+static ImageDecoder findDecoder( const Mat& buf )
{
size_t i, maxlen = 0;
return ImageDecoder();
}
-ImageEncoder findEncoder( const string& _ext )
+static ImageEncoder findEncoder( const string& _ext )
{
if( _ext.size() <= 1 )
return ImageEncoder();
imdecode_( buf, flags, LOAD_MAT, &img );
return img;
}
-
+
bool imencode( const string& ext, InputArray _image,
vector<uchar>& buf, const vector<int>& params )
{
#ifndef __HIGHGUI_H_
#define __HIGHGUI_H_
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 )
-#endif
-
#include "cvconfig.h"
#include "opencv2/highgui/highgui.hpp"
CV_NO_GUI_ERROR("cvSaveWindowParameters");
}
-CV_IMPL void cvLoadWindowParameterss(const char* name)
-{
- CV_NO_GUI_ERROR("cvLoadWindowParameters");
-}
+// CV_IMPL void cvLoadWindowParameterss(const char* name)
+// {
+// CV_NO_GUI_ERROR("cvLoadWindowParameters");
+// }
CV_IMPL int cvCreateButton(const char*, void (*)(int, void*), void*, int, int)
{
-//IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. \r
+//IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
\r
// By downloading, copying, installing or using the software you agree to this license.\r
// If you do not agree to this license, do not download, install,\r
\r
CV_IMPL CvFont cvFontQt(const char* nameFont, int pointSize,CvScalar color,int weight,int style, int spacing)\r
{\r
- /*\r
- //nameFont <- only Qt\r
- //CvScalar color <- only Qt (blue_component, green_component, red\_component[, alpha_component])\r
- int font_face;//<- style in Qt\r
- const int* ascii;\r
- const int* greek;\r
- const int* cyrillic;\r
- float hscale, vscale;\r
- float shear;\r
- int thickness;//<- weight in Qt\r
- float dx;//spacing letter in Qt (0 default) in pixel\r
- int line_type;//<- pointSize in Qt\r
- */\r
- CvFont f = {nameFont,color,style,NULL,NULL,NULL,0,0,0,weight,spacing,pointSize};\r
- return f;\r
+ /*\r
+ //nameFont <- only Qt\r
+ //CvScalar color <- only Qt (blue_component, green_component, red\_component[, alpha_component])\r
+ int font_face;//<- style in Qt\r
+ const int* ascii;\r
+ const int* greek;\r
+ const int* cyrillic;\r
+ float hscale, vscale;\r
+ float shear;\r
+ int thickness;//<- weight in Qt\r
+ float dx;//spacing letter in Qt (0 default) in pixel\r
+ int line_type;//<- pointSize in Qt\r
+ */\r
+ CvFont f = {nameFont,color,style,NULL,NULL,NULL,0,0,0,weight,spacing,pointSize};\r
+ return f;\r
}\r
\r
\r
CV_IMPL void cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont* font)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "putText",\r
- Qt::AutoConnection,\r
- Q_ARG(void*, (void*) img),\r
- Q_ARG(QString,QString(text)),\r
- Q_ARG(QPoint, QPoint(org.x,org.y)),\r
- Q_ARG(void*,(void*) font));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "putText",\r
+ Qt::AutoConnection,\r
+ Q_ARG(void*, (void*) img),\r
+ Q_ARG(QString,QString(text)),\r
+ Q_ARG(QPoint, QPoint(org.x,org.y)),\r
+ Q_ARG(void*,(void*) font));\r
}\r
\r
\r
double cvGetRatioWindow_QT(const char* name)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- double result = -1;\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "getRatioWindow",\r
- //Qt::DirectConnection,\r
- Qt::AutoConnection,\r
- Q_RETURN_ARG(double, result),\r
- Q_ARG(QString, QString(name)));\r
+ double result = -1;\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "getRatioWindow",\r
+ //Qt::DirectConnection,\r
+ Qt::AutoConnection,\r
+ Q_RETURN_ARG(double, result),\r
+ Q_ARG(QString, QString(name)));\r
\r
- return result;\r
+ return result;\r
}\r
\r
\r
void cvSetRatioWindow_QT(const char* name,double prop_value)\r
{\r
\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "setRatioWindow",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(double, prop_value));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "setRatioWindow",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(double, prop_value));\r
}\r
\r
\r
double cvGetPropWindow_QT(const char* name)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- double result = -1;\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "getPropWindow",\r
- //Qt::DirectConnection,\r
- Qt::AutoConnection,\r
- Q_RETURN_ARG(double, result),\r
- Q_ARG(QString, QString(name)));\r
+ double result = -1;\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "getPropWindow",\r
+ //Qt::DirectConnection,\r
+ Qt::AutoConnection,\r
+ Q_RETURN_ARG(double, result),\r
+ Q_ARG(QString, QString(name)));\r
\r
- return result;\r
+ return result;\r
}\r
\r
\r
void cvSetPropWindow_QT(const char* name,double prop_value)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "setPropWindow",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(double, prop_value));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "setPropWindow",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(double, prop_value));\r
}\r
\r
\r
void cvSetModeWindow_QT(const char* name, double prop_value)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "toggleFullScreen",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(double, prop_value));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "toggleFullScreen",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(double, prop_value));\r
}\r
\r
\r
double cvGetModeWindow_QT(const char* name)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- double result = -1;\r
+ double result = -1;\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "isFullScreen",\r
- Qt::AutoConnection,\r
- Q_RETURN_ARG(double, result),\r
- Q_ARG(QString, QString(name)));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "isFullScreen",\r
+ Qt::AutoConnection,\r
+ Q_RETURN_ARG(double, result),\r
+ Q_ARG(QString, QString(name)));\r
\r
- return result;\r
+ return result;\r
}\r
\r
\r
CV_IMPL void cvDisplayOverlay(const char* name, const char* text, int delayms)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "displayInfo",\r
- Qt::AutoConnection,\r
- //Qt::DirectConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(QString, QString(text)),\r
- Q_ARG(int, delayms));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "displayInfo",\r
+ Qt::AutoConnection,\r
+ //Qt::DirectConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(QString, QString(text)),\r
+ Q_ARG(int, delayms));\r
}\r
\r
\r
CV_IMPL void cvSaveWindowParameters(const char* name)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "saveWindowParameters",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name)));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "saveWindowParameters",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name)));\r
}\r
\r
\r
CV_IMPL void cvLoadWindowParameters(const char* name)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "loadWindowParameters",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name)));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "loadWindowParameters",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name)));\r
}\r
\r
\r
CV_IMPL void cvDisplayStatusBar(const char* name, const char* text, int delayms)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "displayStatusBar",\r
- Qt::AutoConnection,\r
- //Qt::DirectConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(QString, QString(text)),\r
- Q_ARG(int, delayms));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "displayStatusBar",\r
+ Qt::AutoConnection,\r
+ //Qt::DirectConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(QString, QString(text)),\r
+ Q_ARG(int, delayms));\r
}\r
\r
\r
//We recommend not using this function for now\r
CV_IMPL int cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[])\r
{\r
- multiThreads = true;\r
- QFuture<int> future = QtConcurrent::run(pt2Func, argc, argv);\r
- return guiMainThread->start();\r
+ multiThreads = true;\r
+ QFuture<int> future = QtConcurrent::run(pt2Func, argc, argv);\r
+ return guiMainThread->start();\r
}\r
\r
\r
CV_IMPL void cvStopLoop()\r
{\r
- qApp->exit();\r
+ qApp->exit();\r
}\r
\r
\r
-CvWindow* icvFindWindowByName(QString name)\r
+static CvWindow* icvFindWindowByName(QString name)\r
{\r
CvWindow* window = 0;\r
\r
//that can be grabbed here and crash the code at 'w->param_name==name'.\r
foreach (QWidget* widget, QApplication::topLevelWidgets())\r
{\r
- if (widget->isWindow() && !widget->parentWidget())//is a window without parent\r
- {\r
- CvWinModel* temp = (CvWinModel*) widget;\r
+ if (widget->isWindow() && !widget->parentWidget())//is a window without parent\r
+ {\r
+ CvWinModel* temp = (CvWinModel*) widget;\r
\r
- if (temp->type == type_CvWindow)\r
- {\r
- CvWindow* w = (CvWindow*) temp;\r
+ if (temp->type == type_CvWindow)\r
+ {\r
+ CvWindow* w = (CvWindow*) temp;\r
if (w->windowTitle() == name)\r
- {\r
- window = w;\r
- break;\r
- }\r
- }\r
- }\r
- } \r
+ {\r
+ window = w;\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ }\r
\r
return window;\r
}\r
\r
\r
-CvBar* icvFindBarByName(QBoxLayout* layout, QString name_bar, typeBar type)\r
+static CvBar* icvFindBarByName(QBoxLayout* layout, QString name_bar, typeBar type)\r
{\r
if (!layout)\r
- return NULL;\r
+ return NULL;\r
\r
int stop_index = layout->layout()->count();\r
\r
for (int i = 0; i < stop_index; ++i)\r
{\r
- CvBar* t = (CvBar*) layout->layout()->itemAt(i);\r
+ CvBar* t = (CvBar*) layout->layout()->itemAt(i);\r
\r
- if (t->type == type && t->name_bar == name_bar)\r
- return t;\r
+ if (t->type == type && t->name_bar == name_bar)\r
+ return t;\r
}\r
\r
return NULL;\r
}\r
\r
\r
-CvTrackbar* icvFindTrackBarByName(const char* name_trackbar, const char* name_window, QBoxLayout* layout = NULL)\r
+static CvTrackbar* icvFindTrackBarByName(const char* name_trackbar, const char* name_window, QBoxLayout* layout = NULL)\r
{\r
QString nameQt(name_trackbar);\r
\r
if (!name_window && global_control_panel) //window name is null and we have a control panel\r
- layout = global_control_panel->myLayout;\r
+ layout = global_control_panel->myLayout;\r
\r
if (!layout)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(QLatin1String(name_window));\r
+ QPointer<CvWindow> w = icvFindWindowByName(QLatin1String(name_window));\r
\r
- if (!w)\r
- CV_Error(CV_StsNullPtr, "NULL window handler");\r
+ if (!w)\r
+ CV_Error(CV_StsNullPtr, "NULL window handler");\r
\r
- if (w->param_gui_mode == CV_GUI_NORMAL)\r
- return (CvTrackbar*) icvFindBarByName(w->myBarLayout, nameQt, type_CvTrackbar);\r
+ if (w->param_gui_mode == CV_GUI_NORMAL)\r
+ return (CvTrackbar*) icvFindBarByName(w->myBarLayout, nameQt, type_CvTrackbar);\r
\r
- if (w->param_gui_mode == CV_GUI_EXPANDED)\r
- {\r
- CvBar* result = icvFindBarByName(w->myBarLayout, nameQt, type_CvTrackbar);\r
+ if (w->param_gui_mode == CV_GUI_EXPANDED)\r
+ {\r
+ CvBar* result = icvFindBarByName(w->myBarLayout, nameQt, type_CvTrackbar);\r
\r
- if (result)\r
- return (CvTrackbar*) result;\r
+ if (result)\r
+ return (CvTrackbar*) result;\r
\r
- return (CvTrackbar*) icvFindBarByName(global_control_panel->myLayout, nameQt, type_CvTrackbar);\r
- }\r
+ return (CvTrackbar*) icvFindBarByName(global_control_panel->myLayout, nameQt, type_CvTrackbar);\r
+ }\r
\r
- return NULL;\r
+ return NULL;\r
}\r
else\r
{\r
- //layout was specified\r
- return (CvTrackbar*) icvFindBarByName(layout, nameQt, type_CvTrackbar);\r
+ //layout was specified\r
+ return (CvTrackbar*) icvFindBarByName(layout, nameQt, type_CvTrackbar);\r
}\r
}\r
\r
-\r
-CvButtonbar* icvFindButtonBarByName(const char* button_name, QBoxLayout* layout)\r
+/*\r
+static CvButtonbar* icvFindButtonBarByName(const char* button_name, QBoxLayout* layout)\r
{\r
QString nameQt(button_name);\r
return (CvButtonbar*) icvFindBarByName(layout, nameQt, type_CvButtonbar);\r
}\r
+*/\r
\r
-\r
-int icvInitSystem(int* c, char** v)\r
+static int icvInitSystem(int* c, char** v)\r
{\r
//"For any GUI application using Qt, there is precisely one QApplication object"\r
if (!QApplication::instance())\r
{\r
- new QApplication(*c, v);\r
+ new QApplication(*c, v);\r
\r
- qDebug() << "init done";\r
+ qDebug() << "init done";\r
\r
#ifdef HAVE_QT_OPENGL\r
- qDebug() << "opengl support available";\r
+ qDebug() << "opengl support available";\r
#endif\r
}\r
\r
\r
CV_IMPL int cvInitSystem(int, char**)\r
{\r
-    icvInitSystem(&parameterSystemC, parameterSystemV);\r
- return 0;\r
+    icvInitSystem(&parameterSystemC, parameterSystemV);\r
+ return 0;\r
}\r
\r
\r
CV_IMPL int cvNamedWindow(const char* name, int flags)\r
{\r
- if (!guiMainThread)\r
- guiMainThread = new GuiReceiver;\r
+ if (!guiMainThread)\r
+ guiMainThread = new GuiReceiver;\r
\r
- if (multiThreads)\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "createWindow",\r
- Qt::BlockingQueuedConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(int, flags));\r
- else\r
- guiMainThread->createWindow(QString(name), flags);\r
+ if (multiThreads)\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "createWindow",\r
+ Qt::BlockingQueuedConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(int, flags));\r
+ else\r
+ guiMainThread->createWindow(QString(name), flags);\r
\r
- return 1; //Dummy value\r
+ return 1; //Dummy value\r
}\r
\r
\r
CV_IMPL void cvDestroyWindow(const char* name)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "destroyWindow",\r
- //Qt::BlockingQueuedConnection,\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name)));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "destroyWindow",\r
+ //Qt::BlockingQueuedConnection,\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name)));\r
}\r
\r
\r
CV_IMPL void cvDestroyAllWindows()\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "destroyAllWindow",\r
- //Qt::BlockingQueuedConnection,\r
- Qt::AutoConnection);\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "destroyAllWindow",\r
+ //Qt::BlockingQueuedConnection,\r
+ Qt::AutoConnection);\r
}\r
\r
\r
CV_IMPL void* cvGetWindowHandle(const char* name)\r
{\r
- if (!name)\r
- CV_Error( CV_StsNullPtr, "NULL name string" );\r
+ if (!name)\r
+ CV_Error( CV_StsNullPtr, "NULL name string" );\r
\r
- return (void*) icvFindWindowByName(QLatin1String(name));\r
+ return (void*) icvFindWindowByName(QLatin1String(name));\r
}\r
\r
\r
CV_IMPL const char* cvGetWindowName(void* window_handle)\r
{\r
- if( !window_handle )\r
- CV_Error( CV_StsNullPtr, "NULL window handler" );\r
+ if( !window_handle )\r
+ CV_Error( CV_StsNullPtr, "NULL window handler" );\r
\r
- return ((CvWindow*)window_handle)->windowTitle().toLatin1().data();\r
+ return ((CvWindow*)window_handle)->windowTitle().toLatin1().data();\r
}\r
\r
\r
CV_IMPL void cvMoveWindow(const char* name, int x, int y)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "moveWindow",\r
- //Qt::BlockingQueuedConnection,\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(int, x),\r
- Q_ARG(int, y));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "moveWindow",\r
+ //Qt::BlockingQueuedConnection,\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(int, x),\r
+ Q_ARG(int, y));\r
}\r
\r
\r
CV_IMPL void cvResizeWindow(const char* name, int width, int height)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "resizeWindow",\r
- //Qt::BlockingQueuedConnection,\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(int, width),\r
- Q_ARG(int, height));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "resizeWindow",\r
+ //Qt::BlockingQueuedConnection,\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(int, width),\r
+ Q_ARG(int, height));\r
}\r
\r
\r
CV_IMPL int cvCreateTrackbar2(const char* name_bar, const char* window_name, int* val, int count, CvTrackbarCallback2 on_notify, void* userdata)\r
{\r
- if (!guiMainThread) \r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" ); \r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread, \r
- "addSlider2", \r
- Qt::AutoConnection, \r
- Q_ARG(QString, QString(name_bar)), \r
- Q_ARG(QString, QString(window_name)), \r
- Q_ARG(void*, (void*)val), \r
- Q_ARG(int, count), \r
- Q_ARG(void*, (void*)on_notify), \r
- Q_ARG(void*, (void*)userdata)); \r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "addSlider2",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name_bar)),\r
+ Q_ARG(QString, QString(window_name)),\r
+ Q_ARG(void*, (void*)val),\r
+ Q_ARG(int, count),\r
+ Q_ARG(void*, (void*)on_notify),\r
+ Q_ARG(void*, (void*)userdata));\r
\r
- return 1; //dummy value \r
+ return 1; //dummy value\r
}\r
\r
\r
CV_IMPL int cvStartWindowThread()\r
{\r
- return 0;\r
+ return 0;\r
}\r
\r
\r
CV_IMPL int cvCreateTrackbar(const char* name_bar, const char* window_name, int* value, int count, CvTrackbarCallback on_change)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "addSlider",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(name_bar)),\r
- Q_ARG(QString, QString(window_name)),\r
- Q_ARG(void*, (void*)value),\r
- Q_ARG(int, count),\r
- Q_ARG(void*, (void*)on_change));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "addSlider",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(name_bar)),\r
+ Q_ARG(QString, QString(window_name)),\r
+ Q_ARG(void*, (void*)value),\r
+ Q_ARG(int, count),\r
+ Q_ARG(void*, (void*)on_change));\r
\r
- return 1; //dummy value\r
+ return 1; //dummy value\r
}\r
\r
\r
CV_IMPL int cvCreateButton(const char* button_name, CvButtonCallback on_change, void* userdata, int button_type, int initial_button_state)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- if (initial_button_state < 0 || initial_button_state > 1)\r
- return 0;\r
+ if (initial_button_state < 0 || initial_button_state > 1)\r
+ return 0;\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "addButton",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(button_name)),\r
- Q_ARG(int, button_type),\r
- Q_ARG(int, initial_button_state),\r
- Q_ARG(void*, (void*)on_change),\r
- Q_ARG(void*, userdata));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "addButton",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(button_name)),\r
+ Q_ARG(int, button_type),\r
+ Q_ARG(int, initial_button_state),\r
+ Q_ARG(void*, (void*)on_change),\r
+ Q_ARG(void*, userdata));\r
\r
- return 1;//dummy value\r
+ return 1;//dummy value\r
}\r
\r
\r
CV_IMPL int cvGetTrackbarPos(const char* name_bar, const char* window_name)\r
{\r
- int result = -1;\r
+ int result = -1;\r
\r
- QPointer<CvTrackbar> t = icvFindTrackBarByName(name_bar, window_name);\r
+ QPointer<CvTrackbar> t = icvFindTrackBarByName(name_bar, window_name);\r
\r
- if (t)\r
- result = t->slider->value();\r
+ if (t)\r
+ result = t->slider->value();\r
\r
- return result;\r
+ return result;\r
}\r
\r
\r
CV_IMPL void cvSetTrackbarPos(const char* name_bar, const char* window_name, int pos)\r
{\r
- QPointer<CvTrackbar> t = icvFindTrackBarByName(name_bar, window_name);\r
+ QPointer<CvTrackbar> t = icvFindTrackBarByName(name_bar, window_name);\r
\r
- if (t)\r
- t->slider->setValue(pos);\r
+ if (t)\r
+ t->slider->setValue(pos);\r
}\r
\r
\r
/* assign callback for mouse events */\r
CV_IMPL void cvSetMouseCallback(const char* window_name, CvMouseCallback on_mouse, void* param)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(QLatin1String(window_name));\r
+ QPointer<CvWindow> w = icvFindWindowByName(QLatin1String(window_name));\r
\r
- if (!w)\r
- CV_Error(CV_StsNullPtr, "NULL window handler");\r
+ if (!w)\r
+ CV_Error(CV_StsNullPtr, "NULL window handler");\r
\r
- w->setMouseCallBack(on_mouse, param);\r
+ w->setMouseCallBack(on_mouse, param);\r
\r
}\r
\r
\r
CV_IMPL void cvShowImage(const char* name, const CvArr* arr)\r
{\r
- if (!guiMainThread)\r
- guiMainThread = new GuiReceiver;\r
+ if (!guiMainThread)\r
+ guiMainThread = new GuiReceiver;\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "showImage",\r
- //Qt::BlockingQueuedConnection,\r
- Qt::DirectConnection,\r
- Q_ARG(QString, QString(name)),\r
- Q_ARG(void*, (void*)arr));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "showImage",\r
+ //Qt::BlockingQueuedConnection,\r
+ Qt::DirectConnection,\r
+ Q_ARG(QString, QString(name)),\r
+ Q_ARG(void*, (void*)arr));\r
}\r
\r
\r
\r
CV_IMPL void cvSetOpenGlDrawCallback(const char* window_name, CvOpenGlDrawCallback callback, void* userdata)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "setOpenGlDrawCallback",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(window_name)),\r
- Q_ARG(void*, (void*)callback),\r
- Q_ARG(void*, userdata));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "setOpenGlDrawCallback",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(window_name)),\r
+ Q_ARG(void*, (void*)callback),\r
+ Q_ARG(void*, userdata));\r
}\r
\r
\r
void icvSetOpenGlCleanCallback(const char* window_name, CvOpenGlCleanCallback callback, void* userdata)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "setOpenGlCleanCallback",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(window_name)),\r
- Q_ARG(void*, (void*)callback),\r
- Q_ARG(void*, userdata));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "setOpenGlCleanCallback",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(window_name)),\r
+ Q_ARG(void*, (void*)callback),\r
+ Q_ARG(void*, userdata));\r
}\r
\r
\r
CV_IMPL void cvSetOpenGlContext(const char* window_name)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "setOpenGlContext",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(window_name)));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "setOpenGlContext",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(window_name)));\r
}\r
\r
\r
CV_IMPL void cvUpdateWindow(const char* window_name)\r
{\r
- if (!guiMainThread)\r
- CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+ if (!guiMainThread)\r
+ CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "updateWindow",\r
- Qt::AutoConnection,\r
- Q_ARG(QString, QString(window_name)));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "updateWindow",\r
+ Qt::AutoConnection,\r
+ Q_ARG(QString, QString(window_name)));\r
}\r
\r
#endif\r
\r
double cvGetOpenGlProp_QT(const char* name)\r
{\r
- double result = -1;\r
+ double result = -1;\r
\r
- if (guiMainThread)\r
+ if (guiMainThread)\r
{\r
- QMetaObject::invokeMethod(guiMainThread,\r
- "isOpenGl",\r
- Qt::AutoConnection,\r
- Q_RETURN_ARG(double, result),\r
- Q_ARG(QString, QString(name)));\r
+ QMetaObject::invokeMethod(guiMainThread,\r
+ "isOpenGl",\r
+ Qt::AutoConnection,\r
+ Q_RETURN_ARG(double, result),\r
+ Q_ARG(QString, QString(name)));\r
}\r
\r
- return result;\r
+ return result;\r
}\r
\r
\r
\r
GuiReceiver::GuiReceiver() : bTimeOut(false), nb_windows(0)\r
{\r
- doesExternalQAppExist = (QApplication::instance() != 0);\r
-    icvInitSystem(&parameterSystemC, parameterSystemV);\r
+ doesExternalQAppExist = (QApplication::instance() != 0);\r
+    icvInitSystem(&parameterSystemC, parameterSystemV);\r
\r
- timer = new QTimer(this);\r
- QObject::connect(timer, SIGNAL(timeout()), this, SLOT(timeOut()));\r
- timer->setSingleShot(true);\r
+ timer = new QTimer(this);\r
+ QObject::connect(timer, SIGNAL(timeout()), this, SLOT(timeOut()));\r
+ timer->setSingleShot(true);\r
}\r
\r
\r
void GuiReceiver::isLastWindow()\r
{\r
- if (--nb_windows <= 0)\r
- {\r
- delete guiMainThread;//delete global_control_panel too\r
- guiMainThread = NULL;\r
+ if (--nb_windows <= 0)\r
+ {\r
+ delete guiMainThread;//delete global_control_panel too\r
+ guiMainThread = NULL;\r
\r
- if (!doesExternalQAppExist)\r
- {\r
- qApp->quit();\r
- }\r
- }\r
+ if (!doesExternalQAppExist)\r
+ {\r
+ qApp->quit();\r
+ }\r
+ }\r
}\r
\r
\r
GuiReceiver::~GuiReceiver()\r
-{ \r
- if (global_control_panel)\r
- {\r
- delete global_control_panel;\r
- global_control_panel = NULL;\r
- }\r
+{\r
+ if (global_control_panel)\r
+ {\r
+ delete global_control_panel;\r
+ global_control_panel = NULL;\r
+ }\r
}\r
\r
\r
void GuiReceiver::putText(void* arr, QString text, QPoint org, void* arg2)\r
{\r
- CV_Assert(arr);\r
+ CV_Assert(arr);\r
\r
- CvMat* mat, stub;\r
- mat = cvGetMat(arr, &stub);\r
+ CvMat* mat, stub;\r
+ mat = cvGetMat(arr, &stub);\r
\r
- int nbChannelOriginImage = cvGetElemType(mat);\r
- if (nbChannelOriginImage != CV_8UC3) return; //for now, font works only with 8UC3\r
+ int nbChannelOriginImage = cvGetElemType(mat);\r
+ if (nbChannelOriginImage != CV_8UC3) return; //for now, font works only with 8UC3\r
\r
- QImage qimg(mat->data.ptr, mat->cols, mat->rows, mat->step, QImage::Format_RGB888);\r
+ QImage qimg(mat->data.ptr, mat->cols, mat->rows, mat->step, QImage::Format_RGB888);\r
\r
- CvFont* font = (CvFont*)arg2;\r
+ CvFont* font = (CvFont*)arg2;\r
\r
- QPainter qp(&qimg);\r
- if (font)\r
- {\r
- QFont f(font->nameFont, font->line_type/*PointSize*/, font->thickness/*weight*/);\r
- f.setStyle((QFont::Style) font->font_face/*style*/);\r
- f.setLetterSpacing(QFont::AbsoluteSpacing, font->dx/*spacing*/);\r
- //cvScalar(blue_component, green_component, red_component[, alpha_component])\r
- //Qt map non-transparent to 0xFF and transparent to 0\r
- //OpenCV scalar is the reverse, so 255-font->color.val[3]\r
- qp.setPen(QColor(font->color.val[2], font->color.val[1], font->color.val[0], 255 - font->color.val[3]));\r
- qp.setFont(f);\r
- }\r
- qp.drawText(org, text);\r
- qp.end();\r
+ QPainter qp(&qimg);\r
+ if (font)\r
+ {\r
+ QFont f(font->nameFont, font->line_type/*PointSize*/, font->thickness/*weight*/);\r
+ f.setStyle((QFont::Style) font->font_face/*style*/);\r
+ f.setLetterSpacing(QFont::AbsoluteSpacing, font->dx/*spacing*/);\r
+ //cvScalar(blue_component, green_component, red_component[, alpha_component])\r
+ //Qt maps non-transparent to 0xFF and transparent to 0\r
+ //OpenCV scalar is the reverse, so 255-font->color.val[3]\r
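+ //e.g. an OpenCV alpha of 0 becomes 255 in Qt, i.e. fully opaque\r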
+ qp.setPen(QColor(font->color.val[2], font->color.val[1], font->color.val[0], 255 - font->color.val[3]));\r
+ qp.setFont(f);\r
+ }\r
+ qp.drawText(org, text);\r
+ qp.end();\r
}\r
\r
\r
void GuiReceiver::saveWindowParameters(QString name)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->writeSettings();\r
+ if (w)\r
+ w->writeSettings();\r
}\r
\r
\r
void GuiReceiver::loadWindowParameters(QString name)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->readSettings();\r
+ if (w)\r
+ w->readSettings();\r
}\r
\r
\r
double GuiReceiver::getRatioWindow(QString name)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (!w)\r
- return -1;\r
+ if (!w)\r
+ return -1;\r
\r
- return w->getRatio();\r
+ return w->getRatio();\r
}\r
\r
\r
void GuiReceiver::setRatioWindow(QString name, double arg2)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName( name.toLatin1().data() );\r
+ QPointer<CvWindow> w = icvFindWindowByName( name.toLatin1().data() );\r
+\r
+ if (!w)\r
+ return;\r
\r
- if (!w)\r
- return;\r
- \r
- int flags = (int) arg2;\r
+ int flags = (int) arg2;\r
\r
w->setRatio(flags);\r
}\r
\r
double GuiReceiver::getPropWindow(QString name)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (!w)\r
- return -1;\r
+ if (!w)\r
+ return -1;\r
\r
- return (double) w->getPropWindow();\r
+ return (double) w->getPropWindow();\r
}\r
\r
\r
void GuiReceiver::setPropWindow(QString name, double arg2)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (!w)\r
- return;\r
+ if (!w)\r
+ return;\r
\r
- int flags = (int) arg2;\r
+ int flags = (int) arg2;\r
\r
w->setPropWindow(flags);\r
}\r
\r
double GuiReceiver::isFullScreen(QString name)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (!w)\r
- return -1;\r
+ if (!w)\r
+ return -1;\r
\r
return w->isFullScreen() ? CV_WINDOW_FULLSCREEN : CV_WINDOW_NORMAL;\r
}\r
\r
void GuiReceiver::toggleFullScreen(QString name, double arg2)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (!w)\r
- return;\r
+ if (!w)\r
+ return;\r
\r
- int flags = (int) arg2;\r
+ int flags = (int) arg2;\r
\r
w->toggleFullScreen(flags);\r
}\r
\r
void GuiReceiver::createWindow(QString name, int flags)\r
{\r
- if (!qApp)\r
- CV_Error(CV_StsNullPtr, "NULL session handler" );\r
+ if (!qApp)\r
+ CV_Error(CV_StsNullPtr, "NULL session handler" );\r
\r
- // Check the name in the storage\r
- if (icvFindWindowByName(name.toLatin1().data()))\r
- {\r
- return;\r
- }\r
+ // Check the name in the storage\r
+ if (icvFindWindowByName(name.toLatin1().data()))\r
+ {\r
+ return;\r
+ }\r
\r
- nb_windows++;\r
- new CvWindow(name, flags);\r
+ nb_windows++;\r
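+ //the new window is not stored here: CvWindow becomes a Qt top-level widget\r
+ //and is looked up later by name through icvFindWindowByName()\r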
+ new CvWindow(name, flags);\r
}\r
\r
\r
void GuiReceiver::timeOut()\r
{\r
- bTimeOut = true;\r
+ bTimeOut = true;\r
}\r
\r
\r
void GuiReceiver::displayInfo(QString name, QString text, int delayms)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->displayInfo(text, delayms);\r
+ if (w)\r
+ w->displayInfo(text, delayms);\r
}\r
\r
\r
void GuiReceiver::displayStatusBar(QString name, QString text, int delayms)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->displayStatusBar(text, delayms);\r
+ if (w)\r
+ w->displayStatusBar(text, delayms);\r
}\r
\r
\r
void GuiReceiver::showImage(QString name, void* arr)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (!w) //as observed in the previous implementation (W32, GTK or Carbon), create a new window is the pointer returned is null\r
- {\r
- cvNamedWindow(name.toLatin1().data());\r
- w = icvFindWindowByName(name);\r
- }\r
+ if (!w) //as observed in the previous implementations (W32, GTK or Carbon), create a new window if the pointer returned is null\r
+ {\r
+ cvNamedWindow(name.toLatin1().data());\r
+ w = icvFindWindowByName(name);\r
+ }\r
\r
if (!w || !arr)\r
return; // keep silence here.\r
if (w->isOpenGl())\r
{\r
CvMat* mat, stub;\r
\r
- mat = cvGetMat(arr, &stub);\r
+ mat = cvGetMat(arr, &stub);\r
\r
cv::Mat im(mat);\r
cv::imshow(name.toStdString(), im);\r
}\r
else\r
{\r
- w->updateImage(arr);\r
+ w->updateImage(arr);\r
}\r
\r
- if (w->isHidden())\r
- w->show();\r
+ if (w->isHidden())\r
+ w->show();\r
}\r
\r
\r
void GuiReceiver::destroyWindow(QString name)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- {\r
- w->close();\r
+ if (w)\r
+ {\r
+ w->close();\r
\r
- //in not-multiThreads mode, looks like the window is hidden but not deleted\r
- //so I do it manually\r
- //otherwise QApplication do it for me if the exec command was executed (in multiThread mode)\r
- if (!multiThreads)\r
- delete w;\r
- }\r
+ //in non-multithreaded mode the window only gets hidden, not deleted,\r
+ //so delete it manually here;\r
+ //otherwise QApplication deletes it for us once exec() has been called (multithreaded mode)\r
+ if (!multiThreads)\r
+ delete w;\r
+ }\r
}\r
\r
\r
void GuiReceiver::destroyAllWindow()\r
{\r
- if (!qApp)\r
- CV_Error(CV_StsNullPtr, "NULL session handler" );\r
-\r
- if (multiThreads)\r
- {\r
- // WARNING: this could even close windows from an external parent app\r
- //#TODO check externalQAppExists and in case it does, close windows carefully,\r
- // i.e. apply the className-check from below...\r
- qApp->closeAllWindows();\r
- }\r
- else\r
- {\r
- bool isWidgetDeleted = true;\r
- while(isWidgetDeleted)\r
- {\r
- isWidgetDeleted = false;\r
- QWidgetList list = QApplication::topLevelWidgets();\r
- for (int i = 0; i < list.count(); i++)\r
- {\r
- QObject *obj = list.at(i);\r
- if (obj->metaObject()->className() == QString("CvWindow"))\r
- {\r
- delete obj;\r
- isWidgetDeleted = true;\r
- break;\r
- }\r
- }\r
- }\r
- }\r
+ if (!qApp)\r
+ CV_Error(CV_StsNullPtr, "NULL session handler" );\r
+\r
+ if (multiThreads)\r
+ {\r
+ // WARNING: this could even close windows from an external parent app\r
+ //#TODO check externalQAppExists and in case it does, close windows carefully,\r
+ // i.e. apply the className-check from below...\r
+ qApp->closeAllWindows();\r
+ }\r
+ else\r
+ {\r
+ bool isWidgetDeleted = true;\r
+ while(isWidgetDeleted)\r
+ {\r
+ isWidgetDeleted = false;\r
+ QWidgetList list = QApplication::topLevelWidgets();\r
+ for (int i = 0; i < list.count(); i++)\r
+ {\r
+ QObject *obj = list.at(i);\r
+ if (obj->metaObject()->className() == QString("CvWindow"))\r
+ {\r
+ delete obj;\r
+ isWidgetDeleted = true;\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ }\r
}\r
\r
\r
void GuiReceiver::moveWindow(QString name, int x, int y)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->move(x, y);\r
+ if (w)\r
+ w->move(x, y);\r
}\r
\r
\r
void GuiReceiver::resizeWindow(QString name, int width, int height)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- {\r
- w->showNormal();\r
+ if (w)\r
+ {\r
+ w->showNormal();\r
w->setViewportSize(QSize(width, height));\r
- }\r
+ }\r
}\r
\r
\r
void GuiReceiver::enablePropertiesButtonEachWindow()\r
{\r
- //For each window, enable window property button\r
- foreach (QWidget* widget, QApplication::topLevelWidgets())\r
- {\r
- if (widget->isWindow() && !widget->parentWidget()) //is a window without parent\r
- {\r
- CvWinModel* temp = (CvWinModel*) widget;\r
- if (temp->type == type_CvWindow)\r
- {\r
- CvWindow* w = (CvWindow*) widget;\r
+ //For each window, enable window property button\r
+ foreach (QWidget* widget, QApplication::topLevelWidgets())\r
+ {\r
+ if (widget->isWindow() && !widget->parentWidget()) //is a window without parent\r
+ {\r
+ CvWinModel* temp = (CvWinModel*) widget;\r
+ if (temp->type == type_CvWindow)\r
+ {\r
+ CvWindow* w = (CvWindow*) widget;\r
\r
- //active window properties button\r
- w->enablePropertiesButton();\r
- }\r
- }\r
- }\r
+ //activate the window properties button\r
+ w->enablePropertiesButton();\r
+ }\r
+ }\r
+ }\r
}\r
\r
\r
void GuiReceiver::addButton(QString button_name, int button_type, int initial_button_state, void* on_change, void* userdata)\r
{\r
- if (!global_control_panel)\r
- return;\r
+ if (!global_control_panel)\r
+ return;\r
\r
- QPointer<CvButtonbar> b;\r
+ QPointer<CvButtonbar> b;\r
\r
- if (global_control_panel->myLayout->count() == 0) //if that is the first button attach to the control panel, create a new button bar\r
- {\r
- b = CvWindow::createButtonBar(button_name); //the bar has the name of the first button attached to it\r
- enablePropertiesButtonEachWindow();\r
+ if (global_control_panel->myLayout->count() == 0) //if this is the first button attached to the control panel, create a new button bar\r
+ {\r
+ b = CvWindow::createButtonBar(button_name); //the bar has the name of the first button attached to it\r
+ enablePropertiesButtonEachWindow();\r
\r
- }\r
+ }\r
else\r
{\r
- CvBar* lastbar = (CvBar*) global_control_panel->myLayout->itemAt(global_control_panel->myLayout->count() - 1);\r
+ CvBar* lastbar = (CvBar*) global_control_panel->myLayout->itemAt(global_control_panel->myLayout->count() - 1);\r
\r
- if (lastbar->type == type_CvTrackbar) //if last bar is a trackbar, create a new buttonbar, else, attach to the current bar\r
- b = CvWindow::createButtonBar(button_name); //the bar has the name of the first button attached to it\r
- else\r
- b = (CvButtonbar*) lastbar;\r
+ if (lastbar->type == type_CvTrackbar) //if the last bar is a trackbar, create a new buttonbar; otherwise attach to the current bar\r
+ b = CvWindow::createButtonBar(button_name); //the bar has the name of the first button attached to it\r
+ else\r
+ b = (CvButtonbar*) lastbar;\r
\r
- }\r
+ }\r
\r
- b->addButton(button_name, (CvButtonCallback) on_change, userdata, button_type, initial_button_state);\r
+ b->addButton(button_name, (CvButtonCallback) on_change, userdata, button_type, initial_button_state);\r
}\r
\r
\r
void GuiReceiver::addSlider2(QString bar_name, QString window_name, void* value, int count, void* on_change, void *userdata)\r
{\r
- QBoxLayout *layout = NULL;\r
- QPointer<CvWindow> w;\r
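+ //an empty window name attaches the trackbar to the global control panel,\r
+ //otherwise it goes into the named window's own bar layout\r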
+ QBoxLayout *layout = NULL;\r
+ QPointer<CvWindow> w;\r
\r
if (!window_name.isEmpty())\r
- {\r
- w = icvFindWindowByName(window_name);\r
+ {\r
+ w = icvFindWindowByName(window_name);\r
\r
- if (!w)\r
- return;\r
- }\r
+ if (!w)\r
+ return;\r
+ }\r
else\r
{\r
- if (global_control_panel)\r
- layout = global_control_panel->myLayout;\r
- }\r
+ if (global_control_panel)\r
+ layout = global_control_panel->myLayout;\r
+ }\r
\r
- QPointer<CvTrackbar> t = icvFindTrackBarByName(bar_name.toLatin1().data(), window_name.toLatin1().data(), layout);\r
+ QPointer<CvTrackbar> t = icvFindTrackBarByName(bar_name.toLatin1().data(), window_name.toLatin1().data(), layout);\r
\r
- if (t) //trackbar exists\r
- return;\r
+ if (t) //trackbar exists\r
+ return;\r
\r
- if (!value)\r
- CV_Error(CV_StsNullPtr, "NULL value pointer" );\r
+ if (!value)\r
+ CV_Error(CV_StsNullPtr, "NULL value pointer" );\r
\r
- if (count <= 0) //count is the max value of the slider, so must be bigger than 0\r
- CV_Error(CV_StsNullPtr, "Max value of the slider must be bigger than 0" );\r
+ if (count <= 0) //count is the max value of the slider, so must be bigger than 0\r
+ CV_Error(CV_StsNullPtr, "Max value of the slider must be bigger than 0" );\r
\r
- CvWindow::addSlider2(w, bar_name, (int*)value, count, (CvTrackbarCallback2) on_change, userdata);\r
+ CvWindow::addSlider2(w, bar_name, (int*)value, count, (CvTrackbarCallback2) on_change, userdata);\r
}\r
\r
\r
void GuiReceiver::addSlider(QString bar_name, QString window_name, void* value, int count, void* on_change)\r
{\r
- QBoxLayout *layout = NULL;\r
- QPointer<CvWindow> w;\r
+ QBoxLayout *layout = NULL;\r
+ QPointer<CvWindow> w;\r
\r
- if (!window_name.isEmpty())\r
- {\r
- w = icvFindWindowByName(window_name);\r
+ if (!window_name.isEmpty())\r
+ {\r
+ w = icvFindWindowByName(window_name);\r
\r
- if (!w)\r
- return;\r
- }\r
+ if (!w)\r
+ return;\r
+ }\r
else\r
{\r
- if (global_control_panel)\r
- layout = global_control_panel->myLayout;\r
- }\r
+ if (global_control_panel)\r
+ layout = global_control_panel->myLayout;\r
+ }\r
\r
- QPointer<CvTrackbar> t = icvFindTrackBarByName(bar_name.toLatin1().data(), window_name.toLatin1().data(), layout);\r
+ QPointer<CvTrackbar> t = icvFindTrackBarByName(bar_name.toLatin1().data(), window_name.toLatin1().data(), layout);\r
\r
- if (t) //trackbar exists\r
- return;\r
+ if (t) //trackbar exists\r
+ return;\r
\r
- if (!value)\r
- CV_Error(CV_StsNullPtr, "NULL value pointer" );\r
+ if (!value)\r
+ CV_Error(CV_StsNullPtr, "NULL value pointer" );\r
\r
- if (count <= 0) //count is the max value of the slider, so must be bigger than 0\r
- CV_Error(CV_StsNullPtr, "Max value of the slider must be bigger than 0" );\r
+ if (count <= 0) //count is the max value of the slider, so must be bigger than 0\r
+ CV_Error(CV_StsNullPtr, "Max value of the slider must be bigger than 0" );\r
\r
- CvWindow::addSlider(w, bar_name, (int*)value, count, (CvTrackbarCallback) on_change);\r
+ CvWindow::addSlider(w, bar_name, (int*)value, count, (CvTrackbarCallback) on_change);\r
}\r
\r
\r
int GuiReceiver::start()\r
{\r
- return qApp->exec();\r
+ return qApp->exec();\r
}\r
\r
\r
void GuiReceiver::setOpenGlDrawCallback(QString name, void* callback, void* userdata)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->setOpenGlDrawCallback((CvOpenGlDrawCallback) callback, userdata);\r
+ if (w)\r
+ w->setOpenGlDrawCallback((CvOpenGlDrawCallback) callback, userdata);\r
}\r
\r
void GuiReceiver::setOpenGlCleanCallback(QString name, void* callback, void* userdata)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->setOpenGlCleanCallback((CvOpenGlCleanCallback) callback, userdata);\r
+ if (w)\r
+ w->setOpenGlCleanCallback((CvOpenGlCleanCallback) callback, userdata);\r
}\r
\r
void GuiReceiver::setOpenGlContext(QString name)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->makeCurrentOpenGlContext();\r
+ if (w)\r
+ w->makeCurrentOpenGlContext();\r
}\r
\r
void GuiReceiver::updateWindow(QString name)\r
{\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- w->updateGl();\r
+ if (w)\r
+ w->updateGl();\r
}\r
\r
double GuiReceiver::isOpenGl(QString name)\r
{\r
double result = -1;\r
\r
- QPointer<CvWindow> w = icvFindWindowByName(name);\r
+ QPointer<CvWindow> w = icvFindWindowByName(name);\r
\r
- if (w)\r
- result = (double) w->isOpenGl();\r
+ if (w)\r
+ result = (double) w->isOpenGl();\r
\r
return result;\r
}\r
// CvTrackbar\r
\r
\r
-CvTrackbar::CvTrackbar(CvWindow* arg, QString name, int* value, int count, CvTrackbarCallback2 on_change, void* data)\r
+CvTrackbar::CvTrackbar(CvWindow* arg, QString name, int* value, int _count, CvTrackbarCallback2 on_change, void* data)\r
{\r
- callback = NULL;\r
- callback2 = on_change;\r
- userdata = data;\r
+ callback = NULL;\r
+ callback2 = on_change;\r
+ userdata = data;\r
\r
- create(arg, name, value, count);\r
+ create(arg, name, value, _count);\r
}\r
\r
\r
-CvTrackbar::CvTrackbar(CvWindow* arg, QString name, int* value, int count, CvTrackbarCallback on_change)\r
+CvTrackbar::CvTrackbar(CvWindow* arg, QString name, int* value, int _count, CvTrackbarCallback on_change)\r
{\r
- callback = on_change;\r
- callback2 = NULL;\r
- userdata = NULL;\r
+ callback = on_change;\r
+ callback2 = NULL;\r
+ userdata = NULL;\r
\r
- create(arg, name, value, count);\r
+ create(arg, name, value, _count);\r
}\r
\r
\r
-void CvTrackbar::create(CvWindow* arg, QString name, int* value, int count)\r
+void CvTrackbar::create(CvWindow* arg, QString name, int* value, int _count)\r
{\r
- type = type_CvTrackbar;\r
- myparent = arg;\r
- name_bar = name;\r
- setObjectName(name_bar);\r
- dataSlider = value;\r
+ type = type_CvTrackbar;\r
+ myparent = arg;\r
+ name_bar = name;\r
+ setObjectName(name_bar);\r
+ dataSlider = value;\r
\r
- slider = new QSlider(Qt::Horizontal);\r
- slider->setFocusPolicy(Qt::StrongFocus);\r
- slider->setMinimum(0);\r
- slider->setMaximum(count);\r
- slider->setPageStep(5);\r
- slider->setValue(*value);\r
- slider->setTickPosition(QSlider::TicksBelow);\r
+ slider = new QSlider(Qt::Horizontal);\r
+ slider->setFocusPolicy(Qt::StrongFocus);\r
+ slider->setMinimum(0);\r
+ slider->setMaximum(_count);\r
+ slider->setPageStep(5);\r
+ slider->setValue(*value);\r
+ slider->setTickPosition(QSlider::TicksBelow);\r
\r
\r
- //Change style of the Slider\r
- //slider->setStyleSheet(str_Trackbar_css);\r
+ //Change style of the Slider\r
+ //slider->setStyleSheet(str_Trackbar_css);\r
\r
- QFile qss(":/stylesheet-trackbar");\r
- if (qss.open(QFile::ReadOnly))\r
- {\r
- slider->setStyleSheet(QLatin1String(qss.readAll()));\r
- qss.close();\r
- }\r
+ QFile qss(":/stylesheet-trackbar");\r
+ if (qss.open(QFile::ReadOnly))\r
+ {\r
+ slider->setStyleSheet(QLatin1String(qss.readAll()));\r
+ qss.close();\r
+ }\r
\r
\r
- //this next line does not work if we change the style with a stylesheet, why ? (bug in QT ?)\r
- //slider->setTickPosition(QSlider::TicksBelow);\r
- label = new QPushButton;\r
- label->setFlat(true);\r
- setLabel(slider->value());\r
+ //this next line does not work if we change the style with a stylesheet, why ? (bug in QT ?)\r
+ //slider->setTickPosition(QSlider::TicksBelow);\r
+ label = new QPushButton;\r
+ label->setFlat(true);\r
+ setLabel(slider->value());\r
\r
\r
- QObject::connect(slider, SIGNAL(valueChanged(int)), this, SLOT(update(int)));\r
+ QObject::connect(slider, SIGNAL(valueChanged(int)), this, SLOT(update(int)));\r
\r
- QObject::connect(label, SIGNAL(clicked()), this, SLOT(createDialog()));\r
+ QObject::connect(label, SIGNAL(clicked()), this, SLOT(createDialog()));\r
\r
- //label->setStyleSheet("QPushButton:disabled {color: black}");\r
+ //label->setStyleSheet("QPushButton:disabled {color: black}");\r
\r
- addWidget(label, Qt::AlignLeft);//name + value\r
- addWidget(slider, Qt::AlignCenter);//slider\r
+ addWidget(label, Qt::AlignLeft);//name + value\r
+ addWidget(slider, Qt::AlignCenter);//slider\r
}\r
\r
\r
void CvTrackbar::createDialog()\r
{\r
- bool ok = false;\r
+ bool ok = false;\r
\r
- //crash if I access the values directly and give them to QInputDialog, so do a copy first.\r
- int value = slider->value();\r
- int step = slider->singleStep();\r
- int min = slider->minimum();\r
- int max = slider->maximum();\r
+ //QInputDialog crashes if the slider's values are passed to it directly, so copy them first.\r
+ int value = slider->value();\r
+ int step = slider->singleStep();\r
+ int min = slider->minimum();\r
+ int max = slider->maximum();\r
\r
- int i =\r
+ int i =\r
#if QT_VERSION >= 0x040500\r
- QInputDialog::getInt\r
+ QInputDialog::getInt\r
#else\r
- QInputDialog::getInteger\r
+ QInputDialog::getInteger\r
#endif\r
- (this->parentWidget(),\r
- tr("Slider %1").arg(name_bar),\r
- tr("New value:"),\r
- value,\r
- min,\r
- max,\r
- step,\r
- &ok);\r
+ (this->parentWidget(),\r
+ tr("Slider %1").arg(name_bar),\r
+ tr("New value:"),\r
+ value,\r
+ min,\r
+ max,\r
+ step,\r
+ &ok);\r
\r
- if (ok)\r
- slider->setValue(i);\r
+ if (ok)\r
+ slider->setValue(i);\r
}\r
\r
\r
void CvTrackbar::update(int myvalue)\r
{\r
- setLabel(myvalue);\r
+ setLabel(myvalue);\r
\r
- *dataSlider = myvalue;\r
- if (callback)\r
- {\r
- callback(myvalue);\r
- return;\r
- }\r
+ *dataSlider = myvalue;\r
+ if (callback)\r
+ {\r
+ callback(myvalue);\r
+ return;\r
+ }\r
\r
- if (callback2)\r
- {\r
- callback2(myvalue, userdata);\r
- return;\r
- }\r
+ if (callback2)\r
+ {\r
+ callback2(myvalue, userdata);\r
+ return;\r
+ }\r
}\r
\r
\r
void CvTrackbar::setLabel(int myvalue)\r
{\r
- QString nameNormalized = name_bar.leftJustified( 10, ' ', true );\r
- QString valueMaximum = QString("%1").arg(slider->maximum());\r
- QString str = QString("%1 (%2/%3)").arg(nameNormalized).arg(myvalue,valueMaximum.length(),10,QChar('0')).arg(valueMaximum);\r
- label->setText(str);\r
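+ //e.g. a trackbar named "threshold" at value 5 of 100 is rendered roughly as "threshold  (005/100)"\r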
+ QString nameNormalized = name_bar.leftJustified( 10, ' ', true );\r
+ QString valueMaximum = QString("%1").arg(slider->maximum());\r
+ QString str = QString("%1 (%2/%3)").arg(nameNormalized).arg(myvalue,valueMaximum.length(),10,QChar('0')).arg(valueMaximum);\r
+ label->setText(str);\r
}\r
\r
\r
//here CvButtonbar class\r
CvButtonbar::CvButtonbar(QWidget* arg, QString arg2)\r
{\r
- type = type_CvButtonbar;\r
- myparent = arg;\r
- name_bar = arg2;\r
- setObjectName(name_bar);\r
+ type = type_CvButtonbar;\r
+ myparent = arg;\r
+ name_bar = arg2;\r
+ setObjectName(name_bar);\r
\r
- group_button = new QButtonGroup(this);\r
+ group_button = new QButtonGroup(this);\r
}\r
\r
\r
void CvButtonbar::setLabel()\r
{\r
- QString nameNormalized = name_bar.leftJustified(10, ' ', true);\r
- label->setText(nameNormalized);\r
+ QString nameNormalized = name_bar.leftJustified(10, ' ', true);\r
+ label->setText(nameNormalized);\r
}\r
\r
\r
void CvButtonbar::addButton(QString name, CvButtonCallback call, void* userdata, int button_type, int initial_button_state)\r
{\r
- QString button_name = name;\r
+ QString button_name = name;\r
\r
- if (button_name == "")\r
- button_name = tr("button %1").arg(this->count());\r
+ if (button_name == "")\r
+ button_name = tr("button %1").arg(this->count());\r
\r
- QPointer<QAbstractButton> button;\r
+ QPointer<QAbstractButton> button;\r
\r
- if (button_type == CV_PUSH_BUTTON)\r
- button = (QAbstractButton*) new CvPushButton(this, button_name,call, userdata);\r
+ if (button_type == CV_PUSH_BUTTON)\r
+ button = (QAbstractButton*) new CvPushButton(this, button_name,call, userdata);\r
\r
- if (button_type == CV_CHECKBOX)\r
- button = (QAbstractButton*) new CvCheckBox(this, button_name,call, userdata, initial_button_state);\r
+ if (button_type == CV_CHECKBOX)\r
+ button = (QAbstractButton*) new CvCheckBox(this, button_name,call, userdata, initial_button_state);\r
\r
- if (button_type == CV_RADIOBOX)\r
- {\r
- button = (QAbstractButton*) new CvRadioButton(this, button_name,call, userdata, initial_button_state);\r
- group_button->addButton(button);\r
- }\r
+ if (button_type == CV_RADIOBOX)\r
+ {\r
+ button = (QAbstractButton*) new CvRadioButton(this, button_name,call, userdata, initial_button_state);\r
+ group_button->addButton(button);\r
+ }\r
\r
- if (button)\r
- {\r
- if (button_type == CV_PUSH_BUTTON)\r
- QObject::connect(button, SIGNAL(clicked(bool)), button, SLOT(callCallBack(bool)));\r
- else \r
- QObject::connect(button, SIGNAL(toggled(bool)), button, SLOT(callCallBack(bool)));\r
+ if (button)\r
+ {\r
+ if (button_type == CV_PUSH_BUTTON)\r
+ QObject::connect(button, SIGNAL(clicked(bool)), button, SLOT(callCallBack(bool)));\r
+ else\r
+ QObject::connect(button, SIGNAL(toggled(bool)), button, SLOT(callCallBack(bool)));\r
\r
- addWidget(button, Qt::AlignCenter);\r
- }\r
+ addWidget(button, Qt::AlignCenter);\r
+ }\r
}\r
\r
\r
//buttons here\r
CvPushButton::CvPushButton(CvButtonbar* arg1, QString arg2, CvButtonCallback arg3, void* arg4)\r
{\r
- myparent = arg1;\r
- button_name = arg2;\r
- callback = arg3;\r
- userdata = arg4;\r
+ myparent = arg1;\r
+ button_name = arg2;\r
+ callback = arg3;\r
+ userdata = arg4;\r
\r
- setObjectName(button_name);\r
- setText(button_name);\r
+ setObjectName(button_name);\r
+ setText(button_name);\r
\r
- if (isChecked())\r
- callCallBack(true);\r
+ if (isChecked())\r
+ callCallBack(true);\r
}\r
\r
\r
void CvPushButton::callCallBack(bool checked)\r
{\r
- if (callback)\r
- callback(checked, userdata);\r
+ if (callback)\r
+ callback(checked, userdata);\r
}\r
\r
\r
CvCheckBox::CvCheckBox(CvButtonbar* arg1, QString arg2, CvButtonCallback arg3, void* arg4, int initial_button_state)\r
{\r
- myparent = arg1;\r
- button_name = arg2;\r
- callback = arg3;\r
- userdata = arg4;\r
+ myparent = arg1;\r
+ button_name = arg2;\r
+ callback = arg3;\r
+ userdata = arg4;\r
\r
- setObjectName(button_name);\r
- setCheckState((initial_button_state == 1 ? Qt::Checked : Qt::Unchecked));\r
- setText(button_name);\r
+ setObjectName(button_name);\r
+ setCheckState((initial_button_state == 1 ? Qt::Checked : Qt::Unchecked));\r
+ setText(button_name);\r
\r
- if (isChecked())\r
- callCallBack(true);\r
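+ //fire the callback once at construction so client code sees the initial checked state\r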
+ if (isChecked())\r
+ callCallBack(true);\r
}\r
\r
\r
void CvCheckBox::callCallBack(bool checked)\r
{\r
- if (callback)\r
- callback(checked, userdata);\r
+ if (callback)\r
+ callback(checked, userdata);\r
}\r
\r
\r
CvRadioButton::CvRadioButton(CvButtonbar* arg1, QString arg2, CvButtonCallback arg3, void* arg4, int initial_button_state)\r
{\r
- myparent = arg1;\r
- button_name = arg2;\r
- callback = arg3;\r
- userdata = arg4;\r
+ myparent = arg1;\r
+ button_name = arg2;\r
+ callback = arg3;\r
+ userdata = arg4;\r
\r
- setObjectName(button_name);\r
- setChecked(initial_button_state);\r
- setText(button_name);\r
+ setObjectName(button_name);\r
+ setChecked(initial_button_state);\r
+ setText(button_name);\r
\r
- if (isChecked())\r
- callCallBack(true);\r
+ if (isChecked())\r
+ callCallBack(true);\r
}\r
\r
void CvRadioButton::callCallBack(bool checked)\r
{\r
- if (callback)\r
- callback(checked, userdata);\r
+ if (callback)\r
+ callback(checked, userdata);\r
}\r
\r
\r
\r
\r
//here CvWinProperties class\r
-CvWinProperties::CvWinProperties(QString name_paraWindow, QObject* parent)\r
+CvWinProperties::CvWinProperties(QString name_paraWindow, QObject* /*parent*/)\r
{\r
- //setParent(parent);\r
- type = type_CvWinProperties;\r
- setWindowFlags(Qt::Tool);\r
- setContentsMargins(0, 0, 0, 0);\r
- setWindowTitle(name_paraWindow);\r
- setObjectName(name_paraWindow);\r
- resize(100, 50);\r
+ //setParent(parent);\r
+ type = type_CvWinProperties;\r
+ setWindowFlags(Qt::Tool);\r
+ setContentsMargins(0, 0, 0, 0);\r
+ setWindowTitle(name_paraWindow);\r
+ setObjectName(name_paraWindow);\r
+ resize(100, 50);\r
\r
- myLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
- myLayout->setObjectName(QString::fromUtf8("boxLayout"));\r
- myLayout->setContentsMargins(0, 0, 0, 0);\r
- myLayout->setSpacing(0);\r
- myLayout->setMargin(0);\r
- myLayout->setSizeConstraint(QLayout::SetFixedSize);\r
- setLayout(myLayout);\r
+ myLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
+ myLayout->setObjectName(QString::fromUtf8("boxLayout"));\r
+ myLayout->setContentsMargins(0, 0, 0, 0);\r
+ myLayout->setSpacing(0);\r
+ myLayout->setMargin(0);\r
+ myLayout->setSizeConstraint(QLayout::SetFixedSize);\r
+ setLayout(myLayout);\r
\r
- hide();\r
+ hide();\r
}\r
\r
\r
void CvWinProperties::closeEvent(QCloseEvent* e)\r
{\r
- e->accept(); //intersept the close event (not sure I really need it)\r
- //an hide event is also sent. I will intercept it and do some processing\r
+ e->accept(); //intercept the close event (not sure it is really needed)\r
+ //a hide event is also sent; it is intercepted in hideEvent() below, which does the actual processing\r
}\r
\r
\r
-void CvWinProperties::showEvent(QShowEvent* event)\r
+void CvWinProperties::showEvent(QShowEvent* evnt)\r
{\r
- //why -1,-1 ?: do this trick because the first time the code is run,\r
- //no value pos was saved so we let Qt move the window in the middle of its parent (event ignored).\r
- //then hide will save the last position and thus, we want to retreive it (event accepted).\r
- QPoint mypos(-1, -1);\r
- QSettings settings("OpenCV2", windowTitle());\r
- mypos = settings.value("pos", mypos).toPoint();\r
+ //why (-1,-1)? the first time the code runs, no position has been saved yet,\r
+ //so we let Qt center the window on its parent (event ignored);\r
+ //hideEvent() then saves the last position, which is retrieved here on later runs (event accepted)\r
+ QPoint mypos(-1, -1);\r
+ QSettings settings("OpenCV2", windowTitle());\r
+ mypos = settings.value("pos", mypos).toPoint();\r
\r
- if (mypos.x() >= 0)\r
- {\r
- move(mypos);\r
- event->accept();\r
- }\r
- else\r
+ if (mypos.x() >= 0)\r
+ {\r
+ move(mypos);\r
+ evnt->accept();\r
+ }\r
+ else\r
{\r
- event->ignore();\r
- }\r
+ evnt->ignore();\r
+ }\r
}\r
\r
\r
-void CvWinProperties::hideEvent(QHideEvent* event)\r
+void CvWinProperties::hideEvent(QHideEvent* evnt)\r
{\r
- QSettings settings("OpenCV2", windowTitle());\r
- settings.setValue("pos", pos()); //there is an offset of 6 pixels (so the window's position is wrong -- why ?)\r
- event->accept();\r
+ QSettings settings("OpenCV2", windowTitle());\r
+ settings.setValue("pos", pos()); //there is an offset of 6 pixels (so the window's position is wrong -- why ?)\r
+ evnt->accept();\r
}\r
\r
\r
CvWinProperties::~CvWinProperties()\r
{\r
- //clear the setting pos\r
- QSettings settings("OpenCV2", windowTitle());\r
- settings.remove("pos");\r
+ //clear the setting pos\r
+ QSettings settings("OpenCV2", windowTitle());\r
+ settings.remove("pos");\r
}\r
\r
\r
\r
CvWindow::CvWindow(QString name, int arg2)\r
{\r
- type = type_CvWindow;\r
- moveToThread(qApp->instance()->thread());\r
+ type = type_CvWindow;\r
+ moveToThread(qApp->instance()->thread());\r
\r
- param_flags = arg2 & 0x0000000F;\r
- param_gui_mode = arg2 & 0x000000F0;\r
- param_ratio_mode = arg2 & 0x00000F00;\r
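+ //the flags word packs three independent nibbles: resize mode (CV_WINDOW_NORMAL/AUTOSIZE),\r
+ //GUI mode (CV_GUI_NORMAL/EXPANDED) and ratio mode (CV_WINDOW_FREERATIO/KEEPRATIO)\r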
+ param_flags = arg2 & 0x0000000F;\r
+ param_gui_mode = arg2 & 0x000000F0;\r
+ param_ratio_mode = arg2 & 0x00000F00;\r
\r
- //setAttribute(Qt::WA_DeleteOnClose); //in other case, does not release memory\r
- setContentsMargins(0, 0, 0, 0);\r
- setWindowTitle(name);\r
+ //setAttribute(Qt::WA_DeleteOnClose); //in other case, does not release memory\r
+ setContentsMargins(0, 0, 0, 0);\r
+ setWindowTitle(name);\r
setObjectName(name);\r
\r
setFocus( Qt::PopupFocusReason ); //#1695 arrow keys are not recieved without the explicit focus\r
\r
- resize(400, 300);\r
- setMinimumSize(1, 1);\r
+ resize(400, 300);\r
+ setMinimumSize(1, 1);\r
\r
- //1: create control panel\r
- if (!global_control_panel)\r
- global_control_panel = createParameterWindow();\r
+ //1: create control panel\r
+ if (!global_control_panel)\r
+ global_control_panel = createParameterWindow();\r
\r
- //2: Layouts\r
- createBarLayout();\r
- createGlobalLayout();\r
+ //2: Layouts\r
+ createBarLayout();\r
+ createGlobalLayout();\r
\r
- //3: my view\r
+ //3: my view\r
#ifndef HAVE_QT_OPENGL\r
if (arg2 & CV_WINDOW_OPENGL)\r
CV_Error( CV_OpenGlNotSupported, "Library was built without OpenGL support" );\r
- mode_display = CV_MODE_NORMAL;\r
+ mode_display = CV_MODE_NORMAL;\r
#else\r
mode_display = arg2 & CV_WINDOW_OPENGL ? CV_MODE_OPENGL : CV_MODE_NORMAL;\r
if (mode_display == CV_MODE_OPENGL)\r
param_gui_mode = CV_GUI_NORMAL;\r
#endif\r
- createView();\r
+ createView();\r
\r
- //4: shortcuts and actions\r
- //5: toolBar and statusbar\r
- if (param_gui_mode == CV_GUI_EXPANDED)\r
- {\r
+ //4: shortcuts and actions\r
+ //5: toolBar and statusbar\r
+ if (param_gui_mode == CV_GUI_EXPANDED)\r
+ {\r
createActions();\r
createShortcuts();\r
\r
- createToolBar();\r
- createStatusBar();\r
- }\r
+ createToolBar();\r
+ createStatusBar();\r
+ }\r
\r
- //Now attach everything\r
- if (myToolBar)\r
- myGlobalLayout->addWidget(myToolBar, Qt::AlignCenter);\r
+ //Now attach everything\r
+ if (myToolBar)\r
+ myGlobalLayout->addWidget(myToolBar, Qt::AlignCenter);\r
\r
- myGlobalLayout->addWidget(myView->getWidget(), Qt::AlignCenter);\r
+ myGlobalLayout->addWidget(myView->getWidget(), Qt::AlignCenter);\r
\r
- myGlobalLayout->addLayout(myBarLayout, Qt::AlignCenter);\r
+ myGlobalLayout->addLayout(myBarLayout, Qt::AlignCenter);\r
\r
- if (myStatusBar)\r
- myGlobalLayout->addWidget(myStatusBar, Qt::AlignCenter);\r
+ if (myStatusBar)\r
+ myGlobalLayout->addWidget(myStatusBar, Qt::AlignCenter);\r
\r
- setLayout(myGlobalLayout);\r
- show();\r
+ setLayout(myGlobalLayout);\r
+ show();\r
}\r
\r
\r
CvWindow::~CvWindow()\r
{\r
- if (guiMainThread)\r
- guiMainThread->isLastWindow();\r
+ if (guiMainThread)\r
+ guiMainThread->isLastWindow();\r
}\r
\r
\r
void CvWindow::setMouseCallBack(CvMouseCallback callback, void* param)\r
{\r
- myView->setMouseCallBack(callback, param);\r
+ myView->setMouseCallBack(callback, param);\r
}\r
\r
\r
void CvWindow::writeSettings()\r
{\r
- //organisation and application's name\r
- QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName());\r
+ //organisation and application's name\r
+ QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName());\r
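+ //QSettings keeps the window state under the "OpenCV2" organization and the executable's file\r
+ //name (typically ~/.config/OpenCV2/<app>.conf on Linux, the registry on Windows)\r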
\r
- settings.setValue("pos", pos());\r
- settings.setValue("size", size());\r
- settings.setValue("mode_resize" ,param_flags);\r
- settings.setValue("mode_gui", param_gui_mode);\r
+ settings.setValue("pos", pos());\r
+ settings.setValue("size", size());\r
+ settings.setValue("mode_resize" ,param_flags);\r
+ settings.setValue("mode_gui", param_gui_mode);\r
\r
myView->writeSettings(settings);\r
\r
- icvSaveTrackbars(&settings);\r
+ icvSaveTrackbars(&settings);\r
\r
- if (global_control_panel)\r
- {\r
- icvSaveControlPanel();\r
- settings.setValue("posPanel", global_control_panel->pos());\r
- }\r
+ if (global_control_panel)\r
+ {\r
+ icvSaveControlPanel();\r
+ settings.setValue("posPanel", global_control_panel->pos());\r
+ }\r
}\r
\r
\r
//TODO: load CV_GUI flag (done) and act accordingly (create win property if needed and attach trackbars)\r
void CvWindow::readSettings()\r
{\r
- //organisation and application's name\r
- QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName());\r
+ //organisation and application's name\r
+ QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName());\r
\r
- QPoint pos = settings.value("pos", QPoint(200, 200)).toPoint();\r
- QSize size = settings.value("size", QSize(400, 400)).toSize();\r
+ QPoint _pos = settings.value("pos", QPoint(200, 200)).toPoint();\r
+ QSize _size = settings.value("size", QSize(400, 400)).toSize();\r
\r
- param_flags = settings.value("mode_resize", param_flags).toInt();\r
- param_gui_mode = settings.value("mode_gui", param_gui_mode).toInt();\r
+ param_flags = settings.value("mode_resize", param_flags).toInt();\r
+ param_gui_mode = settings.value("mode_gui", param_gui_mode).toInt();\r
\r
- param_flags = settings.value("mode_resize", param_flags).toInt();\r
+ param_flags = settings.value("mode_resize", param_flags).toInt();\r
\r
- myView->readSettings(settings);\r
+ myView->readSettings(settings);\r
\r
- //trackbar here\r
- icvLoadTrackbars(&settings);\r
+ //trackbar here\r
+ icvLoadTrackbars(&settings);\r
\r
- resize(size);\r
- move(pos);\r
+ resize(_size);\r
+ move(_pos);\r
\r
- if (global_control_panel)\r
- {\r
- icvLoadControlPanel();\r
- global_control_panel->move(settings.value("posPanel", global_control_panel->pos()).toPoint());\r
- }\r
+ if (global_control_panel)\r
+ {\r
+ icvLoadControlPanel();\r
+ global_control_panel->move(settings.value("posPanel", global_control_panel->pos()).toPoint());\r
+ }\r
}\r
\r
\r
}\r
\r
\r
-int CvWindow::getPropWindow() \r
-{ \r
- return param_flags; \r
+int CvWindow::getPropWindow()\r
+{\r
+ return param_flags;\r
}\r
\r
\r
void CvWindow::setPropWindow(int flags)\r
{\r
if (param_flags == flags) //nothing to do\r
- return;\r
+ return;\r
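+ //CV_WINDOW_NORMAL lets the user resize the window freely,\r
+ //while CV_WINDOW_AUTOSIZE fixes the layout to the size of the displayed content\r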
\r
switch(flags)\r
{\r
case CV_WINDOW_NORMAL:\r
- myGlobalLayout->setSizeConstraint(QLayout::SetMinAndMaxSize);\r
- param_flags = flags;\r
+ myGlobalLayout->setSizeConstraint(QLayout::SetMinAndMaxSize);\r
+ param_flags = flags;\r
\r
- break;\r
+ break;\r
\r
case CV_WINDOW_AUTOSIZE:\r
- myGlobalLayout->setSizeConstraint(QLayout::SetFixedSize);\r
- param_flags = flags;\r
+ myGlobalLayout->setSizeConstraint(QLayout::SetFixedSize);\r
+ param_flags = flags;\r
\r
- break;\r
+ break;\r
\r
default:\r
;\r
{\r
if (isFullScreen() && flags == CV_WINDOW_NORMAL)\r
{\r
- showTools();\r
- showNormal();\r
- return;\r
+ showTools();\r
+ showNormal();\r
+ return;\r
}\r
\r
if (!isFullScreen() && flags == CV_WINDOW_FULLSCREEN)\r
{\r
- hideTools();\r
- showFullScreen();\r
- return;\r
+ hideTools();\r
+ showFullScreen();\r
+ return;\r
}\r
}\r
\r
\r
void CvWindow::updateImage(void* arr)\r
{\r
- myView->updateImage(arr);\r
+ myView->updateImage(arr);\r
}\r
\r
\r
void CvWindow::displayInfo(QString text, int delayms)\r
{\r
- myView->startDisplayInfo(text, delayms);\r
+ myView->startDisplayInfo(text, delayms);\r
}\r
\r
\r
void CvWindow::displayStatusBar(QString text, int delayms)\r
{\r
if (myStatusBar)\r
- myStatusBar->showMessage(text, delayms);\r
+ myStatusBar->showMessage(text, delayms);\r
}\r
\r
\r
\r
CvButtonbar* CvWindow::createButtonBar(QString name_bar)\r
{\r
- QPointer<CvButtonbar> t = new CvButtonbar(global_control_panel, name_bar);\r
- t->setAlignment(Qt::AlignHCenter);\r
+ QPointer<CvButtonbar> t = new CvButtonbar(global_control_panel, name_bar);\r
+ t->setAlignment(Qt::AlignHCenter);\r
\r
- QPointer<QBoxLayout> myLayout = global_control_panel->myLayout;\r
+ QPointer<QBoxLayout> myLayout = global_control_panel->myLayout;\r
\r
- myLayout->insertLayout(myLayout->count(), t);\r
+ myLayout->insertLayout(myLayout->count(), t);\r
\r
- return t;\r
+ return t;\r
}\r
\r
\r
void CvWindow::addSlider(CvWindow* w, QString name, int* value, int count, CvTrackbarCallback on_change)\r
{\r
- QPointer<CvTrackbar> t = new CvTrackbar(w, name, value, count, on_change);\r
- t->setAlignment(Qt::AlignHCenter);\r
+ QPointer<CvTrackbar> t = new CvTrackbar(w, name, value, count, on_change);\r
+ t->setAlignment(Qt::AlignHCenter);\r
\r
- QPointer<QBoxLayout> myLayout;\r
+ QPointer<QBoxLayout> myLayout;\r
\r
- if (w)\r
- {\r
- myLayout = w->myBarLayout;\r
- }\r
- else\r
- {\r
- myLayout = global_control_panel->myLayout;\r
+ if (w)\r
+ {\r
+ myLayout = w->myBarLayout;\r
+ }\r
+ else\r
+ {\r
+ myLayout = global_control_panel->myLayout;\r
\r
- //if first one, enable control panel\r
- if (myLayout->count() == 0)\r
- guiMainThread->enablePropertiesButtonEachWindow();\r
- }\r
+ //if first one, enable control panel\r
+ if (myLayout->count() == 0)\r
+ guiMainThread->enablePropertiesButtonEachWindow();\r
+ }\r
\r
- myLayout->insertLayout(myLayout->count(), t);\r
+ myLayout->insertLayout(myLayout->count(), t);\r
}\r
\r
\r
void CvWindow::addSlider2(CvWindow* w, QString name, int* value, int count, CvTrackbarCallback2 on_change, void* userdata)\r
{\r
- QPointer<CvTrackbar> t = new CvTrackbar(w, name, value, count, on_change, userdata);\r
- t->setAlignment(Qt::AlignHCenter);\r
+ QPointer<CvTrackbar> t = new CvTrackbar(w, name, value, count, on_change, userdata);\r
+ t->setAlignment(Qt::AlignHCenter);\r
\r
- QPointer<QBoxLayout> myLayout;\r
+ QPointer<QBoxLayout> myLayout;\r
\r
- if (w)\r
- {\r
- myLayout = w->myBarLayout;\r
- }\r
- else\r
- {\r
- myLayout = global_control_panel->myLayout;\r
+ if (w)\r
+ {\r
+ myLayout = w->myBarLayout;\r
+ }\r
+ else\r
+ {\r
+ myLayout = global_control_panel->myLayout;\r
\r
- //if first one, enable control panel\r
- if (myLayout->count() == 0)\r
- guiMainThread->enablePropertiesButtonEachWindow();\r
- }\r
+ //if first one, enable control panel\r
+ if (myLayout->count() == 0)\r
+ guiMainThread->enablePropertiesButtonEachWindow();\r
+ }\r
\r
- myLayout->insertLayout(myLayout->count(), t);\r
+ myLayout->insertLayout(myLayout->count(), t);\r
}\r
\r
\r
void CvWindow::setOpenGlDrawCallback(CvOpenGlDrawCallback callback, void* userdata)\r
{\r
- myView->setOpenGlDrawCallback(callback, userdata);\r
+ myView->setOpenGlDrawCallback(callback, userdata);\r
}\r
\r
\r
void CvWindow::setOpenGlCleanCallback(CvOpenGlCleanCallback callback, void* userdata)\r
{\r
- myView->setOpenGlCleanCallback(callback, userdata);\r
+ myView->setOpenGlCleanCallback(callback, userdata);\r
}\r
\r
\r
}\r
\r
\r
-void CvWindow::setViewportSize(QSize size)\r
+void CvWindow::setViewportSize(QSize _size)\r
{\r
- myView->getWidget()->resize(size);\r
- myView->setSize(size);\r
+ myView->getWidget()->resize(_size);\r
+ myView->setSize(_size);\r
}\r
\r
\r
void CvWindow::createBarLayout()\r
{\r
- myBarLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
- myBarLayout->setObjectName(QString::fromUtf8("barLayout"));\r
- myBarLayout->setContentsMargins(0, 0, 0, 0);\r
- myBarLayout->setSpacing(0);\r
- myBarLayout->setMargin(0);\r
+ myBarLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
+ myBarLayout->setObjectName(QString::fromUtf8("barLayout"));\r
+ myBarLayout->setContentsMargins(0, 0, 0, 0);\r
+ myBarLayout->setSpacing(0);\r
+ myBarLayout->setMargin(0);\r
}\r
\r
\r
void CvWindow::createGlobalLayout()\r
{\r
- myGlobalLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
- myGlobalLayout->setObjectName(QString::fromUtf8("boxLayout"));\r
- myGlobalLayout->setContentsMargins(0, 0, 0, 0);\r
- myGlobalLayout->setSpacing(0);\r
- myGlobalLayout->setMargin(0);\r
- setMinimumSize(1, 1);\r
+ myGlobalLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
+ myGlobalLayout->setObjectName(QString::fromUtf8("boxLayout"));\r
+ myGlobalLayout->setContentsMargins(0, 0, 0, 0);\r
+ myGlobalLayout->setSpacing(0);\r
+ myGlobalLayout->setMargin(0);\r
+ setMinimumSize(1, 1);\r
\r
- if (param_flags == CV_WINDOW_AUTOSIZE)\r
- myGlobalLayout->setSizeConstraint(QLayout::SetFixedSize);\r
- else if (param_flags == CV_WINDOW_NORMAL)\r
- myGlobalLayout->setSizeConstraint(QLayout::SetMinAndMaxSize);\r
+ if (param_flags == CV_WINDOW_AUTOSIZE)\r
+ myGlobalLayout->setSizeConstraint(QLayout::SetFixedSize);\r
+ else if (param_flags == CV_WINDOW_NORMAL)\r
+ myGlobalLayout->setSizeConstraint(QLayout::SetMinAndMaxSize);\r
}\r
\r
\r
{\r
#ifdef HAVE_QT_OPENGL\r
if (isOpenGl())\r
- myView = new OpenGlViewPort(this);\r
+ myView = new OpenGlViewPort(this);\r
else\r
#endif\r
- myView = new DefaultViewPort(this, param_ratio_mode);\r
+ myView = new DefaultViewPort(this, param_ratio_mode);\r
}\r
\r
\r
void CvWindow::createActions()\r
{\r
- vect_QActions.resize(10);\r
+ vect_QActions.resize(10);\r
\r
QWidget* view = myView->getWidget();\r
\r
- //if the shortcuts are changed in window_QT.h, we need to update the tooltip manually\r
- vect_QActions[0] = new QAction(QIcon(":/left-icon"), "Panning left (CTRL+arrowLEFT)", this);\r
- vect_QActions[0]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[0], SIGNAL(triggered()), view, SLOT(siftWindowOnLeft()));\r
+ //if the shortcuts are changed in window_QT.h, we need to update the tooltip manually\r
+ vect_QActions[0] = new QAction(QIcon(":/left-icon"), "Panning left (CTRL+arrowLEFT)", this);\r
+ vect_QActions[0]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[0], SIGNAL(triggered()), view, SLOT(siftWindowOnLeft()));\r
\r
- vect_QActions[1] = new QAction(QIcon(":/right-icon"), "Panning right (CTRL+arrowRIGHT)", this);\r
- vect_QActions[1]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[1], SIGNAL(triggered()), view, SLOT(siftWindowOnRight()));\r
+ vect_QActions[1] = new QAction(QIcon(":/right-icon"), "Panning right (CTRL+arrowRIGHT)", this);\r
+ vect_QActions[1]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[1], SIGNAL(triggered()), view, SLOT(siftWindowOnRight()));\r
\r
- vect_QActions[2] = new QAction(QIcon(":/up-icon"), "Panning up (CTRL+arrowUP)", this);\r
- vect_QActions[2]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[2], SIGNAL(triggered()), view, SLOT(siftWindowOnUp()));\r
+ vect_QActions[2] = new QAction(QIcon(":/up-icon"), "Panning up (CTRL+arrowUP)", this);\r
+ vect_QActions[2]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[2], SIGNAL(triggered()), view, SLOT(siftWindowOnUp()));\r
\r
- vect_QActions[3] = new QAction(QIcon(":/down-icon"), "Panning down (CTRL+arrowDOWN)", this);\r
- vect_QActions[3]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[3], SIGNAL(triggered()), view, SLOT(siftWindowOnDown()) );\r
+ vect_QActions[3] = new QAction(QIcon(":/down-icon"), "Panning down (CTRL+arrowDOWN)", this);\r
+ vect_QActions[3]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[3], SIGNAL(triggered()), view, SLOT(siftWindowOnDown()) );\r
\r
- vect_QActions[4] = new QAction(QIcon(":/zoom_x1-icon"), "Zoom x1 (CTRL+P)", this);\r
- vect_QActions[4]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[4], SIGNAL(triggered()), view, SLOT(resetZoom()));\r
+ vect_QActions[4] = new QAction(QIcon(":/zoom_x1-icon"), "Zoom x1 (CTRL+P)", this);\r
+ vect_QActions[4]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[4], SIGNAL(triggered()), view, SLOT(resetZoom()));\r
\r
- vect_QActions[5] = new QAction(QIcon(":/imgRegion-icon"), tr("Zoom x%1 (see label) (CTRL+X)").arg(threshold_zoom_img_region), this);\r
- vect_QActions[5]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[5], SIGNAL(triggered()), view, SLOT(imgRegion()));\r
+ vect_QActions[5] = new QAction(QIcon(":/imgRegion-icon"), tr("Zoom x%1 (see label) (CTRL+X)").arg(threshold_zoom_img_region), this);\r
+ vect_QActions[5]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[5], SIGNAL(triggered()), view, SLOT(imgRegion()));\r
\r
- vect_QActions[6] = new QAction(QIcon(":/zoom_in-icon"), "Zoom in (CTRL++)", this);\r
- vect_QActions[6]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[6], SIGNAL(triggered()), view, SLOT(ZoomIn()));\r
+ vect_QActions[6] = new QAction(QIcon(":/zoom_in-icon"), "Zoom in (CTRL++)", this);\r
+ vect_QActions[6]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[6], SIGNAL(triggered()), view, SLOT(ZoomIn()));\r
\r
- vect_QActions[7] = new QAction(QIcon(":/zoom_out-icon"), "Zoom out (CTRL+-)", this);\r
- vect_QActions[7]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[7], SIGNAL(triggered()), view, SLOT(ZoomOut()));\r
+ vect_QActions[7] = new QAction(QIcon(":/zoom_out-icon"), "Zoom out (CTRL+-)", this);\r
+ vect_QActions[7]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[7], SIGNAL(triggered()), view, SLOT(ZoomOut()));\r
\r
- vect_QActions[8] = new QAction(QIcon(":/save-icon"), "Save current image (CTRL+S)", this);\r
- vect_QActions[8]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[8], SIGNAL(triggered()), view, SLOT(saveView()));\r
+ vect_QActions[8] = new QAction(QIcon(":/save-icon"), "Save current image (CTRL+S)", this);\r
+ vect_QActions[8]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[8], SIGNAL(triggered()), view, SLOT(saveView()));\r
\r
- vect_QActions[9] = new QAction(QIcon(":/properties-icon"), "Display properties window (CTRL+P)", this);\r
- vect_QActions[9]->setIconVisibleInMenu(true);\r
- QObject::connect(vect_QActions[9], SIGNAL(triggered()), this, SLOT(displayPropertiesWin()));\r
+ vect_QActions[9] = new QAction(QIcon(":/properties-icon"), "Display properties window (CTRL+P)", this);\r
+ vect_QActions[9]->setIconVisibleInMenu(true);\r
+ QObject::connect(vect_QActions[9], SIGNAL(triggered()), this, SLOT(displayPropertiesWin()));\r
\r
- if (global_control_panel->myLayout->count() == 0)\r
- vect_QActions[9]->setDisabled(true);\r
+ if (global_control_panel->myLayout->count() == 0)\r
+ vect_QActions[9]->setDisabled(true);\r
}\r
\r
\r
void CvWindow::createShortcuts()\r
{\r
- vect_QShortcuts.resize(10);\r
+ vect_QShortcuts.resize(10);\r
\r
QWidget* view = myView->getWidget();\r
\r
- vect_QShortcuts[0] = new QShortcut(shortcut_panning_left, this);\r
- QObject::connect(vect_QShortcuts[0], SIGNAL(activated()), view, SLOT(siftWindowOnLeft()));\r
+ vect_QShortcuts[0] = new QShortcut(shortcut_panning_left, this);\r
+ QObject::connect(vect_QShortcuts[0], SIGNAL(activated()), view, SLOT(siftWindowOnLeft()));\r
\r
- vect_QShortcuts[1] = new QShortcut(shortcut_panning_right, this);\r
- QObject::connect(vect_QShortcuts[1], SIGNAL(activated()), view, SLOT(siftWindowOnRight()));\r
+ vect_QShortcuts[1] = new QShortcut(shortcut_panning_right, this);\r
+ QObject::connect(vect_QShortcuts[1], SIGNAL(activated()), view, SLOT(siftWindowOnRight()));\r
\r
- vect_QShortcuts[2] = new QShortcut(shortcut_panning_up, this);\r
- QObject::connect(vect_QShortcuts[2], SIGNAL(activated()), view, SLOT(siftWindowOnUp()));\r
+ vect_QShortcuts[2] = new QShortcut(shortcut_panning_up, this);\r
+ QObject::connect(vect_QShortcuts[2], SIGNAL(activated()), view, SLOT(siftWindowOnUp()));\r
\r
- vect_QShortcuts[3] = new QShortcut(shortcut_panning_down, this);\r
- QObject::connect(vect_QShortcuts[3], SIGNAL(activated()), view, SLOT(siftWindowOnDown()));\r
+ vect_QShortcuts[3] = new QShortcut(shortcut_panning_down, this);\r
+ QObject::connect(vect_QShortcuts[3], SIGNAL(activated()), view, SLOT(siftWindowOnDown()));\r
\r
- vect_QShortcuts[4] = new QShortcut(shortcut_zoom_normal, this);\r
- QObject::connect(vect_QShortcuts[4], SIGNAL(activated()), view, SLOT(resetZoom()));\r
+ vect_QShortcuts[4] = new QShortcut(shortcut_zoom_normal, this);\r
+ QObject::connect(vect_QShortcuts[4], SIGNAL(activated()), view, SLOT(resetZoom()));\r
\r
- vect_QShortcuts[5] = new QShortcut(shortcut_zoom_imgRegion, this);\r
- QObject::connect(vect_QShortcuts[5], SIGNAL(activated()), view, SLOT(imgRegion()));\r
+ vect_QShortcuts[5] = new QShortcut(shortcut_zoom_imgRegion, this);\r
+ QObject::connect(vect_QShortcuts[5], SIGNAL(activated()), view, SLOT(imgRegion()));\r
\r
- vect_QShortcuts[6] = new QShortcut(shortcut_zoom_in, this);\r
- QObject::connect(vect_QShortcuts[6], SIGNAL(activated()), view, SLOT(ZoomIn()));\r
+ vect_QShortcuts[6] = new QShortcut(shortcut_zoom_in, this);\r
+ QObject::connect(vect_QShortcuts[6], SIGNAL(activated()), view, SLOT(ZoomIn()));\r
\r
- vect_QShortcuts[7] = new QShortcut(shortcut_zoom_out, this);\r
- QObject::connect(vect_QShortcuts[7], SIGNAL(activated()), view, SLOT(ZoomOut()));\r
+ vect_QShortcuts[7] = new QShortcut(shortcut_zoom_out, this);\r
+ QObject::connect(vect_QShortcuts[7], SIGNAL(activated()), view, SLOT(ZoomOut()));\r
\r
- vect_QShortcuts[8] = new QShortcut(shortcut_save_img, this);\r
- QObject::connect(vect_QShortcuts[8], SIGNAL(activated()), view, SLOT(saveView()));\r
+ vect_QShortcuts[8] = new QShortcut(shortcut_save_img, this);\r
+ QObject::connect(vect_QShortcuts[8], SIGNAL(activated()), view, SLOT(saveView()));\r
\r
- vect_QShortcuts[9] = new QShortcut(shortcut_properties_win, this);\r
- QObject::connect(vect_QShortcuts[9], SIGNAL(activated()), this, SLOT(displayPropertiesWin()));\r
+ vect_QShortcuts[9] = new QShortcut(shortcut_properties_win, this);\r
+ QObject::connect(vect_QShortcuts[9], SIGNAL(activated()), this, SLOT(displayPropertiesWin()));\r
}\r
\r
\r
void CvWindow::createToolBar()\r
{\r
- myToolBar = new QToolBar(this);\r
- myToolBar->setFloatable(false); //is not a window\r
- myToolBar->setFixedHeight(28);\r
- myToolBar->setMinimumWidth(1);\r
+ myToolBar = new QToolBar(this);\r
+ myToolBar->setFloatable(false); //is not a window\r
+ myToolBar->setFixedHeight(28);\r
+ myToolBar->setMinimumWidth(1);\r
\r
- foreach (QAction *a, vect_QActions)\r
- myToolBar->addAction(a);\r
+ foreach (QAction *a, vect_QActions)\r
+ myToolBar->addAction(a);\r
}\r
\r
\r
void CvWindow::createStatusBar()\r
{\r
- myStatusBar = new QStatusBar(this);\r
- myStatusBar->setSizeGripEnabled(false);\r
- myStatusBar->setFixedHeight(20);\r
- myStatusBar->setMinimumWidth(1);\r
- myStatusBar_msg = new QLabel;\r
+ myStatusBar = new QStatusBar(this);\r
+ myStatusBar->setSizeGripEnabled(false);\r
+ myStatusBar->setFixedHeight(20);\r
+ myStatusBar->setMinimumWidth(1);\r
+ myStatusBar_msg = new QLabel;\r
\r
- //I comment this because if we change the style, myview (the picture)\r
- //will not be the correct size anymore (will lost 2 pixel because of the borders)\r
+ //the line below is commented out because changing the style makes myview (the picture)\r
+ //the wrong size (it loses 2 pixels because of the borders)\r
\r
- //myStatusBar_msg->setFrameStyle(QFrame::Raised);\r
+ //myStatusBar_msg->setFrameStyle(QFrame::Raised);\r
\r
- myStatusBar_msg->setAlignment(Qt::AlignHCenter);\r
- myStatusBar->addWidget(myStatusBar_msg);\r
+ myStatusBar_msg->setAlignment(Qt::AlignHCenter);\r
+ myStatusBar->addWidget(myStatusBar_msg);\r
}\r
\r
\r
void CvWindow::hideTools()\r
{\r
- if (myToolBar)\r
- myToolBar->hide();\r
+ if (myToolBar)\r
+ myToolBar->hide();\r
\r
- if (myStatusBar)\r
- myStatusBar->hide();\r
+ if (myStatusBar)\r
+ myStatusBar->hide();\r
\r
- if (global_control_panel)\r
- global_control_panel->hide();\r
+ if (global_control_panel)\r
+ global_control_panel->hide();\r
}\r
\r
\r
void CvWindow::showTools()\r
{\r
- if (myToolBar)\r
- myToolBar->show();\r
+ if (myToolBar)\r
+ myToolBar->show();\r
\r
- if (myStatusBar)\r
- myStatusBar->show();\r
+ if (myStatusBar)\r
+ myStatusBar->show();\r
}\r
\r
\r
CvWinProperties* CvWindow::createParameterWindow()\r
{\r
- QString name_paraWindow = QFileInfo(QApplication::applicationFilePath()).fileName() + " settings";\r
+ QString name_paraWindow = QFileInfo(QApplication::applicationFilePath()).fileName() + " settings";\r
\r
- CvWinProperties* result = new CvWinProperties(name_paraWindow, guiMainThread);\r
+ CvWinProperties* result = new CvWinProperties(name_paraWindow, guiMainThread);\r
\r
- return result;\r
+ return result;\r
}\r
\r
\r
void CvWindow::displayPropertiesWin()\r
{\r
- if (global_control_panel->isHidden())\r
- global_control_panel->show();\r
- else\r
- global_control_panel->hide();\r
+ if (global_control_panel->isHidden())\r
+ global_control_panel->show();\r
+ else\r
+ global_control_panel->hide();\r
}\r
\r
\r
//Need more test here !\r
-void CvWindow::keyPressEvent(QKeyEvent *event)\r
+void CvWindow::keyPressEvent(QKeyEvent *evnt)\r
{\r
- //see http://doc.trolltech.com/4.6/qt.html#Key-enum\r
- int key = event->key();\r
+ //see http://doc.trolltech.com/4.6/qt.html#Key-enum\r
+ int key = evnt->key();\r
\r
Qt::Key qtkey = static_cast<Qt::Key>(key);\r
char asciiCode = QTest::keyToAscii(qtkey);\r
if (asciiCode != 0)\r
key = static_cast<int>(asciiCode);\r
else\r
- key = event->nativeVirtualKey(); //same codes as returned by GTK-based backend\r
+ key = evnt->nativeVirtualKey(); //same codes as returned by GTK-based backend\r
\r
- //control plus (Z, +, -, up, down, left, right) are used for zoom/panning functions\r
- if (event->modifiers() != Qt::ControlModifier)\r
+ //control plus (Z, +, -, up, down, left, right) are used for zoom/panning functions\r
+ if (evnt->modifiers() != Qt::ControlModifier)\r
{\r
- mutexKey.lock();\r
- last_key = key;\r
- mutexKey.unlock();\r
- key_pressed.wakeAll();\r
- //event->accept();\r
- }\r
+ mutexKey.lock();\r
+ last_key = key;\r
+ mutexKey.unlock();\r
+ key_pressed.wakeAll();\r
+ //evnt->accept();\r
+ }\r
\r
- QWidget::keyPressEvent(event);\r
+ QWidget::keyPressEvent(evnt);\r
}\r
\r
\r
void CvWindow::icvLoadControlPanel()\r
{\r
- QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName() + " control panel");\r
- \r
- int size = settings.beginReadArray("bars");\r
+ QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName() + " control panel");\r
+\r
+ int bsize = settings.beginReadArray("bars");\r
\r
- if (size == global_control_panel->myLayout->layout()->count())\r
+ if (bsize == global_control_panel->myLayout->layout()->count())\r
{\r
- for (int i = 0; i < size; ++i) \r
+ for (int i = 0; i < bsize; ++i)\r
{\r
- CvBar* t = (CvBar*) global_control_panel->myLayout->layout()->itemAt(i);\r
- settings.setArrayIndex(i);\r
- if (t->type == type_CvTrackbar)\r
- {\r
- if (t->name_bar == settings.value("namebar").toString())\r
- {\r
- ((CvTrackbar*)t)->slider->setValue(settings.value("valuebar").toInt());\r
- }\r
- }\r
- if (t->type == type_CvButtonbar)\r
- {\r
- int subsize = settings.beginReadArray(QString("buttonbar")+i);\r
+ CvBar* t = (CvBar*) global_control_panel->myLayout->layout()->itemAt(i);\r
+ settings.setArrayIndex(i);\r
+ if (t->type == type_CvTrackbar)\r
+ {\r
+ if (t->name_bar == settings.value("namebar").toString())\r
+ {\r
+ ((CvTrackbar*)t)->slider->setValue(settings.value("valuebar").toInt());\r
+ }\r
+ }\r
+ if (t->type == type_CvButtonbar)\r
+ {\r
+ int subsize = settings.beginReadArray(QString("buttonbar")+i);\r
+\r
+ if ( subsize == ((CvButtonbar*)t)->layout()->count() )\r
+ icvLoadButtonbar((CvButtonbar*)t,&settings);\r
\r
- if ( subsize == ((CvButtonbar*)t)->layout()->count() )\r
- icvLoadButtonbar((CvButtonbar*)t,&settings);\r
- \r
- settings.endArray();\r
- }\r
- }\r
+ settings.endArray();\r
+ }\r
+ }\r
}\r
\r
- settings.endArray();\r
+ settings.endArray();\r
}\r
\r
\r
void CvWindow::icvSaveControlPanel()\r
{\r
- QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName()+" control panel");\r
+ QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName()+" control panel");\r
\r
- settings.beginWriteArray("bars");\r
+ settings.beginWriteArray("bars");\r
\r
- for (int i = 0; i < global_control_panel->myLayout->layout()->count(); ++i) \r
+ for (int i = 0; i < global_control_panel->myLayout->layout()->count(); ++i)\r
{\r
- CvBar* t = (CvBar*) global_control_panel->myLayout->layout()->itemAt(i);\r
- settings.setArrayIndex(i);\r
- if (t->type == type_CvTrackbar)\r
- {\r
- settings.setValue("namebar", QString(t->name_bar));\r
- settings.setValue("valuebar",((CvTrackbar*)t)->slider->value());\r
- }\r
- if (t->type == type_CvButtonbar)\r
- {\r
- settings.beginWriteArray(QString("buttonbar")+i);\r
- icvSaveButtonbar((CvButtonbar*)t,&settings);\r
- settings.endArray();\r
- }\r
- }\r
+ CvBar* t = (CvBar*) global_control_panel->myLayout->layout()->itemAt(i);\r
+ settings.setArrayIndex(i);\r
+ if (t->type == type_CvTrackbar)\r
+ {\r
+ settings.setValue("namebar", QString(t->name_bar));\r
+ settings.setValue("valuebar",((CvTrackbar*)t)->slider->value());\r
+ }\r
+ if (t->type == type_CvButtonbar)\r
+ {\r
+ settings.beginWriteArray(QString("buttonbar")+i);\r
+ icvSaveButtonbar((CvButtonbar*)t,&settings);\r
+ settings.endArray();\r
+ }\r
+ }\r
\r
- settings.endArray();\r
+ settings.endArray();\r
}\r
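
Both panel functions above lean on QSettings' array API: beginWriteArray()/setArrayIndex() persist an indexed group of keys, and beginReadArray() returns the number of stored entries, which is what the count comparisons guard against. A self-contained round trip using hypothetical organization and key names:

    #include <QSettings>
    #include <QStringList>

    // Round trip through the QSettings array API (hypothetical names).
    void settingsArrayRoundTrip()
    {
        QSettings settings("OpenCV2", "array demo");

        QStringList names = QStringList() << "alpha" << "beta";
        settings.beginWriteArray("bars");
        for (int i = 0; i < names.size(); ++i)
        {
            settings.setArrayIndex(i);
            settings.setValue("namebar", names[i]);   // one key per entry
        }
        settings.endArray();

        int n = settings.beginReadArray("bars");      // n == names.size()
        for (int i = 0; i < n; ++i)
        {
            settings.setArrayIndex(i);
            QString name = settings.value("namebar").toString();
            (void)name;                               // use the value here
        }
        settings.endArray();
    }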
\r
\r
void CvWindow::icvSaveButtonbar(CvButtonbar* b, QSettings* settings)\r
{\r
- for (int i = 0, count = b->layout()->count(); i < count; ++i) \r
+ for (int i = 0, count = b->layout()->count(); i < count; ++i)\r
{\r
- settings->setArrayIndex(i);\r
+ settings->setArrayIndex(i);\r
\r
- QWidget* temp = (QWidget*) b->layout()->itemAt(i)->widget();\r
+ QWidget* temp = (QWidget*) b->layout()->itemAt(i)->widget();\r
QString myclass(QLatin1String(temp->metaObject()->className()));\r
\r
- if (myclass == "CvPushButton")\r
- {\r
- CvPushButton* button = (CvPushButton*) temp;\r
- settings->setValue("namebutton", button->text());\r
- settings->setValue("valuebutton", int(button->isChecked()));\r
- }\r
- else if (myclass == "CvCheckBox")\r
- {\r
- CvCheckBox* button = (CvCheckBox*) temp;\r
- settings->setValue("namebutton", button->text());\r
- settings->setValue("valuebutton", int(button->isChecked()));\r
- }\r
- else if (myclass == "CvRadioButton")\r
- {\r
- CvRadioButton* button = (CvRadioButton*) temp;\r
- settings->setValue("namebutton", button->text());\r
- settings->setValue("valuebutton", int(button->isChecked()));\r
- }\r
- }\r
+ if (myclass == "CvPushButton")\r
+ {\r
+ CvPushButton* button = (CvPushButton*) temp;\r
+ settings->setValue("namebutton", button->text());\r
+ settings->setValue("valuebutton", int(button->isChecked()));\r
+ }\r
+ else if (myclass == "CvCheckBox")\r
+ {\r
+ CvCheckBox* button = (CvCheckBox*) temp;\r
+ settings->setValue("namebutton", button->text());\r
+ settings->setValue("valuebutton", int(button->isChecked()));\r
+ }\r
+ else if (myclass == "CvRadioButton")\r
+ {\r
+ CvRadioButton* button = (CvRadioButton*) temp;\r
+ settings->setValue("namebutton", button->text());\r
+ settings->setValue("valuebutton", int(button->isChecked()));\r
+ }\r
+ }\r
}\r
\r
\r
void CvWindow::icvLoadButtonbar(CvButtonbar* b, QSettings* settings)\r
{\r
- for (int i = 0, count = b->layout()->count(); i < count; ++i)\r
- {\r
- settings->setArrayIndex(i);\r
+ for (int i = 0, count = b->layout()->count(); i < count; ++i)\r
+ {\r
+ settings->setArrayIndex(i);\r
\r
- QWidget* temp = (QWidget*) b->layout()->itemAt(i)->widget();\r
- QString myclass(QLatin1String(temp->metaObject()->className()));\r
+ QWidget* temp = (QWidget*) b->layout()->itemAt(i)->widget();\r
+ QString myclass(QLatin1String(temp->metaObject()->className()));\r
\r
- if (myclass == "CvPushButton")\r
- {\r
- CvPushButton* button = (CvPushButton*) temp;\r
+ if (myclass == "CvPushButton")\r
+ {\r
+ CvPushButton* button = (CvPushButton*) temp;\r
\r
- if (button->text() == settings->value("namebutton").toString())\r
- button->setChecked(settings->value("valuebutton").toInt());\r
- }\r
- else if (myclass == "CvCheckBox")\r
- {\r
- CvCheckBox* button = (CvCheckBox*) temp;\r
+ if (button->text() == settings->value("namebutton").toString())\r
+ button->setChecked(settings->value("valuebutton").toInt());\r
+ }\r
+ else if (myclass == "CvCheckBox")\r
+ {\r
+ CvCheckBox* button = (CvCheckBox*) temp;\r
\r
- if (button->text() == settings->value("namebutton").toString())\r
- button->setChecked(settings->value("valuebutton").toInt());\r
- }\r
- else if (myclass == "CvRadioButton")\r
- {\r
- CvRadioButton* button = (CvRadioButton*) temp;\r
+ if (button->text() == settings->value("namebutton").toString())\r
+ button->setChecked(settings->value("valuebutton").toInt());\r
+ }\r
+ else if (myclass == "CvRadioButton")\r
+ {\r
+ CvRadioButton* button = (CvRadioButton*) temp;\r
\r
- if (button->text() == settings->value("namebutton").toString())\r
- button->setChecked(settings->value("valuebutton").toInt());\r
- }\r
+ if (button->text() == settings->value("namebutton").toString())\r
+ button->setChecked(settings->value("valuebutton").toInt());\r
+ }\r
\r
- }\r
+ }\r
}\r
\r
\r
void CvWindow::icvLoadTrackbars(QSettings* settings)\r
{\r
- int size = settings->beginReadArray("trackbars");\r
+ int bsize = settings->beginReadArray("trackbars");\r
\r
- //trackbar are saved in the same order, so no need to use icvFindTrackbarByName\r
+    //trackbars are saved in the same order, so there is no need to use icvFindTrackbarByName\r
\r
- if (myBarLayout->layout()->count() == size) //if not the same number, the window saved and loaded is not the same (nb trackbar not equal)\r
+    if (myBarLayout->layout()->count() == bsize) //if the counts differ, the saved window does not match the one being loaded (different number of trackbars)\r
{\r
- for (int i = 0; i < size; ++i)\r
- {\r
- settings->setArrayIndex(i);\r
+ for (int i = 0; i < bsize; ++i)\r
+ {\r
+ settings->setArrayIndex(i);\r
\r
- CvTrackbar* t = (CvTrackbar*) myBarLayout->layout()->itemAt(i);\r
+ CvTrackbar* t = (CvTrackbar*) myBarLayout->layout()->itemAt(i);\r
\r
- if (t->name_bar == settings->value("name").toString())\r
- t->slider->setValue(settings->value("value").toInt());\r
+ if (t->name_bar == settings->value("name").toString())\r
+ t->slider->setValue(settings->value("value").toInt());\r
\r
- }\r
+ }\r
}\r
\r
- settings->endArray();\r
+ settings->endArray();\r
}\r
\r
\r
void CvWindow::icvSaveTrackbars(QSettings* settings)\r
{\r
- settings->beginWriteArray("trackbars");\r
+ settings->beginWriteArray("trackbars");\r
\r
- for (int i = 0; i < myBarLayout->layout()->count(); ++i) \r
+ for (int i = 0; i < myBarLayout->layout()->count(); ++i)\r
{\r
- settings->setArrayIndex(i);\r
+ settings->setArrayIndex(i);\r
\r
- CvTrackbar* t = (CvTrackbar*) myBarLayout->layout()->itemAt(i);\r
+ CvTrackbar* t = (CvTrackbar*) myBarLayout->layout()->itemAt(i);\r
\r
- settings->setValue("name", t->name_bar);\r
- settings->setValue("value", t->slider->value());\r
- }\r
+ settings->setValue("name", t->name_bar);\r
+ settings->setValue("value", t->slider->value());\r
+ }\r
\r
- settings->endArray();\r
+ settings->endArray();\r
}\r
\r
\r
\r
DefaultViewPort::DefaultViewPort(CvWindow* arg, int arg2) : QGraphicsView(arg), image2Draw_mat(0)\r
{\r
- centralWidget = arg;\r
+ centralWidget = arg;\r
param_keepRatio = arg2;\r
\r
- setContentsMargins(0, 0, 0, 0);\r
- setMinimumSize(1, 1);\r
+ setContentsMargins(0, 0, 0, 0);\r
+ setMinimumSize(1, 1);\r
setAlignment(Qt::AlignHCenter);\r
\r
- setObjectName(QString::fromUtf8("graphicsView"));\r
+ setObjectName(QString::fromUtf8("graphicsView"));\r
\r
- timerDisplay = new QTimer(this);\r
- timerDisplay->setSingleShot(true);\r
- connect(timerDisplay, SIGNAL(timeout()), this, SLOT(stopDisplayInfo()));\r
+ timerDisplay = new QTimer(this);\r
+ timerDisplay->setSingleShot(true);\r
+ connect(timerDisplay, SIGNAL(timeout()), this, SLOT(stopDisplayInfo()));\r
\r
- drawInfo = false;\r
- positionGrabbing = QPointF(0, 0);\r
- positionCorners = QRect(0, 0, size().width(), size().height());\r
+ drawInfo = false;\r
+ positionGrabbing = QPointF(0, 0);\r
+ positionCorners = QRect(0, 0, size().width(), size().height());\r
\r
- on_mouse = 0;\r
+ on_mouse = 0;\r
on_mouse_param = 0;\r
- mouseCoordinate = QPoint(-1, -1);\r
+ mouseCoordinate = QPoint(-1, -1);\r
\r
- //no border\r
- setStyleSheet( "QGraphicsView { border-style: none; }" ); \r
+ //no border\r
+ setStyleSheet( "QGraphicsView { border-style: none; }" );\r
\r
image2Draw_mat = cvCreateMat(viewport()->height(), viewport()->width(), CV_8UC3);\r
cvZero(image2Draw_mat);\r
\r
nbChannelOriginImage = 0;\r
\r
- setInteractive(false);\r
- setMouseTracking(true); //receive mouse event everytime\r
+ setInteractive(false);\r
+    setMouseTracking(true); //receive mouse move events even when no button is pressed\r
}\r
\r
\r
DefaultViewPort::~DefaultViewPort()\r
{\r
- if (image2Draw_mat) \r
- cvReleaseMat(&image2Draw_mat);\r
+ if (image2Draw_mat)\r
+ cvReleaseMat(&image2Draw_mat);\r
}\r
\r
\r
\r
void DefaultViewPort::setMouseCallBack(CvMouseCallback m, void* param)\r
{\r
- on_mouse = m;\r
+ on_mouse = m;\r
\r
- on_mouse_param = param;\r
+ on_mouse_param = param;\r
}\r
\r
void DefaultViewPort::writeSettings(QSettings& settings)\r
void DefaultViewPort::setRatio(int flags)\r
{\r
if (getRatio() == flags) //nothing to do\r
- return;\r
+ return;\r
\r
- //if valid flags\r
- if (flags == CV_WINDOW_FREERATIO || flags == CV_WINDOW_KEEPRATIO)\r
+ //if valid flags\r
+ if (flags == CV_WINDOW_FREERATIO || flags == CV_WINDOW_KEEPRATIO)\r
{\r
centralWidget->param_ratio_mode = flags;\r
- param_keepRatio = flags;\r
- updateGeometry();\r
- viewport()->update();\r
+ param_keepRatio = flags;\r
+ updateGeometry();\r
+ viewport()->update();\r
}\r
}\r
\r
\r
void DefaultViewPort::updateImage(const CvArr* arr)\r
{\r
- CV_Assert(arr);\r
+ CV_Assert(arr);\r
\r
- CvMat* mat, stub;\r
- int origin = 0;\r
+ CvMat* mat, stub;\r
+ int origin = 0;\r
\r
- if (CV_IS_IMAGE_HDR(arr))\r
- origin = ((IplImage*)arr)->origin;\r
+ if (CV_IS_IMAGE_HDR(arr))\r
+ origin = ((IplImage*)arr)->origin;\r
\r
- mat = cvGetMat(arr, &stub);\r
+ mat = cvGetMat(arr, &stub);\r
\r
- if (!image2Draw_mat || !CV_ARE_SIZES_EQ(image2Draw_mat, mat))\r
- {\r
+ if (!image2Draw_mat || !CV_ARE_SIZES_EQ(image2Draw_mat, mat))\r
+ {\r
if (image2Draw_mat)\r
- cvReleaseMat(&image2Draw_mat);\r
+ cvReleaseMat(&image2Draw_mat);\r
\r
- //the image in ipl (to do a deep copy with cvCvtColor)\r
- image2Draw_mat = cvCreateMat(mat->rows, mat->cols, CV_8UC3);\r
- image2Draw_qt = QImage(image2Draw_mat->data.ptr, image2Draw_mat->cols, image2Draw_mat->rows, image2Draw_mat->step, QImage::Format_RGB888);\r
+ //the image in ipl (to do a deep copy with cvCvtColor)\r
+ image2Draw_mat = cvCreateMat(mat->rows, mat->cols, CV_8UC3);\r
+ image2Draw_qt = QImage(image2Draw_mat->data.ptr, image2Draw_mat->cols, image2Draw_mat->rows, image2Draw_mat->step, QImage::Format_RGB888);\r
\r
- //use to compute mouse coordinate, I need to update the ratio here and in resizeEvent\r
- ratioX = width() / float(image2Draw_mat->cols);\r
- ratioY = height() / float(image2Draw_mat->rows);\r
+        //used to compute mouse coordinates; the ratio must be updated here and in resizeEvent\r
+ ratioX = width() / float(image2Draw_mat->cols);\r
+ ratioY = height() / float(image2Draw_mat->rows);\r
\r
- updateGeometry();\r
- }\r
+ updateGeometry();\r
+ }\r
\r
- nbChannelOriginImage = cvGetElemType(mat);\r
+ nbChannelOriginImage = cvGetElemType(mat);\r
\r
- cvConvertImage(mat, image2Draw_mat, (origin != 0 ? CV_CVTIMG_FLIP : 0) + CV_CVTIMG_SWAP_RB);\r
+ cvConvertImage(mat, image2Draw_mat, (origin != 0 ? CV_CVTIMG_FLIP : 0) + CV_CVTIMG_SWAP_RB);\r
\r
- viewport()->update();\r
+ viewport()->update();\r
}\r
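
The QImage created above shares the CvMat's buffer rather than copying it (data.ptr plus step as bytesPerLine), so the cvConvertImage() call that flips and swaps R/B writes directly into what Qt will draw. A minimal sketch of that zero-copy wrap, assuming the OpenCV 2.x C headers; the matrix must outlive the returned QImage:

    #include <opencv2/core/core_c.h>
    #include <QImage>

    // Wrap an 8-bit, 3-channel CvMat as a QImage without copying the pixels;
    // the CvMat buffer must stay alive for as long as the QImage is used.
    QImage wrapMatAsQImage(CvMat* rgb)
    {
        return QImage(rgb->data.ptr, rgb->cols, rgb->rows, rgb->step,
                      QImage::Format_RGB888);
    }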
\r
\r
void DefaultViewPort::startDisplayInfo(QString text, int delayms)\r
{\r
- if (timerDisplay->isActive())\r
- stopDisplayInfo();\r
+ if (timerDisplay->isActive())\r
+ stopDisplayInfo();\r
\r
- infoText = text;\r
- if (delayms > 0) timerDisplay->start(delayms);\r
- drawInfo = true;\r
+ infoText = text;\r
+ if (delayms > 0) timerDisplay->start(delayms);\r
+ drawInfo = true;\r
}\r
\r
\r
//Note: move 2 percent of the window\r
void DefaultViewPort::siftWindowOnLeft()\r
{\r
- float delta = 2 * width() / (100.0 * param_matrixWorld.m11());\r
- moveView(QPointF(delta, 0));\r
+ float delta = 2 * width() / (100.0 * param_matrixWorld.m11());\r
+ moveView(QPointF(delta, 0));\r
}\r
\r
\r
//Note: move 2 percent of the window\r
void DefaultViewPort::siftWindowOnRight()\r
{\r
- float delta = -2 * width() / (100.0 * param_matrixWorld.m11());\r
- moveView(QPointF(delta, 0));\r
+ float delta = -2 * width() / (100.0 * param_matrixWorld.m11());\r
+ moveView(QPointF(delta, 0));\r
}\r
\r
\r
//Note: move 2 percent of the window\r
void DefaultViewPort::siftWindowOnUp()\r
{\r
- float delta = 2 * height() / (100.0 * param_matrixWorld.m11());\r
- moveView(QPointF(0, delta));\r
+ float delta = 2 * height() / (100.0 * param_matrixWorld.m11());\r
+ moveView(QPointF(0, delta));\r
}\r
\r
\r
//Note: move 2 percent of the window\r
void DefaultViewPort::siftWindowOnDown()\r
{\r
- float delta = -2 * height() / (100.0 * param_matrixWorld.m11());\r
- moveView(QPointF(0, delta));\r
+ float delta = -2 * height() / (100.0 * param_matrixWorld.m11());\r
+ moveView(QPointF(0, delta));\r
}\r
\r
\r
void DefaultViewPort::imgRegion()\r
{\r
- scaleView((threshold_zoom_img_region / param_matrixWorld.m11() - 1) * 5, QPointF(size().width() / 2, size().height() / 2));\r
+ scaleView((threshold_zoom_img_region / param_matrixWorld.m11() - 1) * 5, QPointF(size().width() / 2, size().height() / 2));\r
}\r
\r
\r
void DefaultViewPort::resetZoom()\r
{\r
- param_matrixWorld.reset();\r
- controlImagePosition();\r
+ param_matrixWorld.reset();\r
+ controlImagePosition();\r
}\r
\r
\r
void DefaultViewPort::ZoomIn()\r
{\r
- scaleView(0.5, QPointF(size().width() / 2, size().height() / 2));\r
+ scaleView(0.5, QPointF(size().width() / 2, size().height() / 2));\r
}\r
\r
\r
void DefaultViewPort::ZoomOut()\r
{\r
- scaleView(-0.5, QPointF(size().width() / 2, size().height() / 2));\r
+ scaleView(-0.5, QPointF(size().width() / 2, size().height() / 2));\r
}\r
\r
\r
//can save as JPG, JPEG, BMP, PNG\r
void DefaultViewPort::saveView()\r
{\r
- QDate date_d = QDate::currentDate();\r
- QString date_s = date_d.toString("dd.MM.yyyy");\r
+ QDate date_d = QDate::currentDate();\r
+ QString date_s = date_d.toString("dd.MM.yyyy");\r
QString name_s = centralWidget->windowTitle() + "_screenshot_" + date_s;\r
\r
- QString fileName = QFileDialog::getSaveFileName(this, tr("Save File %1").arg(name_s), name_s + ".png", tr("Images (*.png *.jpg *.bmp *.jpeg)"));\r
+ QString fileName = QFileDialog::getSaveFileName(this, tr("Save File %1").arg(name_s), name_s + ".png", tr("Images (*.png *.jpg *.bmp *.jpeg)"));\r
+\r
+ if (!fileName.isEmpty()) //save the picture\r
+ {\r
+        QString extension = QFileInfo(fileName).suffix(); //use the full suffix so ".jpeg" is recognized too\r
\r
- if (!fileName.isEmpty()) //save the picture\r
- {\r
- QString extension = fileName.right(3);\r
+        // (no longer needed) create a resized image to receive the 'screenshot'\r
+ // image2Draw_qt_resized = QImage(viewport()->width(), viewport()->height(),QImage::Format_RGB888);\r
\r
- // (no need anymore) create the image resized to receive the 'screenshot'\r
- // image2Draw_qt_resized = QImage(viewport()->width(), viewport()->height(),QImage::Format_RGB888);\r
- \r
- QPainter saveimage(&image2Draw_qt_resized);\r
- this->render(&saveimage);\r
+ QPainter saveimage(&image2Draw_qt_resized);\r
+ this->render(&saveimage);\r
\r
- // Save it..\r
- if (QString::compare(extension, "png", Qt::CaseInsensitive) == 0)\r
- {\r
- image2Draw_qt_resized.save(fileName, "PNG");\r
- return;\r
- }\r
+ // Save it..\r
+ if (QString::compare(extension, "png", Qt::CaseInsensitive) == 0)\r
+ {\r
+ image2Draw_qt_resized.save(fileName, "PNG");\r
+ return;\r
+ }\r
\r
- if (QString::compare(extension, "jpg", Qt::CaseInsensitive) == 0)\r
- {\r
- image2Draw_qt_resized.save(fileName, "JPG");\r
- return;\r
- }\r
+ if (QString::compare(extension, "jpg", Qt::CaseInsensitive) == 0)\r
+ {\r
+ image2Draw_qt_resized.save(fileName, "JPG");\r
+ return;\r
+ }\r
\r
- if (QString::compare(extension, "bmp", Qt::CaseInsensitive) == 0)\r
- {\r
- image2Draw_qt_resized.save(fileName, "BMP");\r
- return;\r
- }\r
+ if (QString::compare(extension, "bmp", Qt::CaseInsensitive) == 0)\r
+ {\r
+ image2Draw_qt_resized.save(fileName, "BMP");\r
+ return;\r
+ }\r
\r
- if (QString::compare(extension, "jpeg", Qt::CaseInsensitive) == 0)\r
- {\r
- image2Draw_qt_resized.save(fileName, "JPEG");\r
- return;\r
- }\r
+ if (QString::compare(extension, "jpeg", Qt::CaseInsensitive) == 0)\r
+ {\r
+ image2Draw_qt_resized.save(fileName, "JPEG");\r
+ return;\r
+ }\r
\r
- CV_Error(CV_StsNullPtr, "file extension not recognized, please choose between JPG, JPEG, BMP or PNG");\r
- }\r
+ CV_Error(CV_StsNullPtr, "file extension not recognized, please choose between JPG, JPEG, BMP or PNG");\r
+ }\r
}\r
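
On the extension handling above: QImage::save() can infer the writer from the file suffix when no format string is passed, so the per-extension branches could in principle collapse into one call. A sketch of that alternative (illustrative, not the patched code):

    #include <QImage>
    #include <QString>

    // Let Qt pick the image writer from the file suffix; returns false if the
    // suffix is unknown or the write fails, which a caller can turn into an error.
    bool saveViewportImage(const QImage& img, const QString& fileName)
    {
        return img.save(fileName);
    }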
\r
\r
-void DefaultViewPort::contextMenuEvent(QContextMenuEvent* event)\r
+void DefaultViewPort::contextMenuEvent(QContextMenuEvent* evnt)\r
{\r
- if (centralWidget->vect_QActions.size() > 0)\r
- {\r
- QMenu menu(this);\r
+ if (centralWidget->vect_QActions.size() > 0)\r
+ {\r
+ QMenu menu(this);\r
\r
- foreach (QAction *a, centralWidget->vect_QActions)\r
- menu.addAction(a);\r
+ foreach (QAction *a, centralWidget->vect_QActions)\r
+ menu.addAction(a);\r
\r
- menu.exec(event->globalPos());\r
- }\r
+ menu.exec(evnt->globalPos());\r
+ }\r
}\r
\r
\r
-void DefaultViewPort::resizeEvent(QResizeEvent* event)\r
+void DefaultViewPort::resizeEvent(QResizeEvent* evnt)\r
{\r
controlImagePosition();\r
\r
    //used to compute mouse coordinates; the ratio must be updated here and in resizeEvent\r
ratioX = width() / float(image2Draw_mat->cols);\r
ratioY = height() / float(image2Draw_mat->rows);\r
- \r
+\r
if (param_keepRatio == CV_WINDOW_KEEPRATIO)//to keep the same aspect ratio\r
{\r
- QSize newSize = QSize(image2Draw_mat->cols, image2Draw_mat->rows);\r
- newSize.scale(event->size(), Qt::KeepAspectRatio);\r
+ QSize newSize = QSize(image2Draw_mat->cols, image2Draw_mat->rows);\r
+ newSize.scale(evnt->size(), Qt::KeepAspectRatio);\r
\r
- //imageWidth/imageHeight = newWidth/newHeight +/- epsilon\r
- //ratioX = ratioY +/- epsilon\r
- //||ratioX - ratioY|| = epsilon\r
- if (fabs(ratioX - ratioY) * 100 > ratioX) //avoid infinity loop / epsilon = 1% of ratioX\r
- {\r
- resize(newSize);\r
+ //imageWidth/imageHeight = newWidth/newHeight +/- epsilon\r
+ //ratioX = ratioY +/- epsilon\r
+ //||ratioX - ratioY|| = epsilon\r
+        if (fabs(ratioX - ratioY) * 100 > ratioX) //avoid an infinite loop / epsilon = 1% of ratioX\r
+ {\r
+ resize(newSize);\r
\r
- //move to the middle\r
- //newSize get the delta offset to place the picture in the middle of its parent\r
- newSize = (event->size() - newSize) / 2;\r
+ //move to the middle\r
+            //newSize gets the delta offset needed to place the picture in the middle of its parent\r
+ newSize = (evnt->size() - newSize) / 2;\r
\r
- //if the toolbar is displayed, avoid drawing myview on top of it\r
- if (centralWidget->myToolBar)\r
- if(!centralWidget->myToolBar->isHidden())\r
- newSize += QSize(0, centralWidget->myToolBar->height());\r
+ //if the toolbar is displayed, avoid drawing myview on top of it\r
+ if (centralWidget->myToolBar)\r
+ if(!centralWidget->myToolBar->isHidden())\r
+ newSize += QSize(0, centralWidget->myToolBar->height());\r
\r
- move(newSize.width(), newSize.height());\r
- }\r
+ move(newSize.width(), newSize.height());\r
+ }\r
}\r
\r
- return QGraphicsView::resizeEvent(event);\r
+ return QGraphicsView::resizeEvent(evnt);\r
}\r
\r
\r
-void DefaultViewPort::wheelEvent(QWheelEvent* event)\r
+void DefaultViewPort::wheelEvent(QWheelEvent* evnt)\r
{\r
- scaleView(event->delta() / 240.0, event->pos());\r
- viewport()->update();\r
+ scaleView(evnt->delta() / 240.0, evnt->pos());\r
+ viewport()->update();\r
}\r
\r
\r
-void DefaultViewPort::mousePressEvent(QMouseEvent* event)\r
+void DefaultViewPort::mousePressEvent(QMouseEvent* evnt)\r
{\r
- int cv_event = -1, flags = 0;\r
- QPoint pt = event->pos();\r
+ int cv_event = -1, flags = 0;\r
+ QPoint pt = evnt->pos();\r
\r
- //icvmouseHandler: pass parameters for cv_event, flags\r
- icvmouseHandler(event, mouse_down, cv_event, flags);\r
- icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+ //icvmouseHandler: pass parameters for cv_event, flags\r
+ icvmouseHandler(evnt, mouse_down, cv_event, flags);\r
+ icvmouseProcessing(QPointF(pt), cv_event, flags);\r
\r
- if (param_matrixWorld.m11()>1)\r
- {\r
- setCursor(Qt::ClosedHandCursor);\r
- positionGrabbing = event->pos();\r
- }\r
+ if (param_matrixWorld.m11()>1)\r
+ {\r
+ setCursor(Qt::ClosedHandCursor);\r
+ positionGrabbing = evnt->pos();\r
+ }\r
\r
- QWidget::mousePressEvent(event);\r
+ QWidget::mousePressEvent(evnt);\r
}\r
\r
\r
-void DefaultViewPort::mouseReleaseEvent(QMouseEvent* event)\r
+void DefaultViewPort::mouseReleaseEvent(QMouseEvent* evnt)\r
{\r
- int cv_event = -1, flags = 0;\r
- QPoint pt = event->pos();\r
+ int cv_event = -1, flags = 0;\r
+ QPoint pt = evnt->pos();\r
\r
- //icvmouseHandler: pass parameters for cv_event, flags\r
- icvmouseHandler(event, mouse_up, cv_event, flags);\r
- icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+ //icvmouseHandler: pass parameters for cv_event, flags\r
+ icvmouseHandler(evnt, mouse_up, cv_event, flags);\r
+ icvmouseProcessing(QPointF(pt), cv_event, flags);\r
\r
- if (param_matrixWorld.m11()>1)\r
- setCursor(Qt::OpenHandCursor);\r
+ if (param_matrixWorld.m11()>1)\r
+ setCursor(Qt::OpenHandCursor);\r
\r
- QWidget::mouseReleaseEvent(event);\r
+ QWidget::mouseReleaseEvent(evnt);\r
}\r
\r
\r
-void DefaultViewPort::mouseDoubleClickEvent(QMouseEvent* event)\r
+void DefaultViewPort::mouseDoubleClickEvent(QMouseEvent* evnt)\r
{\r
- int cv_event = -1, flags = 0;\r
- QPoint pt = event->pos();\r
+ int cv_event = -1, flags = 0;\r
+ QPoint pt = evnt->pos();\r
\r
- //icvmouseHandler: pass parameters for cv_event, flags\r
- icvmouseHandler(event, mouse_dbclick, cv_event, flags);\r
- icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+ //icvmouseHandler: pass parameters for cv_event, flags\r
+ icvmouseHandler(evnt, mouse_dbclick, cv_event, flags);\r
+ icvmouseProcessing(QPointF(pt), cv_event, flags);\r
\r
- QWidget::mouseDoubleClickEvent(event);\r
+ QWidget::mouseDoubleClickEvent(evnt);\r
}\r
\r
\r
-void DefaultViewPort::mouseMoveEvent(QMouseEvent* event)\r
+void DefaultViewPort::mouseMoveEvent(QMouseEvent* evnt)\r
{\r
- int cv_event = CV_EVENT_MOUSEMOVE, flags = 0;\r
- QPoint pt = event->pos();\r
+ int cv_event = CV_EVENT_MOUSEMOVE, flags = 0;\r
+ QPoint pt = evnt->pos();\r
\r
- //icvmouseHandler: pass parameters for cv_event, flags\r
- icvmouseHandler(event, mouse_move, cv_event, flags);\r
- icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+ //icvmouseHandler: pass parameters for cv_event, flags\r
+ icvmouseHandler(evnt, mouse_move, cv_event, flags);\r
+ icvmouseProcessing(QPointF(pt), cv_event, flags);\r
\r
- if (param_matrixWorld.m11() > 1 && event->buttons() == Qt::LeftButton)\r
- {\r
- QPointF dxy = (pt - positionGrabbing)/param_matrixWorld.m11();\r
- positionGrabbing = event->pos();\r
- moveView(dxy);\r
- }\r
+ if (param_matrixWorld.m11() > 1 && evnt->buttons() == Qt::LeftButton)\r
+ {\r
+ QPointF dxy = (pt - positionGrabbing)/param_matrixWorld.m11();\r
+ positionGrabbing = evnt->pos();\r
+ moveView(dxy);\r
+ }\r
\r
- //I update the statusbar here because if the user does a cvWaitkey(0) (like with inpaint.cpp)\r
- //the status bar will only be repaint when a click occurs.\r
- if (centralWidget->myStatusBar)\r
- viewport()->update();\r
+    //update the status bar here: if the user calls cvWaitKey(0) (like with inpaint.cpp),\r
+    //it would otherwise only be repainted when a click occurs.\r
+ if (centralWidget->myStatusBar)\r
+ viewport()->update();\r
\r
- QWidget::mouseMoveEvent(event);\r
+ QWidget::mouseMoveEvent(evnt);\r
}\r
\r
\r
-void DefaultViewPort::paintEvent(QPaintEvent* event)\r
+void DefaultViewPort::paintEvent(QPaintEvent* evnt)\r
{\r
- QPainter myPainter(viewport());\r
- myPainter.setWorldTransform(param_matrixWorld);\r
+ QPainter myPainter(viewport());\r
+ myPainter.setWorldTransform(param_matrixWorld);\r
\r
- draw2D(&myPainter);\r
+ draw2D(&myPainter);\r
\r
- //Now disable matrixWorld for overlay display\r
- myPainter.setWorldMatrixEnabled(false);\r
+ //Now disable matrixWorld for overlay display\r
+ myPainter.setWorldMatrixEnabled(false);\r
\r
- //in mode zoom/panning\r
- if (param_matrixWorld.m11() > 1)\r
- { \r
- if (param_matrixWorld.m11() >= threshold_zoom_img_region)\r
- {\r
- if (centralWidget->param_flags == CV_WINDOW_NORMAL)\r
- startDisplayInfo("WARNING: The values displayed are the resized image's values. If you want the original image's values, use CV_WINDOW_AUTOSIZE", 1000);\r
+ //in mode zoom/panning\r
+ if (param_matrixWorld.m11() > 1)\r
+ {\r
+ if (param_matrixWorld.m11() >= threshold_zoom_img_region)\r
+ {\r
+ if (centralWidget->param_flags == CV_WINDOW_NORMAL)\r
+ startDisplayInfo("WARNING: The values displayed are the resized image's values. If you want the original image's values, use CV_WINDOW_AUTOSIZE", 1000);\r
\r
- drawImgRegion(&myPainter);\r
- }\r
+ drawImgRegion(&myPainter);\r
+ }\r
\r
- drawViewOverview(&myPainter);\r
- }\r
+ drawViewOverview(&myPainter);\r
+ }\r
\r
- //for information overlay\r
- if (drawInfo)\r
- drawInstructions(&myPainter);\r
+ //for information overlay\r
+ if (drawInfo)\r
+ drawInstructions(&myPainter);\r
\r
- //for statusbar\r
- if (centralWidget->myStatusBar)\r
- drawStatusBar();\r
+ //for statusbar\r
+ if (centralWidget->myStatusBar)\r
+ drawStatusBar();\r
\r
- QGraphicsView::paintEvent(event);\r
+ QGraphicsView::paintEvent(evnt);\r
}\r
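
paintEvent() above paints in two phases: the image under the zoom/pan world transform, then the overlays (pixel grid, overview, info text, status bar) with the transform disabled so they stay in plain widget coordinates. A condensed sketch of the same pattern, with hypothetical names:

    #include <QImage>
    #include <QPaintDevice>
    #include <QPainter>
    #include <QString>
    #include <QTransform>

    // Draw the scene under the zoom/pan transform, then switch the transform
    // off so the overlay is positioned in plain widget coordinates.
    void paintWithOverlay(QPaintDevice* device, const QTransform& world,
                          const QImage& scene)
    {
        QPainter p(device);
        p.setWorldTransform(world);
        p.drawImage(0, 0, scene);           // zoomed/panned content
        p.setWorldMatrixEnabled(false);     // back to widget coordinates
        p.drawText(10, 20, QString("overlay"));
    }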
\r
\r
void DefaultViewPort::stopDisplayInfo()\r
{\r
- timerDisplay->stop();\r
- drawInfo = false;\r
+ timerDisplay->stop();\r
+ drawInfo = false;\r
}\r
\r
\r
inline bool DefaultViewPort::isSameSize(IplImage* img1, IplImage* img2)\r
{\r
- return img1->width == img2->width && img1->height == img2->height;\r
+ return img1->width == img2->width && img1->height == img2->height;\r
}\r
\r
\r
void DefaultViewPort::controlImagePosition()\r
{\r
- qreal left, top, right, bottom;\r
-\r
- //after check top-left, bottom right corner to avoid getting "out" during zoom/panning\r
- param_matrixWorld.map(0,0,&left,&top);\r
-\r
- if (left > 0)\r
- {\r
- param_matrixWorld.translate(-left,0);\r
- left = 0;\r
- }\r
- if (top > 0)\r
- {\r
- param_matrixWorld.translate(0,-top);\r
- top = 0;\r
- }\r
- //-------\r
-\r
- QSize sizeImage = size();\r
- param_matrixWorld.map(sizeImage.width(),sizeImage.height(),&right,&bottom);\r
- if (right < sizeImage.width())\r
- {\r
- param_matrixWorld.translate(sizeImage.width()-right,0);\r
- right = sizeImage.width();\r
- }\r
- if (bottom < sizeImage.height())\r
- {\r
- param_matrixWorld.translate(0,sizeImage.height()-bottom);\r
- bottom = sizeImage.height();\r
- }\r
-\r
- //save corner position\r
- positionCorners.setTopLeft(QPoint(left,top));\r
- positionCorners.setBottomRight(QPoint(right,bottom));\r
- //save also the inv matrix\r
- matrixWorld_inv = param_matrixWorld.inverted();\r
-\r
- //viewport()->update();\r
+ qreal left, top, right, bottom;\r
+\r
+    //check the top-left and bottom-right corners to avoid getting "out" of the image during zoom/panning\r
+ param_matrixWorld.map(0,0,&left,&top);\r
+\r
+ if (left > 0)\r
+ {\r
+ param_matrixWorld.translate(-left,0);\r
+ left = 0;\r
+ }\r
+ if (top > 0)\r
+ {\r
+ param_matrixWorld.translate(0,-top);\r
+ top = 0;\r
+ }\r
+ //-------\r
+\r
+ QSize sizeImage = size();\r
+ param_matrixWorld.map(sizeImage.width(),sizeImage.height(),&right,&bottom);\r
+ if (right < sizeImage.width())\r
+ {\r
+ param_matrixWorld.translate(sizeImage.width()-right,0);\r
+ right = sizeImage.width();\r
+ }\r
+ if (bottom < sizeImage.height())\r
+ {\r
+ param_matrixWorld.translate(0,sizeImage.height()-bottom);\r
+ bottom = sizeImage.height();\r
+ }\r
+\r
+ //save corner position\r
+ positionCorners.setTopLeft(QPoint(left,top));\r
+ positionCorners.setBottomRight(QPoint(right,bottom));\r
+ //save also the inv matrix\r
+ matrixWorld_inv = param_matrixWorld.inverted();\r
+\r
+ //viewport()->update();\r
}\r
\r
void DefaultViewPort::moveView(QPointF delta)\r
{\r
- param_matrixWorld.translate(delta.x(),delta.y());\r
- controlImagePosition();\r
- viewport()->update();\r
+ param_matrixWorld.translate(delta.x(),delta.y());\r
+ controlImagePosition();\r
+ viewport()->update();\r
}\r
\r
//factor is -0.5 (zoom out) or 0.5 (zoom in)\r
void DefaultViewPort::scaleView(qreal factor,QPointF center)\r
{\r
- factor/=5;//-0.1 <-> 0.1\r
- factor+=1;//0.9 <-> 1.1\r
+ factor/=5;//-0.1 <-> 0.1\r
+ factor+=1;//0.9 <-> 1.1\r
\r
- //limit zoom out ---\r
- if (param_matrixWorld.m11()==1 && factor < 1)\r
- return;\r
+ //limit zoom out ---\r
+ if (param_matrixWorld.m11()==1 && factor < 1)\r
+ return;\r
\r
- if (param_matrixWorld.m11()*factor<1)\r
- factor = 1/param_matrixWorld.m11();\r
+ if (param_matrixWorld.m11()*factor<1)\r
+ factor = 1/param_matrixWorld.m11();\r
\r
\r
- //limit zoom int ---\r
- if (param_matrixWorld.m11()>100 && factor > 1)\r
- return;\r
+    //limit zoom in ---\r
+ if (param_matrixWorld.m11()>100 && factor > 1)\r
+ return;\r
\r
- //inverse the transform\r
- int a, b;\r
- matrixWorld_inv.map(center.x(),center.y(),&a,&b);\r
+    //map the zoom center through the inverse transform\r
+ int a, b;\r
+ matrixWorld_inv.map(center.x(),center.y(),&a,&b);\r
\r
- param_matrixWorld.translate(a-factor*a,b-factor*b);\r
- param_matrixWorld.scale(factor,factor);\r
+ param_matrixWorld.translate(a-factor*a,b-factor*b);\r
+ param_matrixWorld.scale(factor,factor);\r
\r
- controlImagePosition();\r
+ controlImagePosition();\r
\r
- //display new zoom\r
- if (centralWidget->myStatusBar)\r
- centralWidget->displayStatusBar(tr("Zoom: %1%").arg(param_matrixWorld.m11()*100),1000);\r
+ //display new zoom\r
+ if (centralWidget->myStatusBar)\r
+ centralWidget->displayStatusBar(tr("Zoom: %1%").arg(param_matrixWorld.m11()*100),1000);\r
\r
- if (param_matrixWorld.m11()>1)\r
- setCursor(Qt::OpenHandCursor);\r
- else\r
- unsetCursor();\r
+ if (param_matrixWorld.m11()>1)\r
+ setCursor(Qt::OpenHandCursor);\r
+ else\r
+ unsetCursor();\r
}\r
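
The anchor-preserving zoom above works because, after M.translate(t) followed by M.scale(factor), a source point p is mapped to M_old(factor*p + t); with t = (a - factor*a, b - factor*b) the anchor (a, b) therefore lands exactly where M_old already put it. A small self-check of that identity starting from an identity matrix, with arbitrary example values:

    #include <QTransform>
    #include <cassert>
    #include <cmath>

    // Starting from the identity, translate(a - f*a, b - f*b) followed by
    // scale(f, f) must map the anchor (a, b) back onto itself.
    void zoomKeepsAnchorFixed()
    {
        const qreal f = 1.1, a = 40.0, b = 25.0;   // arbitrary factor and anchor
        QTransform m;
        m.translate(a - f * a, b - f * b);
        m.scale(f, f);
        qreal x = 0, y = 0;
        m.map(a, b, &x, &y);
        assert(std::fabs(x - a) < 1e-9 && std::fabs(y - b) < 1e-9);
    }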
\r
\r
//up, down, dclick, move\r
-void DefaultViewPort::icvmouseHandler(QMouseEvent *event, type_mouse_event category, int &cv_event, int &flags)\r
+void DefaultViewPort::icvmouseHandler(QMouseEvent *evnt, type_mouse_event category, int &cv_event, int &flags)\r
{\r
- Qt::KeyboardModifiers modifiers = event->modifiers();\r
- Qt::MouseButtons buttons = event->buttons();\r
- \r
+ Qt::KeyboardModifiers modifiers = evnt->modifiers();\r
+ Qt::MouseButtons buttons = evnt->buttons();\r
+\r
flags = 0;\r
if(modifiers & Qt::ShiftModifier)\r
flags |= CV_EVENT_FLAG_SHIFTKEY;\r
flags |= CV_EVENT_FLAG_MBUTTON;\r
\r
cv_event = CV_EVENT_MOUSEMOVE;\r
- switch(event->button())\r
+ switch(evnt->button())\r
{\r
case Qt::LeftButton:\r
cv_event = tableMouseButtons[category][0];\r
\r
void DefaultViewPort::icvmouseProcessing(QPointF pt, int cv_event, int flags)\r
{\r
- //to convert mouse coordinate\r
- qreal pfx, pfy;\r
- matrixWorld_inv.map(pt.x(),pt.y(),&pfx,&pfy);\r
- \r
- mouseCoordinate.rx()=floor(pfx/ratioX);\r
- mouseCoordinate.ry()=floor(pfy/ratioY);\r
+    //convert the mouse position to image pixel coordinates\r
+ qreal pfx, pfy;\r
+ matrixWorld_inv.map(pt.x(),pt.y(),&pfx,&pfy);\r
+\r
+ mouseCoordinate.rx()=floor(pfx/ratioX);\r
+ mouseCoordinate.ry()=floor(pfy/ratioY);\r
\r
- if (on_mouse)\r
- on_mouse( cv_event, mouseCoordinate.x(),\r
+ if (on_mouse)\r
+ on_mouse( cv_event, mouseCoordinate.x(),\r
mouseCoordinate.y(), flags, on_mouse_param );\r
}\r
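
icvmouseProcessing() above recovers image pixel coordinates in two steps: undo the zoom/pan with the inverse world matrix, then divide by the widget-to-image ratios maintained in updateImage() and resizeEvent(). A hypothetical standalone helper with the same inputs (viewportToPixel is not part of the backend):

    #include <QPoint>
    #include <QTransform>
    #include <cmath>

    // Viewport position -> position with zoom/pan undone -> pixel index.
    // ratioX/ratioY are assumed to be widget_width/image_cols and
    // widget_height/image_rows, as maintained by the viewport above.
    QPoint viewportToPixel(const QTransform& worldInv, const QPointF& pt,
                           qreal ratioX, qreal ratioY)
    {
        qreal fx = 0, fy = 0;
        worldInv.map(pt.x(), pt.y(), &fx, &fy);
        return QPoint(int(std::floor(fx / ratioX)), int(std::floor(fy / ratioY)));
    }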
\r
\r
QSize DefaultViewPort::sizeHint() const\r
{\r
- if(image2Draw_mat)\r
- return QSize(image2Draw_mat->cols, image2Draw_mat->rows);\r
- else\r
- return QGraphicsView::sizeHint();\r
+ if(image2Draw_mat)\r
+ return QSize(image2Draw_mat->cols, image2Draw_mat->rows);\r
+ else\r
+ return QGraphicsView::sizeHint();\r
}\r
\r
\r
void DefaultViewPort::draw2D(QPainter *painter)\r
{\r
- image2Draw_qt = QImage(image2Draw_mat->data.ptr, image2Draw_mat->cols, image2Draw_mat->rows,image2Draw_mat->step,QImage::Format_RGB888);\r
- image2Draw_qt_resized = image2Draw_qt.scaled(viewport()->width(),viewport()->height(),Qt::IgnoreAspectRatio,Qt::FastTransformation);//Qt::SmoothTransformation);\r
- painter->drawImage(0,0,image2Draw_qt_resized);\r
+ image2Draw_qt = QImage(image2Draw_mat->data.ptr, image2Draw_mat->cols, image2Draw_mat->rows,image2Draw_mat->step,QImage::Format_RGB888);\r
+ image2Draw_qt_resized = image2Draw_qt.scaled(viewport()->width(),viewport()->height(),Qt::IgnoreAspectRatio,Qt::FastTransformation);//Qt::SmoothTransformation);\r
+ painter->drawImage(0,0,image2Draw_qt_resized);\r
}\r
\r
//only if CV_8UC1 or CV_8UC3\r
void DefaultViewPort::drawStatusBar()\r
{\r
- if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)\r
- return;\r
-\r
- if (mouseCoordinate.x()>=0 &&\r
- mouseCoordinate.y()>=0 &&\r
- mouseCoordinate.x()<image2Draw_qt.width() &&\r
- mouseCoordinate.y()<image2Draw_qt.height())\r
-// if (mouseCoordinate.x()>=0 && mouseCoordinate.y()>=0)\r
- {\r
- QRgb rgbValue = image2Draw_qt.pixel(mouseCoordinate);\r
-\r
- if (nbChannelOriginImage==CV_8UC3 )\r
- {\r
- centralWidget->myStatusBar_msg->setText(tr("<font color='black'>(x=%1, y=%2) ~ </font>")\r
- .arg(mouseCoordinate.x())\r
- .arg(mouseCoordinate.y())+\r
- tr("<font color='red'>R:%3 </font>").arg(qRed(rgbValue))+//.arg(value.val[0])+\r
- tr("<font color='green'>G:%4 </font>").arg(qGreen(rgbValue))+//.arg(value.val[1])+\r
- tr("<font color='blue'>B:%5</font>").arg(qBlue(rgbValue))//.arg(value.val[2])\r
- );\r
- }\r
-\r
- if (nbChannelOriginImage==CV_8UC1)\r
- {\r
- //all the channel have the same value (because of cvconvertimage), so only the r channel is dsplayed\r
- centralWidget->myStatusBar_msg->setText(tr("<font color='black'>(x=%1, y=%2) ~ </font>")\r
- .arg(mouseCoordinate.x())\r
- .arg(mouseCoordinate.y())+\r
- tr("<font color='grey'>L:%3 </font>").arg(qRed(rgbValue))\r
- );\r
- }\r
- }\r
+ if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)\r
+ return;\r
+\r
+ if (mouseCoordinate.x()>=0 &&\r
+ mouseCoordinate.y()>=0 &&\r
+ mouseCoordinate.x()<image2Draw_qt.width() &&\r
+ mouseCoordinate.y()<image2Draw_qt.height())\r
+// if (mouseCoordinate.x()>=0 && mouseCoordinate.y()>=0)\r
+ {\r
+ QRgb rgbValue = image2Draw_qt.pixel(mouseCoordinate);\r
+\r
+ if (nbChannelOriginImage==CV_8UC3 )\r
+ {\r
+ centralWidget->myStatusBar_msg->setText(tr("<font color='black'>(x=%1, y=%2) ~ </font>")\r
+ .arg(mouseCoordinate.x())\r
+ .arg(mouseCoordinate.y())+\r
+ tr("<font color='red'>R:%3 </font>").arg(qRed(rgbValue))+//.arg(value.val[0])+\r
+ tr("<font color='green'>G:%4 </font>").arg(qGreen(rgbValue))+//.arg(value.val[1])+\r
+ tr("<font color='blue'>B:%5</font>").arg(qBlue(rgbValue))//.arg(value.val[2])\r
+ );\r
+ }\r
+\r
+ if (nbChannelOriginImage==CV_8UC1)\r
+ {\r
+            //all the channels have the same value (because of cvConvertImage), so only the R channel is displayed\r
+ centralWidget->myStatusBar_msg->setText(tr("<font color='black'>(x=%1, y=%2) ~ </font>")\r
+ .arg(mouseCoordinate.x())\r
+ .arg(mouseCoordinate.y())+\r
+ tr("<font color='grey'>L:%3 </font>").arg(qRed(rgbValue))\r
+ );\r
+ }\r
+ }\r
}\r
\r
//accept only CV_8UC1 and CV_8UC3 images for now\r
void DefaultViewPort::drawImgRegion(QPainter *painter)\r
{\r
\r
- if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)\r
- return;\r
-\r
- qreal offsetX = param_matrixWorld.dx()/param_matrixWorld.m11();\r
- offsetX = offsetX - floor(offsetX);\r
- qreal offsetY = param_matrixWorld.dy()/param_matrixWorld.m11();\r
- offsetY = offsetY - floor(offsetY);\r
-\r
- QSize view = size();\r
- QVarLengthArray<QLineF, 30> linesX;\r
- for (qreal x = offsetX*param_matrixWorld.m11(); x < view.width(); x += param_matrixWorld.m11() )\r
- linesX.append(QLineF(x, 0, x, view.height()));\r
+ if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)\r
+ return;\r
\r
- QVarLengthArray<QLineF, 30> linesY;\r
- for (qreal y = offsetY*param_matrixWorld.m11(); y < view.height(); y += param_matrixWorld.m11() )\r
- linesY.append(QLineF(0, y, view.width(), y));\r
+ qreal offsetX = param_matrixWorld.dx()/param_matrixWorld.m11();\r
+ offsetX = offsetX - floor(offsetX);\r
+ qreal offsetY = param_matrixWorld.dy()/param_matrixWorld.m11();\r
+ offsetY = offsetY - floor(offsetY);\r
\r
+ QSize view = size();\r
+ QVarLengthArray<QLineF, 30> linesX;\r
+ for (qreal _x = offsetX*param_matrixWorld.m11(); _x < view.width(); _x += param_matrixWorld.m11() )\r
+ linesX.append(QLineF(_x, 0, _x, view.height()));\r
\r
- QFont f = painter->font();\r
- int original_font_size = f.pointSize();\r
- //change font size\r
- //f.setPointSize(4+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);\r
- f.setPixelSize(10+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);\r
- painter->setFont(f);\r
- QString val;\r
- QRgb rgbValue;\r
+ QVarLengthArray<QLineF, 30> linesY;\r
+ for (qreal _y = offsetY*param_matrixWorld.m11(); _y < view.height(); _y += param_matrixWorld.m11() )\r
+ linesY.append(QLineF(0, _y, view.width(), _y));\r
\r
- QPointF point1;//sorry, I do not know how to name it\r
- QPointF point2;//idem\r
\r
- for (int j=-1;j<height()/param_matrixWorld.m11();j++)//-1 because display the pixels top rows left colums\r
- for (int i=-1;i<width()/param_matrixWorld.m11();i++)//-1\r
- {\r
- point1.setX((i+offsetX)*param_matrixWorld.m11());\r
- point1.setY((j+offsetY)*param_matrixWorld.m11());\r
+ QFont f = painter->font();\r
+ int original_font_size = f.pointSize();\r
+ //change font size\r
+ //f.setPointSize(4+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);\r
+ f.setPixelSize(10+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);\r
+ painter->setFont(f);\r
+ QString val;\r
+ QRgb rgbValue;\r
\r
- matrixWorld_inv.map(point1.x(),point1.y(),&point2.rx(),&point2.ry());\r
+    QPointF point1;//cell position in the zoomed view\r
+    QPointF point2;//same position with the zoom/pan removed, used to read the pixel value\r
\r
- point2.rx()= (long) (point2.x() + 0.5);\r
- point2.ry()= (long) (point2.y() + 0.5);\r
-\r
- if (point2.x() >= 0 && point2.y() >= 0)\r
- rgbValue = image2Draw_qt_resized.pixel(QPoint(point2.x(),point2.y()));\r
- else\r
- rgbValue = qRgb(0,0,0);\r
+    for (int j=-1;j<height()/param_matrixWorld.m11();j++)//-1 so the top row and left column of pixels are also displayed\r
+ for (int i=-1;i<width()/param_matrixWorld.m11();i++)//-1\r
+ {\r
+ point1.setX((i+offsetX)*param_matrixWorld.m11());\r
+ point1.setY((j+offsetY)*param_matrixWorld.m11());\r
\r
- if (nbChannelOriginImage==CV_8UC3)\r
- {\r
- //for debug\r
- /*\r
- val = tr("%1 %2").arg(point2.x()).arg(point2.y());\r
- painter->setPen(QPen(Qt::black, 1));\r
- painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/2),\r
- Qt::AlignCenter, val);\r
- */\r
+ matrixWorld_inv.map(point1.x(),point1.y(),&point2.rx(),&point2.ry());\r
\r
- val = tr("%1").arg(qRed(rgbValue));\r
- painter->setPen(QPen(Qt::red, 1));\r
- painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
- Qt::AlignCenter, val);\r
+ point2.rx()= (long) (point2.x() + 0.5);\r
+ point2.ry()= (long) (point2.y() + 0.5);\r
\r
- val = tr("%1").arg(qGreen(rgbValue));\r
- painter->setPen(QPen(Qt::green, 1));\r
- painter->drawText(QRect(point1.x(),point1.y()+param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
- Qt::AlignCenter, val);\r
+ if (point2.x() >= 0 && point2.y() >= 0)\r
+ rgbValue = image2Draw_qt_resized.pixel(QPoint(point2.x(),point2.y()));\r
+ else\r
+ rgbValue = qRgb(0,0,0);\r
\r
- val = tr("%1").arg(qBlue(rgbValue));\r
- painter->setPen(QPen(Qt::blue, 1));\r
- painter->drawText(QRect(point1.x(),point1.y()+2*param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
- Qt::AlignCenter, val);\r
+ if (nbChannelOriginImage==CV_8UC3)\r
+ {\r
+ //for debug\r
+ /*\r
+ val = tr("%1 %2").arg(point2.x()).arg(point2.y());\r
+ painter->setPen(QPen(Qt::black, 1));\r
+ painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/2),\r
+ Qt::AlignCenter, val);\r
+ */\r
+\r
+ val = tr("%1").arg(qRed(rgbValue));\r
+ painter->setPen(QPen(Qt::red, 1));\r
+ painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
+ Qt::AlignCenter, val);\r
+\r
+ val = tr("%1").arg(qGreen(rgbValue));\r
+ painter->setPen(QPen(Qt::green, 1));\r
+ painter->drawText(QRect(point1.x(),point1.y()+param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
+ Qt::AlignCenter, val);\r
+\r
+ val = tr("%1").arg(qBlue(rgbValue));\r
+ painter->setPen(QPen(Qt::blue, 1));\r
+ painter->drawText(QRect(point1.x(),point1.y()+2*param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
+ Qt::AlignCenter, val);\r
\r
- }\r
+ }\r
\r
- if (nbChannelOriginImage==CV_8UC1)\r
- {\r
+ if (nbChannelOriginImage==CV_8UC1)\r
+ {\r
\r
- val = tr("%1").arg(qRed(rgbValue));\r
- painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()),\r
- Qt::AlignCenter, val);\r
- }\r
- }\r
+ val = tr("%1").arg(qRed(rgbValue));\r
+ painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()),\r
+ Qt::AlignCenter, val);\r
+ }\r
+ }\r
\r
- painter->setPen(QPen(Qt::black, 1));\r
- painter->drawLines(linesX.data(), linesX.size());\r
- painter->drawLines(linesY.data(), linesY.size());\r
+ painter->setPen(QPen(Qt::black, 1));\r
+ painter->drawLines(linesX.data(), linesX.size());\r
+ painter->drawLines(linesY.data(), linesY.size());\r
\r
- //restore font size\r
- f.setPointSize(original_font_size);\r
- painter->setFont(f);\r
+ //restore font size\r
+ f.setPointSize(original_font_size);\r
+ painter->setFont(f);\r
\r
}\r
\r
void DefaultViewPort::drawViewOverview(QPainter *painter)\r
{\r
- QSize viewSize = size();\r
- viewSize.scale ( 100, 100,Qt::KeepAspectRatio );\r
+ QSize viewSize = size();\r
+ viewSize.scale ( 100, 100,Qt::KeepAspectRatio );\r
\r
- const int margin = 5;\r
+ const int margin = 5;\r
\r
- //draw the image's location\r
- painter->setBrush(QColor(0, 0, 0, 127));\r
- painter->setPen(Qt::darkGreen);\r
- painter->drawRect(QRect(width()-viewSize.width()-margin, 0,viewSize.width(),viewSize.height()));\r
+ //draw the image's location\r
+ painter->setBrush(QColor(0, 0, 0, 127));\r
+ painter->setPen(Qt::darkGreen);\r
+ painter->drawRect(QRect(width()-viewSize.width()-margin, 0,viewSize.width(),viewSize.height()));\r
\r
- //daw the view's location inside the image\r
- qreal ratioSize = 1/param_matrixWorld.m11();\r
- qreal ratioWindow = (qreal)(viewSize.height())/(qreal)(size().height());\r
- painter->setPen(Qt::darkBlue);\r
- painter->drawRect(QRectF(width()-viewSize.width()-positionCorners.left()*ratioSize*ratioWindow-margin,\r
- -positionCorners.top()*ratioSize*ratioWindow,\r
- (viewSize.width()-1)*ratioSize,\r
- (viewSize.height()-1)*ratioSize)\r
- );\r
+    //draw the view's location inside the image\r
+ qreal ratioSize = 1/param_matrixWorld.m11();\r
+ qreal ratioWindow = (qreal)(viewSize.height())/(qreal)(size().height());\r
+ painter->setPen(Qt::darkBlue);\r
+ painter->drawRect(QRectF(width()-viewSize.width()-positionCorners.left()*ratioSize*ratioWindow-margin,\r
+ -positionCorners.top()*ratioSize*ratioWindow,\r
+ (viewSize.width()-1)*ratioSize,\r
+ (viewSize.height()-1)*ratioSize)\r
+ );\r
}\r
\r
void DefaultViewPort::drawInstructions(QPainter *painter)\r
{\r
- QFontMetrics metrics = QFontMetrics(font());\r
- int border = qMax(4, metrics.leading());\r
+ QFontMetrics metrics = QFontMetrics(font());\r
+ int border = qMax(4, metrics.leading());\r
\r
- QRect rect = metrics.boundingRect(0, 0, width() - 2*border, int(height()*0.125),\r
- Qt::AlignCenter | Qt::TextWordWrap, infoText);\r
- painter->setRenderHint(QPainter::TextAntialiasing);\r
- painter->fillRect(QRect(0, 0, width(), rect.height() + 2*border),\r
- QColor(0, 0, 0, 127));\r
- painter->setPen(Qt::white);\r
- painter->fillRect(QRect(0, 0, width(), rect.height() + 2*border),\r
- QColor(0, 0, 0, 127));\r
+ QRect qrect = metrics.boundingRect(0, 0, width() - 2*border, int(height()*0.125),\r
+ Qt::AlignCenter | Qt::TextWordWrap, infoText);\r
+ painter->setRenderHint(QPainter::TextAntialiasing);\r
+ painter->fillRect(QRect(0, 0, width(), qrect.height() + 2*border),\r
+ QColor(0, 0, 0, 127));\r
+ painter->setPen(Qt::white);\r
+ painter->fillRect(QRect(0, 0, width(), qrect.height() + 2*border),\r
+ QColor(0, 0, 0, 127));\r
\r
- painter->drawText((width() - rect.width())/2, border,\r
- rect.width(), rect.height(),\r
- Qt::AlignCenter | Qt::TextWordWrap, infoText);\r
+ painter->drawText((width() - qrect.width())/2, border,\r
+ qrect.width(), qrect.height(),\r
+ Qt::AlignCenter | Qt::TextWordWrap, infoText);\r
}\r
\r
\r
\r
#ifdef HAVE_QT_OPENGL\r
\r
-OpenGlViewPort::OpenGlViewPort(QWidget* parent) : QGLWidget(parent), size(-1, -1)\r
+OpenGlViewPort::OpenGlViewPort(QWidget* _parent) : QGLWidget(_parent), size(-1, -1)\r
{\r
mouseCallback = 0;\r
mouseData = 0;\r
void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const;\r
\r
bool isGlContextInitialized() const;\r
- \r
+\r
PFNGLGENBUFFERSPROC glGenBuffersExt;\r
PFNGLDELETEBUFFERSPROC glDeleteBuffersExt;\r
\r
\r
void GlFuncTab_QT::generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const\r
{\r
- CV_FUNCNAME( "GlFuncTab_QT::generateBitmapFont" );\r
+ //CV_FUNCNAME( "GlFuncTab_QT::generateBitmapFont" );\r
\r
QFont font(QString(family.c_str()), height, weight, italic);\r
\r
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);\r
\r
#ifdef Q_WS_WIN\r
- std::auto_ptr<GlFuncTab_QT> glFuncTab(new GlFuncTab_QT(getDC()));\r
+ std::auto_ptr<GlFuncTab_QT> qglFuncTab(new GlFuncTab_QT(getDC()));\r
#else\r
- std::auto_ptr<GlFuncTab_QT> glFuncTab(new GlFuncTab_QT);\r
+ std::auto_ptr<GlFuncTab_QT> qglFuncTab(new GlFuncTab_QT);\r
#endif\r
\r
// Load extensions\r
\r
- glFuncTab->glGenBuffersExt = (PFNGLGENBUFFERSPROC)context()->getProcAddress("glGenBuffers");\r
- glFuncTab->glDeleteBuffersExt = (PFNGLDELETEBUFFERSPROC)context()->getProcAddress("glDeleteBuffers");\r
- glFuncTab->glBufferDataExt = (PFNGLBUFFERDATAPROC)context()->getProcAddress("glBufferData");\r
- glFuncTab->glBufferSubDataExt = (PFNGLBUFFERSUBDATAPROC)context()->getProcAddress("glBufferSubData");\r
- glFuncTab->glBindBufferExt = (PFNGLBINDBUFFERPROC)context()->getProcAddress("glBindBuffer");\r
- glFuncTab->glMapBufferExt = (PFNGLMAPBUFFERPROC)context()->getProcAddress("glMapBuffer");\r
- glFuncTab->glUnmapBufferExt = (PFNGLUNMAPBUFFERPROC)context()->getProcAddress("glUnmapBuffer");\r
+ qglFuncTab->glGenBuffersExt = (PFNGLGENBUFFERSPROC)context()->getProcAddress("glGenBuffers");\r
+ qglFuncTab->glDeleteBuffersExt = (PFNGLDELETEBUFFERSPROC)context()->getProcAddress("glDeleteBuffers");\r
+ qglFuncTab->glBufferDataExt = (PFNGLBUFFERDATAPROC)context()->getProcAddress("glBufferData");\r
+ qglFuncTab->glBufferSubDataExt = (PFNGLBUFFERSUBDATAPROC)context()->getProcAddress("glBufferSubData");\r
+ qglFuncTab->glBindBufferExt = (PFNGLBINDBUFFERPROC)context()->getProcAddress("glBindBuffer");\r
+ qglFuncTab->glMapBufferExt = (PFNGLMAPBUFFERPROC)context()->getProcAddress("glMapBuffer");\r
+ qglFuncTab->glUnmapBufferExt = (PFNGLUNMAPBUFFERPROC)context()->getProcAddress("glUnmapBuffer");\r
\r
- glFuncTab->initialized = true;\r
+ qglFuncTab->initialized = true;\r
\r
- this->glFuncTab = glFuncTab.release();\r
+ glFuncTab = qglFuncTab.release();\r
\r
- icvSetOpenGlFuncTab(this->glFuncTab);\r
+ icvSetOpenGlFuncTab(glFuncTab);\r
}\r
\r
void OpenGlViewPort::resizeGL(int w, int h)\r
CV_CheckGlError();\r
}\r
\r
-void OpenGlViewPort::mousePressEvent(QMouseEvent* event)\r
+void OpenGlViewPort::mousePressEvent(QMouseEvent* evnt)\r
{\r
- int cv_event = -1, flags = 0;\r
- QPoint pt = event->pos();\r
+ int cv_event = -1, flags = 0;\r
+ QPoint pt = evnt->pos();\r
\r
- icvmouseHandler(event, mouse_down, cv_event, flags);\r
- icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+ icvmouseHandler(evnt, mouse_down, cv_event, flags);\r
+ icvmouseProcessing(QPointF(pt), cv_event, flags);\r
\r
- QGLWidget::mousePressEvent(event);\r
+ QGLWidget::mousePressEvent(evnt);\r
}\r
\r
\r
-void OpenGlViewPort::mouseReleaseEvent(QMouseEvent* event)\r
+void OpenGlViewPort::mouseReleaseEvent(QMouseEvent* evnt)\r
{\r
- int cv_event = -1, flags = 0;\r
- QPoint pt = event->pos();\r
+ int cv_event = -1, flags = 0;\r
+ QPoint pt = evnt->pos();\r
\r
- icvmouseHandler(event, mouse_up, cv_event, flags);\r
- icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+ icvmouseHandler(evnt, mouse_up, cv_event, flags);\r
+ icvmouseProcessing(QPointF(pt), cv_event, flags);\r
\r
- QGLWidget::mouseReleaseEvent(event);\r
+ QGLWidget::mouseReleaseEvent(evnt);\r
}\r
\r
\r
-void OpenGlViewPort::mouseDoubleClickEvent(QMouseEvent* event)\r
+void OpenGlViewPort::mouseDoubleClickEvent(QMouseEvent* evnt)\r
{\r
- int cv_event = -1, flags = 0;\r
- QPoint pt = event->pos();\r
+ int cv_event = -1, flags = 0;\r
+ QPoint pt = evnt->pos();\r
\r
- icvmouseHandler(event, mouse_dbclick, cv_event, flags);\r
- icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+ icvmouseHandler(evnt, mouse_dbclick, cv_event, flags);\r
+ icvmouseProcessing(QPointF(pt), cv_event, flags);\r
\r
- QGLWidget::mouseDoubleClickEvent(event);\r
+ QGLWidget::mouseDoubleClickEvent(evnt);\r
}\r
\r
\r
-void OpenGlViewPort::mouseMoveEvent(QMouseEvent* event)\r
+void OpenGlViewPort::mouseMoveEvent(QMouseEvent* evnt)\r
{\r
- int cv_event = CV_EVENT_MOUSEMOVE, flags = 0;\r
- QPoint pt = event->pos();\r
+ int cv_event = CV_EVENT_MOUSEMOVE, flags = 0;\r
+ QPoint pt = evnt->pos();\r
\r
- //icvmouseHandler: pass parameters for cv_event, flags\r
- icvmouseHandler(event, mouse_move, cv_event, flags);\r
- icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+ //icvmouseHandler: pass parameters for cv_event, flags\r
+ icvmouseHandler(evnt, mouse_move, cv_event, flags);\r
+ icvmouseProcessing(QPointF(pt), cv_event, flags);\r
\r
- QGLWidget::mouseMoveEvent(event);\r
+ QGLWidget::mouseMoveEvent(evnt);\r
}\r
\r
-void OpenGlViewPort::icvmouseHandler(QMouseEvent* event, type_mouse_event category, int& cv_event, int& flags)\r
+void OpenGlViewPort::icvmouseHandler(QMouseEvent* evnt, type_mouse_event category, int& cv_event, int& flags)\r
{\r
- Qt::KeyboardModifiers modifiers = event->modifiers();\r
- Qt::MouseButtons buttons = event->buttons();\r
- \r
+ Qt::KeyboardModifiers modifiers = evnt->modifiers();\r
+ Qt::MouseButtons buttons = evnt->buttons();\r
+\r
flags = 0;\r
if (modifiers & Qt::ShiftModifier)\r
- flags |= CV_EVENT_FLAG_SHIFTKEY;\r
- if (modifiers & Qt::ControlModifier)\r
- flags |= CV_EVENT_FLAG_CTRLKEY;\r
- if (modifiers & Qt::AltModifier)\r
- flags |= CV_EVENT_FLAG_ALTKEY;\r
+ flags |= CV_EVENT_FLAG_SHIFTKEY;\r
+ if (modifiers & Qt::ControlModifier)\r
+ flags |= CV_EVENT_FLAG_CTRLKEY;\r
+ if (modifiers & Qt::AltModifier)\r
+ flags |= CV_EVENT_FLAG_ALTKEY;\r
\r
if (buttons & Qt::LeftButton)\r
- flags |= CV_EVENT_FLAG_LBUTTON;\r
- if (buttons & Qt::RightButton)\r
- flags |= CV_EVENT_FLAG_RBUTTON;\r
+ flags |= CV_EVENT_FLAG_LBUTTON;\r
+ if (buttons & Qt::RightButton)\r
+ flags |= CV_EVENT_FLAG_RBUTTON;\r
if (buttons & Qt::MidButton)\r
- flags |= CV_EVENT_FLAG_MBUTTON;\r
+ flags |= CV_EVENT_FLAG_MBUTTON;\r
\r
cv_event = CV_EVENT_MOUSEMOVE;\r
- switch (event->button())\r
- {\r
- case Qt::LeftButton:\r
- cv_event = tableMouseButtons[category][0];\r
- flags |= CV_EVENT_FLAG_LBUTTON;\r
- break;\r
-\r
- case Qt::RightButton:\r
- cv_event = tableMouseButtons[category][1];\r
- flags |= CV_EVENT_FLAG_RBUTTON;\r
- break;\r
-\r
- case Qt::MidButton:\r
- cv_event = tableMouseButtons[category][2];\r
- flags |= CV_EVENT_FLAG_MBUTTON;\r
- break;\r
-\r
- default:\r
+ switch (evnt->button())\r
+ {\r
+ case Qt::LeftButton:\r
+ cv_event = tableMouseButtons[category][0];\r
+ flags |= CV_EVENT_FLAG_LBUTTON;\r
+ break;\r
+\r
+ case Qt::RightButton:\r
+ cv_event = tableMouseButtons[category][1];\r
+ flags |= CV_EVENT_FLAG_RBUTTON;\r
+ break;\r
+\r
+ case Qt::MidButton:\r
+ cv_event = tableMouseButtons[category][2];\r
+ flags |= CV_EVENT_FLAG_MBUTTON;\r
+ break;\r
+\r
+ default:\r
;\r
- }\r
+ }\r
}\r
\r
\r
void OpenGlViewPort::icvmouseProcessing(QPointF pt, int cv_event, int flags)\r
{\r
- if (mouseCallback)\r
- mouseCallback(cv_event, pt.x(), pt.y(), flags, mouseData);\r
+ if (mouseCallback)\r
+ mouseCallback(cv_event, pt.x(), pt.y(), flags, mouseData);\r
}\r
\r
\r
#include <GL/glu.h>
#endif
-/*#if _MSC_VER >= 1200
-#pragma warning( disable: 4505 )
-#pragma comment(lib,"gtk-win32-2.0.lib")
-#pragma comment(lib,"glib-2.0.lib")
-#pragma comment(lib,"gobject-2.0.lib")
-#pragma comment(lib,"gdk-win32-2.0.lib")
-#pragma comment(lib,"gdk_pixbuf-2.0.lib")
-#endif*/
-
-
// TODO Fix the initial window size when flags=0. Right now the initial window is by default
// 320x240 size. A better default would be actual size of the image. Problem
// is determining desired window size with trackbars while still allowing resizing.
CV_UNLOCK_MUTEX();
}
-CvSize icvCalcOptimalWindowSize( CvWindow * window, CvSize new_image_size){
- CvSize window_size;
- GtkWidget * toplevel = gtk_widget_get_toplevel( window->frame );
- gdk_drawable_get_size( GDK_DRAWABLE(toplevel->window),
- &window_size.width, &window_size.height );
+// CvSize icvCalcOptimalWindowSize( CvWindow * window, CvSize new_image_size){
+// CvSize window_size;
+// GtkWidget * toplevel = gtk_widget_get_toplevel( window->frame );
+// gdk_drawable_get_size( GDK_DRAWABLE(toplevel->window),
+// &window_size.width, &window_size.height );
- window_size.width = window_size.width + new_image_size.width - window->widget->allocation.width;
- window_size.height = window_size.height + new_image_size.height - window->widget->allocation.height;
+// window_size.width = window_size.width + new_image_size.width - window->widget->allocation.width;
+// window_size.height = window_size.height + new_image_size.height - window->widget->allocation.height;
- return window_size;
-}
+// return window_size;
+// }
CV_IMPL void
cvShowImage( const char* name, const CvArr* arr )
#if defined WIN32 || defined _WIN32
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4710 )
-#endif
-
#define COMPILE_MULTIMON_STUBS // Required for multi-monitor support
#ifndef _MULTIMON_USE_SECURE_CRT
# define _MULTIMON_USE_SECURE_CRT 0 // some MinGW platforms have no strncpy_s
#ifndef __inout
# define __inout
#endif
+
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
#include <MultiMon.h>
#include <commctrl.h>
HGDIOBJ image;
int last_key;
int flags;
- int status;//0 normal, 1 fullscreen (YV)
+ int status;//0 normal, 1 fullscreen (YV)
CvMouseCallback on_mouse;
void* on_mouse_param;
char rootKey[1024];
strcpy( szKey, icvWindowPosRootKey );
strcat( szKey, name );
-
+
if( RegOpenKeyEx( HKEY_CURRENT_USER,szKey,0,KEY_READ,&hkey) != ERROR_SUCCESS )
{
HKEY hroot;
if( RegOpenKeyEx( HKEY_CURRENT_USER,szKey,0,KEY_WRITE,&hkey) != ERROR_SUCCESS )
return;
}
-
+
RegSetValueEx(hkey, "Left", 0, REG_DWORD, (BYTE*)&rect.x, sizeof(rect.x));
RegSetValueEx(hkey, "Top", 0, REG_DWORD, (BYTE*)&rect.y, sizeof(rect.y));
RegSetValueEx(hkey, "Width", 0, REG_DWORD, (BYTE*)&rect.width, sizeof(rect.width));
double cvGetModeWindow_W32(const char* name)//YV
{
- double result = -1;
-
- CV_FUNCNAME( "cvGetModeWindow_W32" );
+ double result = -1;
+
+ CV_FUNCNAME( "cvGetModeWindow_W32" );
__BEGIN__;
window = icvFindWindowByName( name );
if (!window)
EXIT; // keep silence here
-
+
result = window->status;
-
+
__END__;
- return result;
+ return result;
}
void cvSetModeWindow_W32( const char* name, double prop_value)//Yannick Verdie
{
- CV_FUNCNAME( "cvSetModeWindow_W32" );
+ CV_FUNCNAME( "cvSetModeWindow_W32" );
- __BEGIN__;
+ __BEGIN__;
- CvWindow* window;
+ CvWindow* window;
- if(!name)
- CV_ERROR( CV_StsNullPtr, "NULL name string" );
+ if(!name)
+ CV_ERROR( CV_StsNullPtr, "NULL name string" );
- window = icvFindWindowByName( name );
- if( !window )
- CV_ERROR( CV_StsNullPtr, "NULL window" );
+ window = icvFindWindowByName( name );
+ if( !window )
+ CV_ERROR( CV_StsNullPtr, "NULL window" );
- if(window->flags & CV_WINDOW_AUTOSIZE)//if the flag CV_WINDOW_AUTOSIZE is set
- EXIT;
+ if(window->flags & CV_WINDOW_AUTOSIZE)//if the flag CV_WINDOW_AUTOSIZE is set
+ EXIT;
- {
- DWORD dwStyle = (DWORD)GetWindowLongPtr(window->frame, GWL_STYLE);
- CvRect position;
+ {
+ DWORD dwStyle = (DWORD)GetWindowLongPtr(window->frame, GWL_STYLE);
+ CvRect position;
- if (window->status==CV_WINDOW_FULLSCREEN && prop_value==CV_WINDOW_NORMAL)
- {
- icvLoadWindowPos(window->name,position );
- SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle | WS_CAPTION | WS_THICKFRAME);
+ if (window->status==CV_WINDOW_FULLSCREEN && prop_value==CV_WINDOW_NORMAL)
+ {
+ icvLoadWindowPos(window->name,position );
+ SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle | WS_CAPTION | WS_THICKFRAME);
- SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED);
- window->status=CV_WINDOW_NORMAL;
+ SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED);
+ window->status=CV_WINDOW_NORMAL;
- EXIT;
- }
+ EXIT;
+ }
- if (window->status==CV_WINDOW_NORMAL && prop_value==CV_WINDOW_FULLSCREEN)
- {
- //save dimension
- RECT rect;
- GetWindowRect(window->frame, &rect);
- CvRect RectCV = cvRect(rect.left, rect.top,rect.right - rect.left, rect.bottom - rect.top);
- icvSaveWindowPos(window->name,RectCV );
+ if (window->status==CV_WINDOW_NORMAL && prop_value==CV_WINDOW_FULLSCREEN)
+ {
+ //save dimension
+ RECT rect;
+ GetWindowRect(window->frame, &rect);
+ CvRect RectCV = cvRect(rect.left, rect.top,rect.right - rect.left, rect.bottom - rect.top);
+ icvSaveWindowPos(window->name,RectCV );
- //Look at coordinate for fullscreen
- HMONITOR hMonitor;
- MONITORINFO mi;
- hMonitor = MonitorFromRect(&rect, MONITOR_DEFAULTTONEAREST);
+ //Look at coordinate for fullscreen
+ HMONITOR hMonitor;
+ MONITORINFO mi;
+ hMonitor = MonitorFromRect(&rect, MONITOR_DEFAULTTONEAREST);
- mi.cbSize = sizeof(mi);
- GetMonitorInfo(hMonitor, &mi);
+ mi.cbSize = sizeof(mi);
+ GetMonitorInfo(hMonitor, &mi);
- //fullscreen
- position.x=mi.rcMonitor.left;position.y=mi.rcMonitor.top;
- position.width=mi.rcMonitor.right - mi.rcMonitor.left;position.height=mi.rcMonitor.bottom - mi.rcMonitor.top;
- SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle & ~WS_CAPTION & ~WS_THICKFRAME);
+ //fullscreen
+ position.x=mi.rcMonitor.left;position.y=mi.rcMonitor.top;
+ position.width=mi.rcMonitor.right - mi.rcMonitor.left;position.height=mi.rcMonitor.bottom - mi.rcMonitor.top;
+ SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle & ~WS_CAPTION & ~WS_THICKFRAME);
- SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED);
- window->status=CV_WINDOW_FULLSCREEN;
+ SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED);
+ window->status=CV_WINDOW_FULLSCREEN;
- EXIT;
- }
- }
+ EXIT;
+ }
+ }
- __END__;
+ __END__;
}
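
For reference, a minimal sketch of how this path is normally reached from user code, assuming the public highgui property API (cvGetWindowProperty / cvSetWindowProperty with CV_WND_PROP_FULLSCREEN); note that the window must not be created with CV_WINDOW_AUTOSIZE, since the function above exits silently for autosized windows. The helper name is hypothetical.

// Hypothetical helper: toggle a named window between normal and fullscreen.
static void toggleFullscreen(const char* winname)
{
    double mode = cvGetWindowProperty(winname, CV_WND_PROP_FULLSCREEN);
    cvSetWindowProperty(winname, CV_WND_PROP_FULLSCREEN,
                        mode == CV_WINDOW_FULLSCREEN ? CV_WINDOW_NORMAL
                                                     : CV_WINDOW_FULLSCREEN);
}
// cvNamedWindow("view", CV_WINDOW_NORMAL);  // CV_WINDOW_AUTOSIZE windows ignore the request
// toggleFullscreen("view");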
double cvGetPropWindowAutoSize_W32(const char* name)
double cvGetRatioWindow_W32(const char* name)
{
- double result = -1;
-
- CV_FUNCNAME( "cvGetRatioWindow_W32" );
+ double result = -1;
+
+ CV_FUNCNAME( "cvGetRatioWindow_W32" );
__BEGIN__;
window = icvFindWindowByName( name );
if (!window)
EXIT; // keep silence here
-
+
result = static_cast<double>(window->width) / window->height;
-
+
__END__;
- return result;
+ return result;
}
double cvGetOpenGlProp_W32(const char* name)
{
- double result = -1;
+ double result = -1;
-#ifdef HAVE_OPENGL
- CV_FUNCNAME( "cvGetOpenGlProp_W32" );
+#ifdef HAVE_OPENGL
+ CV_FUNCNAME( "cvGetOpenGlProp_W32" );
__BEGIN__;
window = icvFindWindowByName( name );
if (!window)
EXIT; // keep silence here
-
+
result = window->useGl;
-
+
__END__;
#endif
- (void)name;
+ (void)name;
- return result;
+ return result;
}
void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const;
bool isGlContextInitialized() const;
-
+
PFNGLGENBUFFERSPROC glGenBuffersExt;
PFNGLDELETEBUFFERSPROC glDeleteBuffersExt;
weight, // font weight
italic ? TRUE : FALSE, // Italic
underline ? TRUE : FALSE, // Underline
- FALSE, // StrikeOut
- ANSI_CHARSET, // CharSet
+ FALSE, // StrikeOut
+ ANSI_CHARSET, // CharSet
OUT_TT_PRECIS, // OutPrecision
CLIP_DEFAULT_PRECIS, // ClipPrecision
ANTIALIASED_QUALITY, // Quality
0, // Shift Bit Ignored
0, // No Accumulation Buffer
0, 0, 0, 0, // Accumulation Bits Ignored
- 32, // 32 Bit Z-Buffer (Depth Buffer)
+ 32, // 32 Bit Z-Buffer (Depth Buffer)
0, // No Stencil Buffer
0, // No Auxiliary Buffer
PFD_MAIN_PLANE, // Main Drawing Layer
0, // Reserved
- 0, 0, 0 // Layer Masks Ignored
+ 0, 0, 0 // Layer Masks Ignored
};
hGLDC = GetDC(hWnd);
void releaseGlContext(CvWindow* window)
{
- CV_FUNCNAME( "releaseGlContext" );
+ //CV_FUNCNAME( "releaseGlContext" );
__BEGIN__;
window->hGLRC = NULL;
}
- if (window->dc)
+ if (window->dc)
{
ReleaseDC(window->hwnd, window->dc);
window->dc = NULL;
if (!wglMakeCurrent(window->dc, window->hGLRC))
CV_ERROR( CV_OpenGlApiCallError, "Can't Activate The GL Rendering Context" );
- glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (window->glDrawCallback)
window->glDrawCallback(window->glDrawData);
ShowWindow(mainhWnd, SW_SHOW);
- //YV- remove one border by changing the style
+ //YV- remove one border by changing the style
hWnd = CreateWindow("HighGUI class", "", (defStyle & ~WS_SIZEBOX) | WS_CHILD, CW_USEDEFAULT, 0, rect.width, rect.height, mainhWnd, 0, hg_hinstance, 0);
if( !hWnd )
CV_ERROR( CV_StsError, "Frame window can not be created" );
CV_ERROR( CV_StsNullPtr, "NULL name" );
window = icvFindWindowByName(name);
- if(!window)
- {
+ if(!window)
+ {
#ifndef HAVE_OPENGL
- cvNamedWindow(name, CV_WINDOW_AUTOSIZE);
+ cvNamedWindow(name, CV_WINDOW_AUTOSIZE);
#else
- cvNamedWindow(name, CV_WINDOW_AUTOSIZE | CV_WINDOW_OPENGL);
+ cvNamedWindow(name, CV_WINDOW_AUTOSIZE | CV_WINDOW_OPENGL);
#endif
- window = icvFindWindowByName(name);
- }
+ window = icvFindWindowByName(name);
+ }
if( !window || !arr )
EXIT; // keep silence here.
__END__;
}
+#if 0
CV_IMPL void
cvShowImageHWND(HWND w_hWnd, const CvArr* arr)
{
if( CV_IS_IMAGE_HDR( arr ) )
origin = ((IplImage*)arr)->origin;
- CV_CALL( image = cvGetMat( arr, &stub ) );
+ CV_CALL( image = cvGetMat( arr, &stub ) );
if ( hdc )
{
dst_ptr = bmp.bmBits;
}
- if( size.cx != image->width || size.cy != image->height || channels != channels0 )
+ if( size.cx != image->width || size.cy != image->height || channels != channels0 )
{
changed_size = true;
__END__;
}
+#endif
CV_IMPL void cvResizeWindow(const char* name, int width, int height )
{
{
// Snap window to screen edges with multi-monitor support. // Adi Shavit
LPWINDOWPOS pos = (LPWINDOWPOS)lParam;
-
+
RECT rect;
GetWindowRect(window->frame, &rect);
const int SNAP_DISTANCE = 15;
- if (abs(pos->x - mi.rcMonitor.left) <= SNAP_DISTANCE)
+ if (abs(pos->x - mi.rcMonitor.left) <= SNAP_DISTANCE)
pos->x = mi.rcMonitor.left; // snap to left edge
- else
+ else
if (abs(pos->x + pos->cx - mi.rcMonitor.right) <= SNAP_DISTANCE)
pos->x = mi.rcMonitor.right - pos->cx; // snap to right edge
if (abs(pos->y - mi.rcMonitor.top) <= SNAP_DISTANCE)
pos->y = mi.rcMonitor.top; // snap to top edge
- else
+ else
if (abs(pos->y + pos->cy - mi.rcMonitor.bottom) <= SNAP_DISTANCE)
pos->y = mi.rcMonitor.bottom - pos->cy; // snap to bottom edge
}
EndPaint(hwnd, &paint);
}
#ifdef HAVE_OPENGL
- else if(window->useGl)
+ else if(window->useGl)
{
- drawGl(window);
+ drawGl(window);
return DefWindowProc(hwnd, uMsg, wParam, lParam);
}
#endif
if( hg_on_preprocess )
{
int was_processed = 0;
- int ret = hg_on_preprocess(hwnd, uMsg, wParam, lParam, &was_processed);
+ int rethg = hg_on_preprocess(hwnd, uMsg, wParam, lParam, &was_processed);
if( was_processed )
- return ret;
+ return rethg;
}
ret = HighGUIProc(hwnd, uMsg, wParam, lParam);
if(hg_on_postprocess)
{
int was_processed = 0;
- int ret = hg_on_postprocess(hwnd, uMsg, wParam, lParam, &was_processed);
+ int rethg = hg_on_postprocess(hwnd, uMsg, wParam, lParam, &was_processed);
if( was_processed )
- return ret;
+ return rethg;
}
return ret;
void CV_DrawingTest::run( int )
{
Mat testImg, valImg;
- const string name = "drawing/image.jpg";
+ const string fname = "drawing/image.jpg";
string path = ts->get_data_path(), filename;
- filename = path + name;
+ filename = path + fname;
draw( testImg );
{
public:
CV_FillConvexPolyTest() {}
- ~CV_FillConvexPolyTest() {}
+ ~CV_FillConvexPolyTest() {}
protected:
void run(int)
{
vector<Point> line1;
vector<Point> line2;
-
+
line1.push_back(Point(1, 1));
line1.push_back(Point(5, 1));
line1.push_back(Point(5, 8));
line1.push_back(Point(1, 8));
-
+
line2.push_back(Point(2, 2));
line2.push_back(Point(10, 2));
line2.push_back(Point(10, 16));
line2.push_back(Point(2, 16));
-
+
Mat gray0(10,10,CV_8U, Scalar(0));
fillConvexPoly(gray0, line1, Scalar(255), 8, 0);
int nz1 = countNonZero(gray0);
-
+
fillConvexPoly(gray0, line2, Scalar(0), 8, 1);
int nz2 = countNonZero(gray0)/255;
-
+
CV_Assert( nz1 == 40 && nz2 == 0 );
}
};
#include "test_precomp.hpp"
#include "opencv2/highgui/highgui.hpp"
-#if defined HAVE_GTK || defined HAVE_QT || defined WIN32 || defined _WIN32 || HAVE_CARBON || HAVE_COCOA
+#if defined HAVE_GTK || defined HAVE_QT || defined WIN32 || defined _WIN32 || defined HAVE_CARBON || defined HAVE_COCOA
using namespace cv;
using namespace std;
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
/*defined(HAVE_OPENNI) || too specialized */ \
defined(HAVE_FFMPEG) || \
defined(WIN32) /* assume that we have ffmpeg */
-
+
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
#else
# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
{
string fourccToString(int fourcc);
-
+
struct VideoFormat
{
VideoFormat() { fourcc = -1; }
VideoFormat(const string& _ext, int _fourcc) : ext(_ext), fourcc(_fourcc) {}
bool empty() const { return ext.empty(); }
-
+
string ext;
int fourcc;
};
-
+
extern const VideoFormat g_specific_fmt_list[];
-
+
}
#endif
{
return format("%c%c%c%c", fourcc & 255, (fourcc >> 8) & 255, (fourcc >> 16) & 255, (fourcc >> 24) & 255);
}
-
+
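A quick sanity check of the helper above: CV_FOURCC packs its four characters little-endian, which is exactly the order the four shifts unpack.

// e.g. fourccToString(CV_FOURCC('X','V','I','D')) == "XVID"
//      fourccToString(CV_FOURCC('M','J','P','G')) == "MJPG"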
const VideoFormat g_specific_fmt_list[] =
{
VideoFormat("avi", CV_FOURCC('X', 'V', 'I', 'D')),
VideoFormat("mkv", CV_FOURCC('X', 'V', 'I', 'D')),
VideoFormat("mkv", CV_FOURCC('M', 'P', 'E', 'G')),
VideoFormat("mkv", CV_FOURCC('M', 'J', 'P', 'G')),
-
+
VideoFormat("mov", CV_FOURCC('m', 'p', '4', 'v')),
VideoFormat()
};
-
+
}
class CV_HighGuiTest : public cvtest::BaseTest
if (!img)
break;
-
+
frames.push_back(Mat(img).clone());
if (writer == 0)
{
string ext = fmt.ext;
int fourcc = fmt.fourcc;
-
+
string fourcc_str = cvtest::fourccToString(fourcc);
const string video_file = "video_" + fourcc_str + "." + ext;
if (!writer.isOpened())
{
// call it repeatedly for easier debugging
- VideoWriter writer(video_file, fourcc, 25, frame_size, true);
+ VideoWriter writer2(video_file, fourcc, 25, frame_size, true);
ts->printf(ts->LOG, "Creating a video in %s...\n", video_file.c_str());
ts->printf(ts->LOG, "Cannot create VideoWriter object with codec %s.\n", fourcc_str.c_str());
ts->set_failed_test_info(ts->FAIL_MISMATCH);
const size_t IMAGE_COUNT = 30;
vector<Mat> images;
-
+
for( size_t i = 0; i < IMAGE_COUNT; ++i )
{
string file_path = format("%s../python/images/QCIF_%02d.bmp", dir.c_str(), i);
if (img.at<Vec3b>(k, l) == Vec3b::all(0))
img.at<Vec3b>(k, l) = Vec3b(0, 255, 0);
else img.at<Vec3b>(k, l) = Vec3b(0, 0, 255);
-
+
resize(img, img, frame_size, 0.0, 0.0, INTER_CUBIC);
images.push_back(img);
-/*M///////////////////////////////////////////////////////////////////////////////////////\r
- //\r
- // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
- //\r
- // By downloading, copying, installing or using the software you agree to this license.\r
- // If you do not agree to this license, do not download, install,\r
- // copy or use the software.\r
- //\r
- //\r
- // License Agreement\r
- // For Open Source Computer Vision Library\r
- //\r
- // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
- // Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
- // Third party copyrights are property of their respective owners.\r
- //\r
- // Redistribution and use in source and binary forms, with or without modification,\r
- // are permitted provided that the following conditions are met:\r
- //\r
- // * Redistribution's of source code must retain the above copyright notice,\r
- // this list of conditions and the following disclaimer.\r
- //\r
- // * Redistribution's in binary form must reproduce the above copyright notice,\r
- // this list of conditions and the following disclaimer in the documentation\r
- // and/or other materials provided with the distribution.\r
- //\r
- // * The name of the copyright holders may not be used to endorse or promote products\r
- // derived from this software without specific prior written permission.\r
- //\r
- // This software is provided by the copyright holders and contributors "as is" and\r
- // any express or implied warranties, including, but not limited to, the implied\r
- // warranties of merchantability and fitness for a particular purpose are disclaimed.\r
- // In no event shall the Intel Corporation or contributors be liable for any direct,\r
- // indirect, incidental, special, exemplary, or consequential damages\r
- // (including, but not limited to, procurement of substitute goods or services;\r
- // loss of use, data, or profits; or business interruption) however caused\r
- // and on any theory of liability, whether in contract, strict liability,\r
- // or tort (including negligence or otherwise) arising in any way out of\r
- // the use of this software, even if advised of the possibility of such damage.\r
- //\r
- //M*/\r
-\r
-#include "test_precomp.hpp"\r
-#include "opencv2/highgui/highgui.hpp"\r
-\r
-using namespace cv;\r
-using namespace std;\r
-\r
-class CV_PositioningTest : public cvtest::BaseTest\r
-{\r
-public:\r
- CV_PositioningTest()\r
- {\r
- framesize = Size(640, 480);\r
- }\r
- \r
- Mat drawFrame(int i)\r
- {\r
- Mat mat = Mat::zeros(framesize, CV_8UC3);\r
- \r
- mat = Scalar(fabs(cos(i*0.08)*255), fabs(sin(i*0.05)*255), i);\r
- putText(mat, format("%03d", i), Point(10, 350), 0, 10, Scalar(128, 255, 255), 15);\r
- return mat;\r
- }\r
- \r
- string getFilename(const cvtest::VideoFormat& fmt)\r
- {\r
- return format("test_video_%s.%s", cvtest::fourccToString(fmt.fourcc).c_str(), fmt.ext.c_str());\r
- }\r
- \r
- bool CreateTestVideo(const cvtest::VideoFormat& fmt, int framecount)\r
- {\r
- string filename = getFilename(fmt);\r
- \r
- VideoWriter writer(filename, fmt.fourcc, 25, framesize, true);\r
- if( !writer.isOpened() )\r
- return false;\r
- \r
- for (int i = 0; i < framecount; ++i)\r
- {\r
- Mat img = drawFrame(i);\r
- writer << img;\r
- }\r
- return true;\r
- }\r
-\r
- void run(int)\r
- {\r
- int n_frames = 100;\r
- \r
- for( int testcase = 0; ; testcase++ )\r
- {\r
- const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[testcase];\r
- if( fmt.empty() )\r
- break;\r
- string filename = getFilename(fmt);\r
- ts->printf(ts->LOG, "\nFile: %s\n", filename.c_str());\r
-\r
- if( !CreateTestVideo(fmt, n_frames) )\r
- {\r
- ts->printf(ts->LOG, "\nError: cannot create video file");\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- return;\r
- }\r
- \r
- VideoCapture cap(filename);\r
- \r
- if (!cap.isOpened())\r
- {\r
- ts->printf(ts->LOG, "\nError: cannot read video file.");\r
- ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);\r
- return;\r
- }\r
- \r
- int N0 = cap.get(CV_CAP_PROP_FRAME_COUNT);\r
- cap.set(CV_CAP_PROP_POS_FRAMES, 0);\r
- int N = cap.get(CV_CAP_PROP_FRAME_COUNT);\r
- \r
- if (N != n_frames || N != N0)\r
- {\r
- ts->printf(ts->LOG, "\nError: returned frame count (N0=%d, N=%d) is different from the reference number %d\n", N0, N, n_frames);\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- return;\r
- }\r
- \r
- for (int k = 0; k < N; ++k)\r
- {\r
- int idx = theRNG().uniform(0, N);\r
- \r
- if( !cap.set(CV_CAP_PROP_POS_FRAMES, idx) )\r
- {\r
- ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx);\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- return;\r
- }\r
- \r
- int idx1 = (int)cap.get(CV_CAP_PROP_POS_FRAMES);\r
- \r
- Mat img; cap >> img;\r
- Mat img0 = drawFrame(idx);\r
- \r
- if( idx != idx1 )\r
- {\r
- ts->printf(ts->LOG, "\nError: the current position (%d) after seek is different from specified (%d)\n",\r
- idx1, idx);\r
- ts->printf(ts->LOG, "Saving both frames ...\n");\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- imwrite("opencv_test_highgui_postest_actual.png", img);\r
- imwrite("opencv_test_highgui_postest_expected.png", img0);\r
- return;\r
- }\r
- \r
- if (img.empty())\r
- {\r
- ts->printf(ts->LOG, "\nError: cannot read a frame at position %d.\n", idx);\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- return;\r
- }\r
- \r
- double err = PSNR(img, img0);\r
- \r
- if( err < 20 )\r
- {\r
- ts->printf(ts->LOG, "The frame read after positioning to %d is incorrect (PSNR=%g)\n", idx, err);\r
- ts->printf(ts->LOG, "Saving both frames ...\n");\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- imwrite("opencv_test_highgui_postest_actual.png", img);\r
- imwrite("opencv_test_highgui_postest_expected.png", img0);\r
- return;\r
- }\r
- }\r
- }\r
- }\r
- \r
- Size framesize;\r
-};\r
-\r
-#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT\r
-TEST(Highgui_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); }\r
-#endif\r
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ // By downloading, copying, installing or using the software you agree to this license.
+ // If you do not agree to this license, do not download, install,
+ // copy or use the software.
+ //
+ //
+ // License Agreement
+ // For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+ // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ // * Redistribution's of source code must retain the above copyright notice,
+ // this list of conditions and the following disclaimer.
+ //
+ // * Redistribution's in binary form must reproduce the above copyright notice,
+ // this list of conditions and the following disclaimer in the documentation
+ // and/or other materials provided with the distribution.
+ //
+ // * The name of the copyright holders may not be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#include "test_precomp.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+using namespace cv;
+using namespace std;
+
+class CV_PositioningTest : public cvtest::BaseTest
+{
+public:
+ CV_PositioningTest()
+ {
+ framesize = Size(640, 480);
+ }
+
+ Mat drawFrame(int i)
+ {
+ Mat mat = Mat::zeros(framesize, CV_8UC3);
+
+ mat = Scalar(fabs(cos(i*0.08)*255), fabs(sin(i*0.05)*255), i);
+ putText(mat, format("%03d", i), Point(10, 350), 0, 10, Scalar(128, 255, 255), 15);
+ return mat;
+ }
+
+ string getFilename(const cvtest::VideoFormat& fmt)
+ {
+ return format("test_video_%s.%s", cvtest::fourccToString(fmt.fourcc).c_str(), fmt.ext.c_str());
+ }
+
+ bool CreateTestVideo(const cvtest::VideoFormat& fmt, int framecount)
+ {
+ string filename = getFilename(fmt);
+
+ VideoWriter writer(filename, fmt.fourcc, 25, framesize, true);
+ if( !writer.isOpened() )
+ return false;
+
+ for (int i = 0; i < framecount; ++i)
+ {
+ Mat img = drawFrame(i);
+ writer << img;
+ }
+ return true;
+ }
+
+ void run(int)
+ {
+ int n_frames = 100;
+
+ for( int testcase = 0; ; testcase++ )
+ {
+ const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[testcase];
+ if( fmt.empty() )
+ break;
+ string filename = getFilename(fmt);
+ ts->printf(ts->LOG, "\nFile: %s\n", filename.c_str());
+
+ if( !CreateTestVideo(fmt, n_frames) )
+ {
+ ts->printf(ts->LOG, "\nError: cannot create video file");
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ return;
+ }
+
+ VideoCapture cap(filename);
+
+ if (!cap.isOpened())
+ {
+ ts->printf(ts->LOG, "\nError: cannot read video file.");
+ ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);
+ return;
+ }
+
+ int N0 = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
+ cap.set(CV_CAP_PROP_POS_FRAMES, 0);
+ int N = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
+
+ if (N != n_frames || N != N0)
+ {
+ ts->printf(ts->LOG, "\nError: returned frame count (N0=%d, N=%d) is different from the reference number %d\n", N0, N, n_frames);
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ return;
+ }
+
+ for (int k = 0; k < N; ++k)
+ {
+ int idx = theRNG().uniform(0, N);
+
+ if( !cap.set(CV_CAP_PROP_POS_FRAMES, idx) )
+ {
+ ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx);
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ return;
+ }
+
+ int idx1 = (int)cap.get(CV_CAP_PROP_POS_FRAMES);
+
+ Mat img; cap >> img;
+ Mat img0 = drawFrame(idx);
+
+ if( idx != idx1 )
+ {
+ ts->printf(ts->LOG, "\nError: the current position (%d) after seek is different from specified (%d)\n",
+ idx1, idx);
+ ts->printf(ts->LOG, "Saving both frames ...\n");
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ imwrite("opencv_test_highgui_postest_actual.png", img);
+ imwrite("opencv_test_highgui_postest_expected.png", img0);
+ return;
+ }
+
+ if (img.empty())
+ {
+ ts->printf(ts->LOG, "\nError: cannot read a frame at position %d.\n", idx);
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ return;
+ }
+
+ double err = PSNR(img, img0);
+
+ if( err < 20 )
+ {
+ ts->printf(ts->LOG, "The frame read after positioning to %d is incorrect (PSNR=%g)\n", idx, err);
+ ts->printf(ts->LOG, "Saving both frames ...\n");
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ imwrite("opencv_test_highgui_postest_actual.png", img);
+ imwrite("opencv_test_highgui_postest_expected.png", img0);
+ return;
+ }
+ }
+ }
+ }
+
+ Size framesize;
+};
+
+#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT
+TEST(Highgui_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); }
+#endif
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
_LIST_INLINE CVPOS prefix##get_tail_pos_##type(_CVLIST*);\
_LIST_INLINE type* prefix##get_next_##type(CVPOS*);\
_LIST_INLINE type* prefix##get_prev_##type(CVPOS*);\
+ _LIST_INLINE int prefix##is_pos_##type(CVPOS pos);\
/* Modification functions*/\
_LIST_INLINE void prefix##clear_list_##type(_CVLIST*);\
_LIST_INLINE CVPOS prefix##add_head_##type(_CVLIST*, type*);\
}\
element->m_next = ((element_type*)l->m_head_free.m_pos);\
l->m_head_free.m_pos = element;
-
-
+
+
/*#define GET_FIRST_FREE(l) ((ELEMENT_##type*)(l->m_head_free.m_pos))*/
#define IMPLEMENT_LIST(type, prefix)\
44141 Dortmund
Germany
www.md-it.de
-
+
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
- that the following conditions are met:
+ that the following conditions are met:
Redistributions of source code must retain
- the above copyright notice, this list of conditions and the following disclaimer.
+ the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
+ and/or other materials provided with the distribution.
The name of Contributor may not be used to endorse or promote products
- derived from this software without specific prior written permission.
+ derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
_Tp cn = 0;
int i;
tab[0] = tab[1] = (_Tp)0;
-
+
for(i = 1; i < n-1; i++)
{
_Tp t = 3*(f[i+1] - 2*f[i] + f[i-1]);
_Tp l = 1/(4 - tab[(i-1)*4]);
tab[i*4] = l; tab[i*4+1] = (t - tab[(i-1)*4+1])*l;
}
-
+
for(i = n-1; i >= 0; i--)
{
_Tp c = tab[i*4+1] - tab[i*4]*cn;
x -= ix;
tab += ix*4;
return ((tab[3]*x + tab[2])*x + tab[1])*x + tab[0];
-}
+}
+
-
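A minimal usage sketch for the two helpers above, following the signatures used elsewhere in this file (splineBuild(f, n, tab) fills four coefficients per knot, splineInterpolate(x, tab, n) evaluates the cubic); the sampled curve here is only an illustration.

// Build a spline table over n samples taken at x = 0, 1, ..., n-1,
// then evaluate it at a fractional position.
const int n = 8;
float f[8], tab[8*4];
for( int i = 0; i < n; i++ )
    f[i] = (float)(i*i);                      // any sampled function will do
splineBuild(f, n, tab);                       // 4 coefficients per knot
float y = splineInterpolate(2.5f, tab, n);    // cubic estimate between samples 2 and 3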
template<typename _Tp> struct ColorChannel
{
typedef float worktype_f;
static _Tp max() { return std::numeric_limits<_Tp>::max(); }
- static _Tp half() { return (_Tp)(max()/2 + 1); }
+ static _Tp half() { return (_Tp)(max()/2 + 1); }
};
template<> struct ColorChannel<float>
static double half() { return 0.5; }
};*/
-
+
///////////////////////////// Top-level template function ////////////////////////////////
template<class Cvt> void CvtColorLoop(const Mat& srcmat, Mat& dstmat, const Cvt& cvt)
const uchar* src = srcmat.data;
uchar* dst = dstmat.data;
size_t srcstep = srcmat.step, dststep = dstmat.step;
-
+
if( srcmat.isContinuous() && dstmat.isContinuous() )
{
sz.width *= sz.height;
sz.height = 1;
- }
-
+ }
+
for( ; sz.height--; src += srcstep, dst += dststep )
cvt((const _Tp*)src, (_Tp*)dst, sz.width);
}
-
-
+
+
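How the functors below are driven (a sketch only; the actual cvtColor dispatcher is outside this excerpt). CvtColorLoop walks the image row by row, collapsing it to a single run when both matrices are continuous, and applies the per-row functor; the blueIdx argument of the functors records whether blue sits in channel 0 or channel 2, so one template covers both BGR and RGB orders.

// Hypothetical call site: expand a 3-channel 8-bit image to 4 channels.
Mat src(480, 640, CV_8UC3), dst(480, 640, CV_8UC4);   // dst must be pre-allocated
CvtColorLoop(src, dst, RGB2RGB<uchar>(3, 4, 0));      // srccn, dstcn, blueIdx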
////////////////// Various 3/4-channel to 3/4-channel RGB transformations /////////////////
-
+
template<typename _Tp> struct RGB2RGB
{
typedef _Tp channel_type;
-
+
RGB2RGB(int _srccn, int _dstcn, int _blueIdx) : srccn(_srccn), dstcn(_dstcn), blueIdx(_blueIdx) {}
void operator()(const _Tp* src, _Tp* dst, int n) const
{
}
}
}
-
+
int srccn, dstcn, blueIdx;
};
-
+
/////////// Transforming 16-bit (565 or 555) RGB to/from 24/32-bit (888[8]) RGB //////////
struct RGB5x52RGB
{
typedef uchar channel_type;
-
+
RGB5x52RGB(int _dstcn, int _blueIdx, int _greenBits)
- : dstcn(_dstcn), blueIdx(_blueIdx), greenBits(_greenBits) {}
-
+ : dstcn(_dstcn), blueIdx(_blueIdx), greenBits(_greenBits) {}
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int dcn = dstcn, bidx = blueIdx;
dst[3] = t & 0x8000 ? 255 : 0;
}
}
-
+
int dstcn, blueIdx, greenBits;
};
-
+
struct RGB2RGB5x5
{
typedef uchar channel_type;
-
+
RGB2RGB5x5(int _srccn, int _blueIdx, int _greenBits)
- : srccn(_srccn), blueIdx(_blueIdx), greenBits(_greenBits) {}
-
+ : srccn(_srccn), blueIdx(_blueIdx), greenBits(_greenBits) {}
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int scn = srccn, bidx = blueIdx;
((src[bidx^2]&~7) << 7)|(src[3] ? 0x8000 : 0));
}
}
-
+
int srccn, blueIdx, greenBits;
};
-
+
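For reference, a sketch of the 16-bit packing these two functors handle. With greenBits == 6 the layout is RGB565 (bits 15..11 red, 10..5 green, 4..0 blue); with greenBits == 5 it is RGB555, and the spare top bit doubles as an alpha flag, which is what the 0x8000 tests above read and write. The helper names are illustrative only.

// Illustration only: pack/unpack one RGB565 pixel.
static inline unsigned short packRGB565( unsigned char b, unsigned char g, unsigned char r )
{
    return (unsigned short)((b >> 3) | ((g & ~3) << 3) | ((r & ~7) << 8));
}
static inline void unpackRGB565( unsigned short t, unsigned char& b, unsigned char& g, unsigned char& r )
{
    b = (unsigned char)((t << 3) & ~7);     // low 5 bits back to the top of an 8-bit channel
    g = (unsigned char)((t >> 3) & ~3);     // middle 6 bits
    r = (unsigned char)((t >> 8) & ~7);     // high 5 bits
}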
///////////////////////////////// Color to/from Grayscale ////////////////////////////////
template<typename _Tp>
struct Gray2RGB
{
typedef _Tp channel_type;
-
+
Gray2RGB(int _dstcn) : dstcn(_dstcn) {}
void operator()(const _Tp* src, _Tp* dst, int n) const
{
}
}
}
-
+
int dstcn;
};
-
+
struct Gray2RGB5x5
{
typedef uchar channel_type;
-
+
Gray2RGB5x5(int _greenBits) : greenBits(_greenBits) {}
void operator()(const uchar* src, uchar* dst, int n) const
{
#undef R2Y
#undef G2Y
#undef B2Y
-
+
enum
{
yuv_shift = 14,
struct RGB5x52Gray
{
typedef uchar channel_type;
-
+
RGB5x52Gray(int _greenBits) : greenBits(_greenBits) {}
void operator()(const uchar* src, uchar* dst, int n) const
{
template<typename _Tp> struct RGB2Gray
{
typedef _Tp channel_type;
-
+
RGB2Gray(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn)
{
static const float coeffs0[] = { 0.299f, 0.587f, 0.114f };
if(blueIdx == 0)
std::swap(coeffs[0], coeffs[2]);
}
-
+
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int scn = srccn;
float coeffs[3];
};
-
+
template<> struct RGB2Gray<uchar>
{
typedef uchar channel_type;
-
+
RGB2Gray<uchar>(int _srccn, int blueIdx, const int* coeffs) : srccn(_srccn)
{
const int coeffs0[] = { R2Y, G2Y, B2Y };
if(!coeffs) coeffs = coeffs0;
-
+
int b = 0, g = 0, r = (1 << (yuv_shift-1));
int db = coeffs[blueIdx^2], dg = coeffs[1], dr = coeffs[blueIdx];
-
+
for( int i = 0; i < 256; i++, b += db, g += dg, r += dr )
{
tab[i] = b;
void operator()(const uchar* src, uchar* dst, int n) const
{
int scn = srccn;
- const int* _tab = tab;
+ const int* _tab = tab;
for(int i = 0; i < n; i++, src += scn)
dst[i] = (uchar)((_tab[src[0]] + _tab[src[1]+256] + _tab[src[2]+512]) >> yuv_shift);
}
- int srccn, blueIdx;
+ int srccn;
int tab[256*3];
};
-
+
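The 8-bit specialisation above folds the fixed-point weights and the rounding constant into three 256-entry tables; written without the tables, the same computation is simply the following (R2Y, G2Y, B2Y and yuv_shift = 14 are the fixed-point constants defined earlier in this file; the helper name is illustrative).

// Table-free equivalent of the operator() above, for one pixel in R,G,B order.
static inline uchar rgb2gray_u8( int r, int g, int b )
{
    // add half of the divisor before shifting so the result is rounded, not truncated
    return (uchar)((r*R2Y + g*G2Y + b*B2Y + (1 << (yuv_shift - 1))) >> yuv_shift);
}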
template<> struct RGB2Gray<ushort>
{
typedef ushort channel_type;
-
+
RGB2Gray<ushort>(int _srccn, int blueIdx, const int* _coeffs) : srccn(_srccn)
{
static const int coeffs0[] = { R2Y, G2Y, B2Y };
if( blueIdx == 0 )
std::swap(coeffs[0], coeffs[2]);
}
-
+
void operator()(const ushort* src, ushort* dst, int n) const
{
int scn = srccn, cb = coeffs[0], cg = coeffs[1], cr = coeffs[2];
int coeffs[3];
};
-
+
///////////////////////////////////// RGB <-> YCrCb //////////////////////////////////////
template<typename _Tp> struct RGB2YCrCb_f
{
typedef _Tp channel_type;
-
+
RGB2YCrCb_f(int _srccn, int _blueIdx, const float* _coeffs) : srccn(_srccn), blueIdx(_blueIdx)
- {
- static const float coeffs0[] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f};
- memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0]));
- if(blueIdx==0) std::swap(coeffs[0], coeffs[2]);
- }
-
+ {
+ static const float coeffs0[] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f};
+ memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0]));
+ if(blueIdx==0) std::swap(coeffs[0], coeffs[2]);
+ }
+
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int scn = srccn, bidx = blueIdx;
const _Tp delta = ColorChannel<_Tp>::half();
- float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4];
+ float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4];
n *= 3;
for(int i = 0; i < n; i += 3, src += scn)
{
}
}
int srccn, blueIdx;
- float coeffs[5];
+ float coeffs[5];
};
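
Written out, the floating-point functor above computes the following, with delta = ColorChannel<_Tp>::half() (128 for 8-bit data) and the default coefficient set {0.299, 0.587, 0.114, 0.713, 0.564}:

//   Y  = 0.299*R + 0.587*G + 0.114*B
//   Cr = (R - Y)*0.713 + delta
//   Cb = (B - Y)*0.564 + delta
// Worked example, 8-bit pure red (255, 0, 0):
//   Y ~ 76.2,  Cr ~ 255.5 (saturates to 255),  Cb ~ 85.0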
template<typename _Tp> struct RGB2YCrCb_i
{
typedef _Tp channel_type;
-
+
RGB2YCrCb_i(int _srccn, int _blueIdx, const int* _coeffs)
- : srccn(_srccn), blueIdx(_blueIdx)
- {
- static const int coeffs0[] = {R2Y, G2Y, B2Y, 11682, 9241};
- memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0]));
- if(blueIdx==0) std::swap(coeffs[0], coeffs[2]);
- }
+ : srccn(_srccn), blueIdx(_blueIdx)
+ {
+ static const int coeffs0[] = {R2Y, G2Y, B2Y, 11682, 9241};
+ memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0]));
+ if(blueIdx==0) std::swap(coeffs[0], coeffs[2]);
+ }
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int scn = srccn, bidx = blueIdx;
- int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4];
+ int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4];
int delta = ColorChannel<_Tp>::half()*(1 << yuv_shift);
n *= 3;
for(int i = 0; i < n; i += 3, src += scn)
}
}
int srccn, blueIdx;
- int coeffs[5];
-};
+ int coeffs[5];
+};
template<typename _Tp> struct YCrCb2RGB_f
{
typedef _Tp channel_type;
-
+
YCrCb2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs)
- : dstcn(_dstcn), blueIdx(_blueIdx)
- {
- static const float coeffs0[] = {1.403f, -0.714f, -0.344f, 1.773f};
- memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0]));
- }
+ : dstcn(_dstcn), blueIdx(_blueIdx)
+ {
+ static const float coeffs0[] = {1.403f, -0.714f, -0.344f, 1.773f};
+ memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0]));
+ }
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int dcn = dstcn, bidx = blueIdx;
_Tp Y = src[i];
_Tp Cr = src[i+1];
_Tp Cb = src[i+2];
-
+
_Tp b = saturate_cast<_Tp>(Y + (Cb - delta)*C3);
_Tp g = saturate_cast<_Tp>(Y + (Cb - delta)*C2 + (Cr - delta)*C1);
_Tp r = saturate_cast<_Tp>(Y + (Cr - delta)*C0);
-
+
dst[bidx] = b; dst[1] = g; dst[bidx^2] = r;
if( dcn == 4 )
dst[3] = alpha;
}
}
int dstcn, blueIdx;
- float coeffs[4];
-};
+ float coeffs[4];
+};
template<typename _Tp> struct YCrCb2RGB_i
{
typedef _Tp channel_type;
-
+
YCrCb2RGB_i(int _dstcn, int _blueIdx, const int* _coeffs)
: dstcn(_dstcn), blueIdx(_blueIdx)
{
- static const int coeffs0[] = {22987, -11698, -5636, 29049};
- memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0]));
+ static const int coeffs0[] = {22987, -11698, -5636, 29049};
+ memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0]));
}
-
+
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int dcn = dstcn, bidx = blueIdx;
_Tp Y = src[i];
_Tp Cr = src[i+1];
_Tp Cb = src[i+2];
-
+
int b = Y + CV_DESCALE((Cb - delta)*C3, yuv_shift);
int g = Y + CV_DESCALE((Cb - delta)*C2 + (Cr - delta)*C1, yuv_shift);
int r = Y + CV_DESCALE((Cr - delta)*C0, yuv_shift);
-
+
dst[bidx] = saturate_cast<_Tp>(b);
dst[1] = saturate_cast<_Tp>(g);
dst[bidx^2] = saturate_cast<_Tp>(r);
}
int dstcn, blueIdx;
int coeffs[4];
-};
+};
+
-
////////////////////////////////////// RGB <-> XYZ ///////////////////////////////////////
static const float sRGB2XYZ_D65[] =
0.212671f, 0.715160f, 0.072169f,
0.019334f, 0.119193f, 0.950227f
};
-
+
static const float XYZ2sRGB_D65[] =
{
3.240479f, -1.53715f, -0.498535f,
-0.969256f, 1.875991f, 0.041556f,
0.055648f, -0.204043f, 1.057311f
};
-
+
template<typename _Tp> struct RGB2XYZ_f
{
typedef _Tp channel_type;
-
+
RGB2XYZ_f(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn)
{
memcpy(coeffs, _coeffs ? _coeffs : sRGB2XYZ_D65, 9*sizeof(coeffs[0]));
float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2],
C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5],
C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];
-
+
n *= 3;
for(int i = 0; i < n; i += 3, src += scn)
{
- _Tp X = saturate_cast<_Tp>(src[0]*C0 + src[1]*C1 + src[2]*C2);
- _Tp Y = saturate_cast<_Tp>(src[0]*C3 + src[1]*C4 + src[2]*C5);
- _Tp Z = saturate_cast<_Tp>(src[0]*C6 + src[1]*C7 + src[2]*C8);
+ _Tp X = saturate_cast<_Tp>(src[0]*C0 + src[1]*C1 + src[2]*C2);
+ _Tp Y = saturate_cast<_Tp>(src[0]*C3 + src[1]*C4 + src[2]*C5);
+ _Tp Z = saturate_cast<_Tp>(src[0]*C6 + src[1]*C7 + src[2]*C8);
dst[i] = X; dst[i+1] = Y; dst[i+2] = Z;
}
}
template<typename _Tp> struct RGB2XYZ_i
{
typedef _Tp channel_type;
-
+
RGB2XYZ_i(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn)
{
static const int coeffs0[] =
{
- 1689, 1465, 739,
- 871, 2929, 296,
+ 1689, 1465, 739,
+ 871, 2929, 296,
79, 488, 3892
};
for( int i = 0; i < 9; i++ )
int srccn;
int coeffs[9];
};
-
-
+
+
template<typename _Tp> struct XYZ2RGB_f
{
typedef _Tp channel_type;
-
+
XYZ2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs)
: dstcn(_dstcn), blueIdx(_blueIdx)
{
std::swap(coeffs[2], coeffs[8]);
}
}
-
+
void operator()(const _Tp* src, _Tp* dst, int n) const
{
int dcn = dstcn;
- _Tp alpha = ColorChannel<_Tp>::max();
+ _Tp alpha = ColorChannel<_Tp>::max();
float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2],
C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5],
C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];
n *= 3;
for(int i = 0; i < n; i += 3, dst += dcn)
{
- _Tp B = saturate_cast<_Tp>(src[i]*C0 + src[i+1]*C1 + src[i+2]*C2);
- _Tp G = saturate_cast<_Tp>(src[i]*C3 + src[i+1]*C4 + src[i+2]*C5);
- _Tp R = saturate_cast<_Tp>(src[i]*C6 + src[i+1]*C7 + src[i+2]*C8);
+ _Tp B = saturate_cast<_Tp>(src[i]*C0 + src[i+1]*C1 + src[i+2]*C2);
+ _Tp G = saturate_cast<_Tp>(src[i]*C3 + src[i+1]*C4 + src[i+2]*C5);
+ _Tp R = saturate_cast<_Tp>(src[i]*C6 + src[i+1]*C7 + src[i+2]*C8);
dst[0] = B; dst[1] = G; dst[2] = R;
- if( dcn == 4 )
- dst[3] = alpha;
+ if( dcn == 4 )
+ dst[3] = alpha;
}
}
int dstcn, blueIdx;
template<typename _Tp> struct XYZ2RGB_i
{
typedef _Tp channel_type;
-
+
XYZ2RGB_i(int _dstcn, int _blueIdx, const int* _coeffs)
: dstcn(_dstcn), blueIdx(_blueIdx)
{
static const int coeffs0[] =
{
- 13273, -6296, -2042,
- -3970, 7684, 170,
+ 13273, -6296, -2042,
+ -3970, 7684, 170,
228, -836, 4331
};
for(int i = 0; i < 9; i++)
coeffs[i] = _coeffs ? cvRound(_coeffs[i]*(1 << xyz_shift)) : coeffs0[i];
-
+
if(blueIdx == 0)
{
std::swap(coeffs[0], coeffs[6]);
dst[0] = saturate_cast<_Tp>(B); dst[1] = saturate_cast<_Tp>(G);
dst[2] = saturate_cast<_Tp>(R);
if( dcn == 4 )
- dst[3] = alpha;
+ dst[3] = alpha;
}
}
int dstcn, blueIdx;
int coeffs[9];
};
-
+
////////////////////////////////////// RGB <-> HSV ///////////////////////////////////////
struct RGB2HSV_b
{
typedef uchar channel_type;
-
+
RGB2HSV_b(int _srccn, int _blueIdx, int _hrange)
: srccn(_srccn), blueIdx(_blueIdx), hrange(_hrange)
{
CV_Assert( hrange == 180 || hrange == 256 );
}
-
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int i, bidx = blueIdx, scn = srccn;
const int hsv_shift = 12;
-
+
static int sdiv_table[256];
static int hdiv_table180[256];
static int hdiv_table256[256];
static volatile bool initialized = false;
-
+
int hr = hrange;
const int* hdiv_table = hr == 180 ? hdiv_table180 : hdiv_table256;
n *= 3;
-
+
if( !initialized )
{
sdiv_table[0] = hdiv_table180[0] = hdiv_table256[0] = 0;
}
initialized = true;
}
-
+
for( i = 0; i < n; i += 3, src += scn )
{
int b = src[bidx], g = src[1], r = src[bidx^2];
int h, s, v = b;
int vmin = b, diff;
int vr, vg;
-
+
CV_CALC_MAX_8U( v, g );
CV_CALC_MAX_8U( v, r );
CV_CALC_MIN_8U( vmin, g );
CV_CALC_MIN_8U( vmin, r );
-
+
diff = v - vmin;
vr = v == r ? -1 : 0;
vg = v == g ? -1 : 0;
-
+
s = (diff * sdiv_table[v] + (1 << (hsv_shift-1))) >> hsv_shift;
h = (vr & (g - b)) +
(~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff))));
h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift;
h += h < 0 ? hr : 0;
-
+
dst[i] = saturate_cast<uchar>(h);
dst[i+1] = (uchar)s;
dst[i+2] = (uchar)v;
}
}
-
+
int srccn, blueIdx, hrange;
-};
+};
+
-
struct RGB2HSV_f
{
typedef float channel_type;
-
+
RGB2HSV_f(int _srccn, int _blueIdx, float _hrange)
: srccn(_srccn), blueIdx(_blueIdx), hrange(_hrange) {}
-
+
void operator()(const float* src, float* dst, int n) const
{
int i, bidx = blueIdx, scn = srccn;
float hscale = hrange*(1.f/360.f);
n *= 3;
-
+
for( i = 0; i < n; i += 3, src += scn )
{
float b = src[bidx], g = src[1], r = src[bidx^2];
float h, s, v;
-
+
float vmin, diff;
-
+
v = vmin = r;
if( v < g ) v = g;
if( v < b ) v = b;
if( vmin > g ) vmin = g;
if( vmin > b ) vmin = b;
-
+
diff = v - vmin;
s = diff/(float)(fabs(v) + FLT_EPSILON);
diff = (float)(60./(diff + FLT_EPSILON));
h = (b - r)*diff + 120.f;
else
h = (r - g)*diff + 240.f;
-
+
if( h < 0 ) h += 360.f;
-
+
dst[i] = h*hscale;
dst[i+1] = s;
dst[i+2] = v;
}
}
-
+
int srccn, blueIdx;
float hrange;
};
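
In formula form, the float functor above implements the usual hexcone model:

//   V = max(R,G,B),  S = (V - min(R,G,B)) / V            (S = 0 when V == 0)
//   H = 60*(G-B)/diff           if V == R
//       120 + 60*(B-R)/diff     if V == G
//       240 + 60*(R-G)/diff     if V == B,   plus 360 if negative,
// then H is scaled by hrange/360 (hrange is 180 or 256 in the 8-bit wrapper).
// Worked example: (R,G,B) = (0.5, 0.25, 0) -> V = 0.5, S = 1.0, H = 30 degrees.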
struct HSV2RGB_f
{
typedef float channel_type;
-
+
HSV2RGB_f(int _dstcn, int _blueIdx, float _hrange)
: dstcn(_dstcn), blueIdx(_blueIdx), hscale(6.f/_hrange) {}
-
+
void operator()(const float* src, float* dst, int n) const
{
int i, bidx = blueIdx, dcn = dstcn;
float _hscale = hscale;
float alpha = ColorChannel<float>::max();
n *= 3;
-
+
for( i = 0; i < n; i += 3, dst += dcn )
{
float h = src[i], s = src[i+1], v = src[i+2];
tab[1] = v*(1.f - s);
tab[2] = v*(1.f - s*h);
tab[3] = v*(1.f - s*(1.f - h));
-
+
b = tab[sector_data[sector][0]];
g = tab[sector_data[sector][1]];
r = tab[sector_data[sector][2]];
int dstcn, blueIdx;
float hscale;
};
-
+
struct HSV2RGB_b
{
typedef uchar channel_type;
-
+
HSV2RGB_b(int _dstcn, int _blueIdx, int _hrange)
: dstcn(_dstcn), cvt(3, _blueIdx, (float)_hrange)
{}
-
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int i, j, dcn = dstcn;
uchar alpha = ColorChannel<uchar>::max();
float buf[3*BLOCK_SIZE];
-
+
for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 )
{
int dn = std::min(n - i, (int)BLOCK_SIZE);
-
+
for( j = 0; j < dn*3; j += 3 )
{
buf[j] = src[j];
buf[j+2] = src[j+2]*(1.f/255.f);
}
cvt(buf, buf, dn);
-
+
for( j = 0; j < dn*3; j += 3, dst += dcn )
{
dst[0] = saturate_cast<uchar>(buf[j]*255.f);
}
}
}
-
+
int dstcn;
HSV2RGB_f cvt;
};
-
+
///////////////////////////////////// RGB <-> HLS ////////////////////////////////////////
struct RGB2HLS_f
{
typedef float channel_type;
-
+
RGB2HLS_f(int _srccn, int _blueIdx, float _hrange)
: srccn(_srccn), blueIdx(_blueIdx), hrange(_hrange) {}
-
+
void operator()(const float* src, float* dst, int n) const
{
int i, bidx = blueIdx, scn = srccn;
float hscale = hrange*(1.f/360.f);
n *= 3;
-
+
for( i = 0; i < n; i += 3, src += scn )
{
float b = src[bidx], g = src[1], r = src[bidx^2];
float h = 0.f, s = 0.f, l;
float vmin, vmax, diff;
-
+
vmax = vmin = r;
if( vmax < g ) vmax = g;
if( vmax < b ) vmax = b;
if( vmin > g ) vmin = g;
if( vmin > b ) vmin = b;
-
+
diff = vmax - vmin;
l = (vmax + vmin)*0.5f;
-
+
if( diff > FLT_EPSILON )
{
s = l < 0.5f ? diff/(vmax + vmin) : diff/(2 - vmax - vmin);
diff = 60.f/diff;
-
+
if( vmax == r )
h = (g - b)*diff;
else if( vmax == g )
h = (b - r)*diff + 120.f;
else
h = (r - g)*diff + 240.f;
-
+
if( h < 0.f ) h += 360.f;
}
-
+
dst[i] = h*hscale;
dst[i+1] = l;
dst[i+2] = s;
}
}
-
+
int srccn, blueIdx;
float hrange;
};
-
-
+
+
struct RGB2HLS_b
{
typedef uchar channel_type;
-
+
RGB2HLS_b(int _srccn, int _blueIdx, int _hrange)
: srccn(_srccn), cvt(3, _blueIdx, (float)_hrange) {}
-
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int i, j, scn = srccn;
float buf[3*BLOCK_SIZE];
-
+
for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 )
{
int dn = std::min(n - i, (int)BLOCK_SIZE);
-
+
for( j = 0; j < dn*3; j += 3, src += scn )
{
buf[j] = src[0]*(1.f/255.f);
buf[j+2] = src[2]*(1.f/255.f);
}
cvt(buf, buf, dn);
-
+
for( j = 0; j < dn*3; j += 3 )
{
dst[j] = saturate_cast<uchar>(buf[j]);
}
}
}
-
+
int srccn;
RGB2HLS_f cvt;
};
-
+
struct HLS2RGB_f
{
typedef float channel_type;
-
+
HLS2RGB_f(int _dstcn, int _blueIdx, float _hrange)
: dstcn(_dstcn), blueIdx(_blueIdx), hscale(6.f/_hrange) {}
-
+
void operator()(const float* src, float* dst, int n) const
{
int i, bidx = blueIdx, dcn = dstcn;
float _hscale = hscale;
float alpha = ColorChannel<float>::max();
n *= 3;
-
+
for( i = 0; i < n; i += 3, dst += dcn )
{
float h = src[i], l = src[i+1], s = src[i+2];
float b, g, r;
-
+
if( s == 0 )
b = g = r = l;
else
{{1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0}};
float tab[4];
int sector;
-
+
float p2 = l <= 0.5f ? l*(1 + s) : l + s - l*s;
float p1 = 2*l - p2;
-
+
h *= _hscale;
if( h < 0 )
do h += 6; while( h < 0 );
else if( h >= 6 )
do h -= 6; while( h >= 6 );
-
+
assert( 0 <= h && h < 6 );
sector = cvFloor(h);
h -= sector;
-
+
tab[0] = p2;
tab[1] = p1;
tab[2] = p1 + (p2 - p1)*(1-h);
tab[3] = p1 + (p2 - p1)*h;
-
+
b = tab[sector_data[sector][0]];
g = tab[sector_data[sector][1]];
r = tab[sector_data[sector][2]];
}
-
+
dst[bidx] = b;
dst[1] = g;
dst[bidx^2] = r;
dst[3] = alpha;
}
}
-
+
int dstcn, blueIdx;
float hscale;
};
-
+
struct HLS2RGB_b
{
typedef uchar channel_type;
-
+
HLS2RGB_b(int _dstcn, int _blueIdx, int _hrange)
: dstcn(_dstcn), cvt(3, _blueIdx, (float)_hrange)
{}
-
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int i, j, dcn = dstcn;
uchar alpha = ColorChannel<uchar>::max();
float buf[3*BLOCK_SIZE];
-
+
for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 )
{
int dn = std::min(n - i, (int)BLOCK_SIZE);
-
+
for( j = 0; j < dn*3; j += 3 )
{
buf[j] = src[j];
buf[j+2] = src[j+2]*(1.f/255.f);
}
cvt(buf, buf, dn);
-
+
for( j = 0; j < dn*3; j += 3, dst += dcn )
{
dst[0] = saturate_cast<uchar>(buf[j]*255.f);
}
}
}
-
+
int dstcn;
HLS2RGB_f cvt;
};
-
+
///////////////////////////////////// RGB <-> L*a*b* /////////////////////////////////////
static const float D65[] = { 0.950456f, 1.f, 1.088754f };
static float sRGBGammaTab[GAMMA_TAB_SIZE*4], sRGBInvGammaTab[GAMMA_TAB_SIZE*4];
static const float GammaTabScale = (float)GAMMA_TAB_SIZE;
-
-static ushort sRGBGammaTab_b[256], linearGammaTab_b[256];
+
+static ushort sRGBGammaTab_b[256], linearGammaTab_b[256];
#undef lab_shift
#define lab_shift xyz_shift
#define gamma_shift 3
#define lab_shift2 (lab_shift + gamma_shift)
#define LAB_CBRT_TAB_SIZE_B (256*3/2*(1<<gamma_shift))
static ushort LabCbrtTab_b[LAB_CBRT_TAB_SIZE_B];
-
+
static void initLabTabs()
{
static bool initialized = false;
f[i] = x < 0.008856f ? x*7.787f + 0.13793103448275862f : cvCbrt(x);
}
splineBuild(f, LAB_CBRT_TAB_SIZE, LabCbrtTab);
-
+
scale = 1.f/GammaTabScale;
for(i = 0; i <= GAMMA_TAB_SIZE; i++)
{
}
splineBuild(g, GAMMA_TAB_SIZE, sRGBGammaTab);
splineBuild(ig, GAMMA_TAB_SIZE, sRGBInvGammaTab);
-
+
for(i = 0; i < 256; i++)
{
float x = i*(1.f/255.f);
sRGBGammaTab_b[i] = saturate_cast<ushort>(255.f*(1 << gamma_shift)*(x <= 0.04045f ? x*(1.f/12.92f) : (float)pow((double)(x + 0.055)*(1./1.055), 2.4)));
linearGammaTab_b[i] = (ushort)(i*(1 << gamma_shift));
}
-
+
for(i = 0; i < LAB_CBRT_TAB_SIZE_B; i++)
{
float x = i*(1.f/(255.f*(1 << gamma_shift)));
struct RGB2Lab_b
{
typedef uchar channel_type;
-
+
RGB2Lab_b(int _srccn, int blueIdx, const float* _coeffs,
const float* _whitept, bool _srgb)
: srccn(_srccn), srgb(_srgb)
{
static volatile int _3 = 3;
initLabTabs();
-
+
if(!_coeffs) _coeffs = sRGB2XYZ_D65;
if(!_whitept) _whitept = D65;
float scale[] =
(float)(1 << lab_shift),
(1 << lab_shift)/_whitept[2]
};
-
+
for( int i = 0; i < _3; i++ )
{
coeffs[i*3+(blueIdx^2)] = cvRound(_coeffs[i*3]*scale[i]);
coeffs[i*3+1] = cvRound(_coeffs[i*3+1]*scale[i]);
coeffs[i*3+blueIdx] = cvRound(_coeffs[i*3+2]*scale[i]);
-
+
CV_Assert( coeffs[i] >= 0 && coeffs[i*3+1] >= 0 && coeffs[i*3+2] >= 0 &&
coeffs[i*3] + coeffs[i*3+1] + coeffs[i*3+2] < 2*(1 << lab_shift) );
}
}
-
+
void operator()(const uchar* src, uchar* dst, int n) const
{
const int Lscale = (116*255+50)/100;
C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5],
C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];
n *= 3;
-
+
for( i = 0; i < n; i += 3, src += scn )
{
int R = tab[src[0]], G = tab[src[1]], B = tab[src[2]];
int fX = LabCbrtTab_b[CV_DESCALE(R*C0 + G*C1 + B*C2, lab_shift)];
int fY = LabCbrtTab_b[CV_DESCALE(R*C3 + G*C4 + B*C5, lab_shift)];
int fZ = LabCbrtTab_b[CV_DESCALE(R*C6 + G*C7 + B*C8, lab_shift)];
-
+
int L = CV_DESCALE( Lscale*fY + Lshift, lab_shift2 );
int a = CV_DESCALE( 500*(fX - fY) + 128*(1 << lab_shift2), lab_shift2 );
int b = CV_DESCALE( 200*(fY - fZ) + 128*(1 << lab_shift2), lab_shift2 );
-
+
dst[i] = saturate_cast<uchar>(L);
dst[i+1] = saturate_cast<uchar>(a);
dst[i+2] = saturate_cast<uchar>(b);
}
}
-
+
int srccn;
int coeffs[9];
bool srgb;
};
-
-
+
+
struct RGB2Lab_f
{
typedef float channel_type;
-
+
RGB2Lab_f(int _srccn, int blueIdx, const float* _coeffs,
const float* _whitept, bool _srgb)
: srccn(_srccn), srgb(_srgb)
{
volatile int _3 = 3;
initLabTabs();
-
+
if(!_coeffs) _coeffs = sRGB2XYZ_D65;
if(!_whitept) _whitept = D65;
float scale[] = { LabCbrtTabScale/_whitept[0], LabCbrtTabScale, LabCbrtTabScale/_whitept[2] };
-
+
for( int i = 0; i < _3; i++ )
{
coeffs[i*3+(blueIdx^2)] = _coeffs[i*3]*scale[i];
coeffs[i*3] + coeffs[i*3+1] + coeffs[i*3+2] < 1.5f*LabCbrtTabScale );
}
}
-
+
void operator()(const float* src, float* dst, int n) const
{
int i, scn = srccn;
C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5],
C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];
n *= 3;
-
+
for( i = 0; i < n; i += 3, src += scn )
{
float R = src[0], G = src[1], B = src[2];
G = splineInterpolate(G*gscale, gammaTab, GAMMA_TAB_SIZE);
B = splineInterpolate(B*gscale, gammaTab, GAMMA_TAB_SIZE);
}
- float fX = splineInterpolate(R*C0 + G*C1 + B*C2, LabCbrtTab, LAB_CBRT_TAB_SIZE);
+ float fX = splineInterpolate(R*C0 + G*C1 + B*C2, LabCbrtTab, LAB_CBRT_TAB_SIZE);
float fY = splineInterpolate(R*C3 + G*C4 + B*C5, LabCbrtTab, LAB_CBRT_TAB_SIZE);
float fZ = splineInterpolate(R*C6 + G*C7 + B*C8, LabCbrtTab, LAB_CBRT_TAB_SIZE);
-
+
float L = 116.f*fY - 16.f;
float a = 500.f*(fX - fY);
float b = 200.f*(fY - fZ);
-
+
dst[i] = L; dst[i+1] = a; dst[i+2] = b;
}
}
-
+
int srccn;
float coeffs[9];
bool srgb;
};
-
+
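The tables built above encode the standard CIE L*a*b* mapping; in plain form, with f(t) = t^(1/3) for t > 0.008856 and 7.787*t + 16/116 otherwise, and the D65 white point (Xn, Yn, Zn) folded into the matrix coefficients:

//   L = 116*f(Y/Yn) - 16
//   a = 500*( f(X/Xn) - f(Y/Yn) )
//   b = 200*( f(Y/Yn) - f(Z/Zn) )
// Sanity check: the reference white itself maps to (L, a, b) = (100, 0, 0).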
struct Lab2RGB_f
{
typedef float channel_type;
-
+
Lab2RGB_f( int _dstcn, int blueIdx, const float* _coeffs,
const float* _whitept, bool _srgb )
: dstcn(_dstcn), srgb(_srgb)
{
initLabTabs();
-
+
if(!_coeffs) _coeffs = XYZ2sRGB_D65;
if(!_whitept) _whitept = D65;
-
+
for( int i = 0; i < 3; i++ )
{
coeffs[i+(blueIdx^2)*3] = _coeffs[i]*_whitept[i];
coeffs[i+blueIdx*3] = _coeffs[i+6]*_whitept[i];
}
}
-
+
void operator()(const float* src, float* dst, int n) const
{
int i, dcn = dstcn;
C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];
float alpha = ColorChannel<float>::max();
n *= 3;
-
+
for( i = 0; i < n; i += 3, dst += dcn )
{
float L = src[i], a = src[i+1], b = src[i+2];
Y = Y*Y*Y;
X = X*X*X;
Z = Z*Z*Z;
-
+
float R = X*C0 + Y*C1 + Z*C2;
float G = X*C3 + Y*C4 + Z*C5;
float B = X*C6 + Y*C7 + Z*C8;
-
+
if( gammaTab )
{
R = splineInterpolate(R*gscale, gammaTab, GAMMA_TAB_SIZE);
G = splineInterpolate(G*gscale, gammaTab, GAMMA_TAB_SIZE);
B = splineInterpolate(B*gscale, gammaTab, GAMMA_TAB_SIZE);
}
-
+
dst[0] = R; dst[1] = G; dst[2] = B;
if( dcn == 4 )
dst[3] = alpha;
}
}
-
+
int dstcn;
float coeffs[9];
bool srgb;
};
-
+
struct Lab2RGB_b
{
typedef uchar channel_type;
-
+
Lab2RGB_b( int _dstcn, int blueIdx, const float* _coeffs,
const float* _whitept, bool _srgb )
: dstcn(_dstcn), cvt(3, blueIdx, _coeffs, _whitept, _srgb ) {}
-
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int i, j, dcn = dstcn;
uchar alpha = ColorChannel<uchar>::max();
float buf[3*BLOCK_SIZE];
-
+
for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 )
{
int dn = std::min(n - i, (int)BLOCK_SIZE);
-
+
for( j = 0; j < dn*3; j += 3 )
{
buf[j] = src[j]*(100.f/255.f);
buf[j+2] = (float)(src[j+2] - 128);
}
cvt(buf, buf, dn);
-
+
for( j = 0; j < dn*3; j += 3, dst += dcn )
{
dst[0] = saturate_cast<uchar>(buf[j]*255.f);
}
}
}
-
+
int dstcn;
Lab2RGB_f cvt;
};
-
-
+
+
///////////////////////////////////// RGB <-> L*u*v* /////////////////////////////////////
struct RGB2Luv_f
{
typedef float channel_type;
-
+
RGB2Luv_f( int _srccn, int blueIdx, const float* _coeffs,
const float* whitept, bool _srgb )
: srccn(_srccn), srgb(_srgb)
{
- volatile int i;
+ volatile int i;
initLabTabs();
-
+
if(!_coeffs) _coeffs = sRGB2XYZ_D65;
if(!whitept) whitept = D65;
-
+
for( i = 0; i < 3; i++ )
{
coeffs[i*3] = _coeffs[i*3];
CV_Assert( coeffs[i*3] >= 0 && coeffs[i*3+1] >= 0 && coeffs[i*3+2] >= 0 &&
coeffs[i*3] + coeffs[i*3+1] + coeffs[i*3+2] < 1.5f );
}
-
+
float d = 1.f/(whitept[0] + whitept[1]*15 + whitept[2]*3);
un = 4*whitept[0]*d;
vn = 9*whitept[1]*d;
-
+
CV_Assert(whitept[1] == 1.f);
}
-
+
void operator()(const float* src, float* dst, int n) const
{
int i, scn = srccn;
C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];
float _un = 13*un, _vn = 13*vn;
n *= 3;
-
+
for( i = 0; i < n; i += 3, src += scn )
{
float R = src[0], G = src[1], B = src[2];
G = splineInterpolate(G*gscale, gammaTab, GAMMA_TAB_SIZE);
B = splineInterpolate(B*gscale, gammaTab, GAMMA_TAB_SIZE);
}
-
+
float X = R*C0 + G*C1 + B*C2;
float Y = R*C3 + G*C4 + B*C5;
float Z = R*C6 + G*C7 + B*C8;
-
+
float L = splineInterpolate(Y*LabCbrtTabScale, LabCbrtTab, LAB_CBRT_TAB_SIZE);
L = 116.f*L - 16.f;
-
- float d = (4*13) / std::max(X + 15 * Y + 3 * Z, FLT_EPSILON);
+
+ float d = (4*13) / std::max(X + 15 * Y + 3 * Z, FLT_EPSILON);
float u = L*(X*d - _un);
float v = L*((9*0.25f)*Y*d - _vn);
-
+
dst[i] = L; dst[i+1] = u; dst[i+2] = v;
}
}
-
+
int srccn;
float coeffs[9], un, vn;
bool srgb;
};
-
+
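Correspondingly, the Luv functor above follows the standard CIE L*u*v* definition, with (un, vn) precomputed from the white point in the constructor:

//   u' = 4*X / (X + 15*Y + 3*Z),   v' = 9*Y / (X + 15*Y + 3*Z)
//   L  = 116*f(Y/Yn) - 16
//   u  = 13*L*(u' - un),           v = 13*L*(v' - vn)
// The (4*13)*d and (9*0.25f)*Y*d expressions above are 13*u' and 13*v'
// written with one shared divisor d.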
struct Luv2RGB_f
{
typedef float channel_type;
-
+
Luv2RGB_f( int _dstcn, int blueIdx, const float* _coeffs,
const float* whitept, bool _srgb )
: dstcn(_dstcn), srgb(_srgb)
{
initLabTabs();
-
+
if(!_coeffs) _coeffs = XYZ2sRGB_D65;
if(!whitept) whitept = D65;
-
+
for( int i = 0; i < 3; i++ )
{
coeffs[i+(blueIdx^2)*3] = _coeffs[i];
coeffs[i+3] = _coeffs[i+3];
coeffs[i+blueIdx*3] = _coeffs[i+6];
}
-
+
float d = 1.f/(whitept[0] + whitept[1]*15 + whitept[2]*3);
un = 4*whitept[0]*d;
vn = 9*whitept[1]*d;
-
+
CV_Assert(whitept[1] == 1.f);
}
-
+
void operator()(const float* src, float* dst, int n) const
{
int i, dcn = dstcn;
float alpha = ColorChannel<float>::max();
float _un = un, _vn = vn;
n *= 3;
-
+
for( i = 0; i < n; i += 3, dst += dcn )
{
float L = src[i], u = src[i+1], v = src[i+2], d, X, Y, Z;
v = v*d + _vn;
float iv = 1.f/v;
X = 2.25f * u * Y * iv ;
- Z = (12 - 3 * u - 20 * v) * Y * 0.25f * iv;
-
+ Z = (12 - 3 * u - 20 * v) * Y * 0.25f * iv;
+
float R = X*C0 + Y*C1 + Z*C2;
float G = X*C3 + Y*C4 + Z*C5;
float B = X*C6 + Y*C7 + Z*C8;
-
+
if( gammaTab )
{
R = splineInterpolate(R*gscale, gammaTab, GAMMA_TAB_SIZE);
G = splineInterpolate(G*gscale, gammaTab, GAMMA_TAB_SIZE);
B = splineInterpolate(B*gscale, gammaTab, GAMMA_TAB_SIZE);
}
-
+
dst[0] = R; dst[1] = G; dst[2] = B;
if( dcn == 4 )
dst[3] = alpha;
}
}
-
+
int dstcn;
float coeffs[9], un, vn;
bool srgb;
};
-
+
struct RGB2Luv_b
{
typedef uchar channel_type;
-
+
RGB2Luv_b( int _srccn, int blueIdx, const float* _coeffs,
const float* _whitept, bool _srgb )
: srccn(_srccn), cvt(3, blueIdx, _coeffs, _whitept, _srgb) {}
-
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int i, j, scn = srccn;
float buf[3*BLOCK_SIZE];
-
+
for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 )
{
int dn = std::min(n - i, (int)BLOCK_SIZE);
-
+
for( j = 0; j < dn*3; j += 3, src += scn )
{
buf[j] = src[0]*(1.f/255.f);
buf[j+2] = (float)(src[2]*(1.f/255.f));
}
cvt(buf, buf, dn);
-
+
for( j = 0; j < dn*3; j += 3 )
{
dst[j] = saturate_cast<uchar>(buf[j]*2.55f);
}
}
}
-
+
int srccn;
RGB2Luv_f cvt;
};
-
+
struct Luv2RGB_b
{
typedef uchar channel_type;
-
+
Luv2RGB_b( int _dstcn, int blueIdx, const float* _coeffs,
const float* _whitept, bool _srgb )
: dstcn(_dstcn), cvt(3, blueIdx, _coeffs, _whitept, _srgb ) {}
-
+
void operator()(const uchar* src, uchar* dst, int n) const
{
int i, j, dcn = dstcn;
uchar alpha = ColorChannel<uchar>::max();
float buf[3*BLOCK_SIZE];
-
+
for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 )
{
int dn = std::min(n - i, (int)BLOCK_SIZE);
-
+
for( j = 0; j < dn*3; j += 3 )
{
buf[j] = src[j]*(100.f/255.f);
buf[j+2] = (float)(src[j+2]*1.003921568627451f - 140.f);
}
cvt(buf, buf, dn);
-
+
for( j = 0; j < dn*3; j += 3, dst += dcn )
{
dst[0] = saturate_cast<uchar>(buf[j]*255.f);
}
}
}
-
+
int dstcn;
Luv2RGB_f cvt;
};
-
+
//////////////////////////// Bayer Pattern -> RGB conversion /////////////////////////////
template<typename T>
{
return 0;
}
-
+
int bayer2RGB(const T*, int, T*, int, int) const
{
return 0;
}
-};
-
+};
+
#if CV_SSE2
class SIMDBayerInterpolator_8u
{
{
use_simd = checkHardwareSupport(CV_CPU_SSE2);
}
-
+
int bayer2Gray(const uchar* bayer, int bayer_step, uchar* dst,
int width, int bcoeff, int gcoeff, int rcoeff) const
{
if( !use_simd )
return 0;
-
+
__m128i _b2y = _mm_set1_epi16((short)(rcoeff*2));
__m128i _g2y = _mm_set1_epi16((short)(gcoeff*2));
__m128i _r2y = _mm_set1_epi16((short)(bcoeff*2));
const uchar* bayer_end = bayer + width;
-
+
for( ; bayer <= bayer_end - 18; bayer += 14, dst += 14 )
{
__m128i r0 = _mm_loadu_si128((const __m128i*)bayer);
__m128i r1 = _mm_loadu_si128((const __m128i*)(bayer+bayer_step));
__m128i r2 = _mm_loadu_si128((const __m128i*)(bayer+bayer_step*2));
-
+
__m128i b1 = _mm_add_epi16(_mm_srli_epi16(_mm_slli_epi16(r0, 8), 7),
_mm_srli_epi16(_mm_slli_epi16(r2, 8), 7));
__m128i b0 = _mm_add_epi16(b1, _mm_srli_si128(b1, 2));
b1 = _mm_slli_epi16(_mm_srli_si128(b1, 2), 1);
-
+
__m128i g0 = _mm_add_epi16(_mm_srli_epi16(r0, 7), _mm_srli_epi16(r2, 7));
__m128i g1 = _mm_srli_epi16(_mm_slli_epi16(r1, 8), 7);
g0 = _mm_add_epi16(g0, _mm_add_epi16(g1, _mm_srli_si128(g1, 2)));
g1 = _mm_slli_epi16(_mm_srli_si128(g1, 2), 2);
-
+
r0 = _mm_srli_epi16(r1, 8);
r1 = _mm_slli_epi16(_mm_add_epi16(r0, _mm_srli_si128(r0, 2)), 2);
r0 = _mm_slli_epi16(r0, 3);
g0 = _mm_unpacklo_epi8(g0, g1);
_mm_storeu_si128((__m128i*)dst, g0);
}
-
+
return (int)(bayer - (bayer_end - width));
}
-
+
int bayer2RGB(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const
{
if( !use_simd )
__m128i mask = _mm_set1_epi16(blue < 0 ? -1 : 0), z = _mm_setzero_si128();
__m128i masklo = _mm_set1_epi16(0x00ff);
const uchar* bayer_end = bayer + width;
-
+
for( ; bayer <= bayer_end - 18; bayer += 14, dst += 42 )
{
__m128i r0 = _mm_loadu_si128((const __m128i*)bayer);
__m128i r1 = _mm_loadu_si128((const __m128i*)(bayer+bayer_step));
__m128i r2 = _mm_loadu_si128((const __m128i*)(bayer+bayer_step*2));
-
+
__m128i b1 = _mm_add_epi16(_mm_and_si128(r0, masklo), _mm_and_si128(r2, masklo));
__m128i b0 = _mm_add_epi16(b1, _mm_srli_si128(b1, 2));
b1 = _mm_srli_si128(b1, 2);
b1 = _mm_srli_epi16(_mm_add_epi16(b1, delta1), 1);
b0 = _mm_srli_epi16(_mm_add_epi16(b0, delta2), 2);
b0 = _mm_packus_epi16(b0, b1);
-
+
__m128i g0 = _mm_add_epi16(_mm_srli_epi16(r0, 8), _mm_srli_epi16(r2, 8));
__m128i g1 = _mm_and_si128(r1, masklo);
g0 = _mm_add_epi16(g0, _mm_add_epi16(g1, _mm_srli_si128(g1, 2)));
g1 = _mm_srli_si128(g1, 2);
g0 = _mm_srli_epi16(_mm_add_epi16(g0, delta2), 2);
g0 = _mm_packus_epi16(g0, g1);
-
+
r0 = _mm_srli_epi16(r1, 8);
r1 = _mm_add_epi16(r0, _mm_srli_si128(r0, 2));
r1 = _mm_srli_epi16(_mm_add_epi16(r1, delta1), 1);
r0 = _mm_packus_epi16(r0, r1);
-
+
b1 = _mm_and_si128(_mm_xor_si128(b0, r0), mask);
b0 = _mm_xor_si128(b0, b1);
r0 = _mm_xor_si128(r0, b1);
-
+
// b1 g1 b3 g3 ...
b1 = _mm_unpackhi_epi8(b0, g0);
// b0 g0 b2 g2 b4 g4 ....
b0 = _mm_unpacklo_epi8(b0, g0);
-
+
// r1 0 r3 0 ...
r1 = _mm_unpackhi_epi8(r0, z);
// r0 0 r2 0 r4 0 ...
r0 = _mm_unpacklo_epi8(r0, z);
-
+
// 0 b0 g0 r0 0 b2 g2 r2 0 ...
g0 = _mm_slli_si128(_mm_unpacklo_epi16(b0, r0), 1);
// 0 b8 g8 r8 0 b10 g10 r10 0 ...
g1 = _mm_slli_si128(_mm_unpackhi_epi16(b0, r0), 1);
-
+
// b1 g1 r1 0 b3 g3 r3 ....
r0 = _mm_unpacklo_epi16(b1, r1);
// b9 g9 r9 0 ...
r1 = _mm_unpackhi_epi16(b1, r1);
-
+
b0 = _mm_srli_si128(_mm_unpacklo_epi32(g0, r0), 1);
b1 = _mm_srli_si128(_mm_unpackhi_epi32(g0, r0), 1);
-
+
_mm_storel_epi64((__m128i*)(dst-1+0), b0);
_mm_storel_epi64((__m128i*)(dst-1+6*1), _mm_srli_si128(b0, 8));
_mm_storel_epi64((__m128i*)(dst-1+6*2), b1);
_mm_storel_epi64((__m128i*)(dst-1+6*3), _mm_srli_si128(b1, 8));
-
+
g0 = _mm_srli_si128(_mm_unpacklo_epi32(g1, r1), 1);
g1 = _mm_srli_si128(_mm_unpackhi_epi32(g1, r1), 1);
-
+
_mm_storel_epi64((__m128i*)(dst-1+6*4), g0);
_mm_storel_epi64((__m128i*)(dst-1+6*5), _mm_srli_si128(g0, 8));
-
+
_mm_storel_epi64((__m128i*)(dst-1+6*6), g1);
}
-
+
return (int)(bayer - (bayer_end - width));
}
-
+
bool use_simd;
};
#else
typedef SIMDBayerStubInterpolator_<uchar> SIMDBayerInterpolator_8u;
#endif
-
+
template<typename T, class SIMDInterpolator>
static void Bayer2Gray_( const Mat& srcmat, Mat& dstmat, int code )
{
const int G2Y = 9617;
const int B2Y = 1868;
const int SHIFT = 14;
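    // G2Y and B2Y are the 0.587 and 0.114 luma weights scaled by 2^SHIFT
    // (0.587*16384 ~= 9617, 0.114*16384 ~= 1868); CV_DESCALE(x, n), i.e.
    // ((x) + (1 << (n-1))) >> n, rounds the fixed-point sum back down.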
-
+
const T* bayer0 = (const T*)srcmat.data;
int bayer_step = (int)(srcmat.step/sizeof(T));
T* dst0 = (T*)dstmat.data;
int bcoeff = B2Y, rcoeff = R2Y;
int start_with_green = code == CV_BayerGB2GRAY || code == CV_BayerGR2GRAY;
bool brow = true;
-
+
if( code != CV_BayerBG2GRAY && code != CV_BayerGB2GRAY )
{
brow = false;
std::swap(bcoeff, rcoeff);
}
-
+
dst0 += dst_step + 1;
size.height -= 2;
size.width -= 2;
-
+
for( ; size.height-- > 0; bayer0 += bayer_step, dst0 += dst_step )
{
unsigned t0, t1, t2;
const T* bayer = bayer0;
T* dst = dst0;
const T* bayer_end = bayer + size.width;
-
+
if( size.width <= 0 )
{
dst[-1] = dst[size.width] = 0;
continue;
}
-
+
if( start_with_green )
{
t0 = (bayer[1] + bayer[bayer_step*2+1])*rcoeff;
t1 = (bayer[bayer_step] + bayer[bayer_step+2])*bcoeff;
t2 = bayer[bayer_step+1]*(2*G2Y);
-
+
dst[0] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+1);
bayer++;
dst++;
}
-
+
int delta = vecOp.bayer2Gray(bayer, bayer_step, dst, size.width, bcoeff, G2Y, rcoeff);
bayer += delta;
dst += delta;
-
+
for( ; bayer <= bayer_end - 2; bayer += 2, dst += 2 )
{
t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + bayer[bayer_step*2+2])*rcoeff;
t1 = (bayer[1] + bayer[bayer_step] + bayer[bayer_step+2] + bayer[bayer_step*2+1])*G2Y;
t2 = bayer[bayer_step+1]*(4*bcoeff);
dst[0] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+2);
-
+
t0 = (bayer[2] + bayer[bayer_step*2+2])*rcoeff;
t1 = (bayer[bayer_step+1] + bayer[bayer_step+3])*bcoeff;
t2 = bayer[bayer_step+2]*(2*G2Y);
dst[1] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+1);
}
-
+
if( bayer < bayer_end )
{
t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + bayer[bayer_step*2+2])*rcoeff;
bayer++;
dst++;
}
-
+
dst0[-1] = dst0[0];
dst0[size.width] = dst0[size.width-1];
-
+
brow = !brow;
std::swap(bcoeff, rcoeff);
start_with_green = !start_with_green;
}
-
+
size = dstmat.size();
dst0 = (T*)dstmat.data;
if( size.height > 2 )
}
}
-template<typename T, class SIMDInterpolator>
+template<typename T, class SIMDInterpolator>
static void Bayer2RGB_( const Mat& srcmat, Mat& dstmat, int code )
{
SIMDInterpolator vecOp;
Size size = srcmat.size();
int blue = code == CV_BayerBG2BGR || code == CV_BayerGB2BGR ? -1 : 1;
int start_with_green = code == CV_BayerGB2BGR || code == CV_BayerGR2BGR;
-
+
dst0 += dst_step + 3 + 1;
size.height -= 2;
size.width -= 2;
-
+
for( ; size.height-- > 0; bayer0 += bayer_step, dst0 += dst_step )
{
int t0, t1;
const T* bayer = bayer0;
T* dst = dst0;
const T* bayer_end = bayer + size.width;
-
+
if( size.width <= 0 )
{
dst[-4] = dst[-3] = dst[-2] = dst[size.width*3-1] =
dst[size.width*3] = dst[size.width*3+1] = 0;
continue;
}
-
+
if( start_with_green )
{
t0 = (bayer[1] + bayer[bayer_step*2+1] + 1) >> 1;
bayer++;
dst += 3;
}
-
+
int delta = vecOp.bayer2RGB(bayer, bayer_step, dst, size.width, blue);
bayer += delta;
dst += delta*3;
-
+
if( blue > 0 )
{
for( ; bayer <= bayer_end - 2; bayer += 2, dst += 6 )
dst[-1] = (T)t0;
dst[0] = (T)t1;
dst[1] = bayer[bayer_step+1];
-
+
t0 = (bayer[2] + bayer[bayer_step*2+2] + 1) >> 1;
t1 = (bayer[bayer_step+1] + bayer[bayer_step+3] + 1) >> 1;
dst[2] = (T)t0;
dst[1] = (T)t0;
dst[0] = (T)t1;
dst[-1] = bayer[bayer_step+1];
-
+
t0 = (bayer[2] + bayer[bayer_step*2+2] + 1) >> 1;
t1 = (bayer[bayer_step+1] + bayer[bayer_step+3] + 1) >> 1;
dst[4] = (T)t0;
dst[2] = (T)t1;
}
}
-
+
if( bayer < bayer_end )
{
t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] +
bayer++;
dst += 3;
}
-
+
dst0[-4] = dst0[-1];
dst0[-3] = dst0[0];
dst0[-2] = dst0[1];
dst0[size.width*3-1] = dst0[size.width*3-4];
dst0[size.width*3] = dst0[size.width*3-3];
dst0[size.width*3+1] = dst0[size.width*3-2];
-
+
blue = -blue;
start_with_green = !start_with_green;
}
-
+
size = dstmat.size();
dst0 = (T*)dstmat.data;
if( size.height > 2 )
}
}
-
+
/////////////////// Demosaicing using Variable Number of Gradients ///////////////////////
-
+
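// VNG demosaicing: for each pixel the code below evaluates eight directional
// gradients, keeps the directions whose gradient is below T = minGrad + maxGrad/2,
// and averages the colour estimates contributed by the accepted directions.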
static void Bayer2RGB_VNG_8u( const Mat& srcmat, Mat& dstmat, int code )
{
const uchar* bayer = srcmat.data;
uchar* dst = dstmat.data;
int dststep = (int)dstmat.step;
Size size = srcmat.size();
-
+
int blueIdx = code == CV_BayerBG2BGR_VNG || code == CV_BayerGB2BGR_VNG ? 0 : 2;
bool greenCell0 = code != CV_BayerBG2BGR_VNG && code != CV_BayerRG2BGR_VNG;
-
+
    // for images that are too small, use the simple interpolation algorithm
if( MIN(size.width, size.height) < 8 )
{
Bayer2RGB_<uchar, SIMDBayerInterpolator_8u>( srcmat, dstmat, code );
return;
}
-
+
const int brows = 3, bcn = 7;
- int N = size.width, N2 = N*2, N3 = N*3, N4 = N*4, N5 = N*5, N6 = N*6, N7 = N*7;
+ int N = size.width, N2 = N*2, N3 = N*3, N4 = N*4, N5 = N*5, N6 = N*6, N7 = N*7;
int i, bufstep = N7*bcn;
cv::AutoBuffer<ushort> _buf(bufstep*brows);
ushort* buf = (ushort*)_buf;
-
+
bayer += bstep*2;
-
+
#if CV_SSE2
bool haveSSE = cv::checkHardwareSupport(CV_CPU_SSE2);
#define _mm_absdiff_epu16(a,b) _mm_adds_epu16(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a))
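// per-lane |a-b| for unsigned 16-bit values: one of the two saturating
// subtractions is zero, the other equals the absolute difference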
#endif
-
+
for( int y = 2; y < size.height - 4; y++ )
{
uchar* dstrow = dst + dststep*y + 6;
const uchar* srow;
-
+
for( int dy = (y == 2 ? -1 : 1); dy <= 1; dy++ )
{
ushort* brow = buf + ((y + dy - 1)%brows)*bufstep + 1;
srow = bayer + (y+dy)*bstep + 1;
-
+
for( i = 0; i < bcn; i++ )
brow[N*i-1] = brow[(N-2) + N*i] = 0;
-
+
i = 1;
-
+
#if CV_SSE2
if( haveSSE )
{
for( ; i <= N-9; i += 8, srow += 8, brow += 8 )
{
__m128i s1, s2, s3, s4, s6, s7, s8, s9;
-
+
s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1-bstep)),z);
s2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep)),z);
s3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1-bstep)),z);
-
+
s4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1)),z);
s6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1)),z);
-
+
s7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1+bstep)),z);
s8 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep)),z);
s9 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1+bstep)),z);
-
+
__m128i b0, b1, b2, b3, b4, b5, b6;
-
+
b0 = _mm_adds_epu16(_mm_slli_epi16(_mm_absdiff_epu16(s2,s8),1),
_mm_adds_epu16(_mm_absdiff_epu16(s1, s7),
_mm_absdiff_epu16(s3, s9)));
_mm_absdiff_epu16(s7, s9)));
b2 = _mm_slli_epi16(_mm_absdiff_epu16(s3,s7),1);
b3 = _mm_slli_epi16(_mm_absdiff_epu16(s1,s9),1);
-
+
_mm_storeu_si128((__m128i*)brow, b0);
_mm_storeu_si128((__m128i*)(brow + N), b1);
_mm_storeu_si128((__m128i*)(brow + N2), b2);
_mm_storeu_si128((__m128i*)(brow + N3), b3);
-
+
b4 = _mm_adds_epu16(b2,_mm_adds_epu16(_mm_absdiff_epu16(s2, s4),
_mm_absdiff_epu16(s6, s8)));
b5 = _mm_adds_epu16(b3,_mm_adds_epu16(_mm_absdiff_epu16(s2, s6),
_mm_absdiff_epu16(s4, s8)));
b6 = _mm_adds_epu16(_mm_adds_epu16(s2, s4), _mm_adds_epu16(s6, s8));
b6 = _mm_srli_epi16(b6, 1);
-
+
_mm_storeu_si128((__m128i*)(brow + N4), b4);
_mm_storeu_si128((__m128i*)(brow + N5), b5);
_mm_storeu_si128((__m128i*)(brow + N6), b6);
}
}
#endif
-
+
for( ; i < N-1; i++, srow++, brow++ )
{
brow[0] = (ushort)(std::abs(srow[-1-bstep] - srow[-1+bstep]) +
brow[N6] = (ushort)((srow[-bstep] + srow[-1] + srow[1] + srow[+bstep])>>1);
}
}
-
+
const ushort* brow0 = buf + ((y - 2) % brows)*bufstep + 2;
const ushort* brow1 = buf + ((y - 1) % brows)*bufstep + 2;
const ushort* brow2 = buf + (y % brows)*bufstep + 2;
static const float scale[] = { 0.f, 0.5f, 0.25f, 0.1666666666667f, 0.125f, 0.1f, 0.08333333333f, 0.0714286f, 0.0625f };
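        // scale[ng] == 1/(2*ng): used to average the colour estimates gathered from
        // the ng accepted gradient directions (the SIMD path computes the same 0.5/ng)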
srow = bayer + y*bstep + 2;
bool greenCell = greenCell0;
-
+
i = 2;
#if CV_SSE2
int limit = !haveSSE ? N-2 : greenCell ? std::min(3, N-2) : 2;
#else
int limit = N - 2;
#endif
-
+
do
{
for( ; i < limit; i++, srow++, brow0++, brow1++, brow2++, dstrow += 3 )
int minGrad = std::min(std::min(std::min(gradN, gradS), gradW), gradE);
int maxGrad = std::max(std::max(std::max(gradN, gradS), gradW), gradE);
int R, G, B;
-
+
if( !greenCell )
{
int gradNE = brow0[N4+1] + brow1[N4];
int gradSW = brow1[N4] + brow2[N4-1];
int gradNW = brow0[N5-1] + brow1[N5];
int gradSE = brow1[N5] + brow2[N5+1];
-
+
minGrad = std::min(std::min(std::min(std::min(minGrad, gradNE), gradSW), gradNW), gradSE);
maxGrad = std::max(std::max(std::max(std::max(maxGrad, gradNE), gradSW), gradNW), gradSE);
int T = minGrad + maxGrad/2;
-
+
int Rs = 0, Gs = 0, Bs = 0, ng = 0;
if( gradN < T )
{
}
R = srow[0];
G = R + cvRound((Gs - Rs)*scale[ng]);
- B = R + cvRound((Bs - Rs)*scale[ng]);
+ B = R + cvRound((Bs - Rs)*scale[ng]);
}
else
{
int gradSW = brow1[N2] + brow1[N2-1] + brow2[N2] + brow2[N2-1];
int gradNW = brow0[N3] + brow0[N3-1] + brow1[N3] + brow1[N3-1];
int gradSE = brow1[N3] + brow1[N3+1] + brow2[N3] + brow2[N3+1];
-
+
minGrad = std::min(std::min(std::min(std::min(minGrad, gradNE), gradSW), gradNW), gradSE);
maxGrad = std::max(std::max(std::max(std::max(maxGrad, gradNE), gradSW), gradNW), gradSE);
int T = minGrad + maxGrad/2;
-
+
int Rs = 0, Gs = 0, Bs = 0, ng = 0;
if( gradN < T )
{
#if CV_SSE2
if( !haveSSE )
break;
-
+
__m128i emask = _mm_set1_epi32(0x0000ffff),
omask = _mm_set1_epi32(0xffff0000),
z = _mm_setzero_si128();
__m128 _0_5 = _mm_set1_ps(0.5f);
-
+
#define _mm_merge_epi16(a, b) _mm_or_si128(_mm_and_si128(a, emask), _mm_and_si128(b, omask)) //(aA_aA_aA_aA) * (bB_bB_bB_bB) => (bA_bA_bA_bA)
#define _mm_cvtloepi16_ps(a) _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(a,a), 16)) //(1,2,3,4,5,6,7,8) => (1f,2f,3f,4f)
#define _mm_cvthiepi16_ps(a) _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(a,a), 16)) //(1,2,3,4,5,6,7,8) => (5f,6f,7f,8f)
#define _mm_loadl_u8_s16(ptr, offset) _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)((ptr) + (offset))), z) //load 8 uchars to 8 shorts
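            // Example: with 16-bit lanes a = {a0..a7}, b = {b0..b7},
            // _mm_merge_epi16(a, b) yields {a0,b1,a2,b3,a4,b5,a6,b7}
            // (even lanes from a, odd lanes from b).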
-
+
// process 8 pixels at once
for( ; i <= N - 10; i += 8, srow += 8, brow0 += 8, brow1 += 8, brow2 += 8 )
{
//int gradE = brow1[N+1] + brow1[N];
__m128i gradE = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N+1)), _mm_loadu_si128((__m128i*)(brow1+N)));
-
+
//int minGrad = std::min(std::min(std::min(gradN, gradS), gradW), gradE);
//int maxGrad = std::max(std::max(std::max(gradN, gradS), gradW), gradE);
__m128i minGrad = _mm_min_epi16(_mm_min_epi16(gradN, gradS), _mm_min_epi16(gradW, gradE));
__m128i maxGrad = _mm_max_epi16(_mm_max_epi16(gradN, gradS), _mm_max_epi16(gradW, gradE));
-
+
__m128i grad0, grad1;
-
+
//int gradNE = brow0[N4+1] + brow1[N4];
//int gradNE = brow0[N2] + brow0[N2+1] + brow1[N2] + brow1[N2+1];
grad0 = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow0+N4+1)), _mm_loadu_si128((__m128i*)(brow1+N4)));
grad1 = _mm_adds_epi16( _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow0+N2)), _mm_loadu_si128((__m128i*)(brow0+N2+1))),
_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N2)), _mm_loadu_si128((__m128i*)(brow1+N2+1))));
__m128i gradNE = _mm_merge_epi16(grad0, grad1);
-
+
//int gradSW = brow1[N4] + brow2[N4-1];
//int gradSW = brow1[N2] + brow1[N2-1] + brow2[N2] + brow2[N2-1];
grad0 = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow2+N4-1)), _mm_loadu_si128((__m128i*)(brow1+N4)));
grad1 = _mm_adds_epi16(_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow2+N2)), _mm_loadu_si128((__m128i*)(brow2+N2-1))),
_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N2)), _mm_loadu_si128((__m128i*)(brow1+N2-1))));
__m128i gradSW = _mm_merge_epi16(grad0, grad1);
-
+
minGrad = _mm_min_epi16(_mm_min_epi16(minGrad, gradNE), gradSW);
maxGrad = _mm_max_epi16(_mm_max_epi16(maxGrad, gradNE), gradSW);
grad1 = _mm_adds_epi16(_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow0+N3)), _mm_loadu_si128((__m128i*)(brow0+N3-1))),
_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N3)), _mm_loadu_si128((__m128i*)(brow1+N3-1))));
__m128i gradNW = _mm_merge_epi16(grad0, grad1);
-
+
//int gradSE = brow1[N5] + brow2[N5+1];
//int gradSE = brow1[N3] + brow1[N3+1] + brow2[N3] + brow2[N3+1];
grad0 = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow2+N5+1)), _mm_loadu_si128((__m128i*)(brow1+N5)));
grad1 = _mm_adds_epi16(_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow2+N3)), _mm_loadu_si128((__m128i*)(brow2+N3+1))),
_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N3)), _mm_loadu_si128((__m128i*)(brow1+N3+1))));
__m128i gradSE = _mm_merge_epi16(grad0, grad1);
-
+
minGrad = _mm_min_epi16(_mm_min_epi16(minGrad, gradNW), gradSE);
maxGrad = _mm_max_epi16(_mm_max_epi16(maxGrad, gradNW), gradSE);
-
+
//int T = minGrad + maxGrad/2;
__m128i T = _mm_adds_epi16(_mm_srli_epi16(maxGrad, 1), minGrad);
__m128i RGs = z, GRs = z, Bs = z, ng = z;
-
+
__m128i x0 = _mm_loadl_u8_s16(srow, +0 );
__m128i x1 = _mm_loadl_u8_s16(srow, -1 - bstep );
__m128i x2 = _mm_loadl_u8_s16(srow, -1 - bstep*2);
__m128i x16 = _mm_loadl_u8_s16(srow, -2 - bstep );
__m128i t0, t1, mask;
-
+
// gradN ***********************************************
mask = _mm_cmpgt_epi16(T, gradN); // mask = T>gradN
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradN)
-
+
t0 = _mm_slli_epi16(x3, 1); // srow[-bstep]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, -bstep*2), x0); // srow[-bstep*2] + srow[0]
-
+
// RGs += (srow[-bstep*2] + srow[0]) * (T>gradN)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(t1, mask));
// GRs += {srow[-bstep]*2; (srow[-bstep*2-1] + srow[-bstep*2+1])} * (T>gradN)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(t0, _mm_adds_epi16(x2,x4)), mask));
// Bs += {(srow[-bstep-1]+srow[-bstep+1]); srow[-bstep]*2 } * (T>gradN)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epi16(x1,x5), t0), mask));
-
+
// gradNE **********************************************
mask = _mm_cmpgt_epi16(T, gradNE); // mask = T>gradNE
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradNE)
t0 = _mm_slli_epi16(x5, 1); // srow[-bstep+1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, -bstep*2+2), x0); // srow[-bstep*2+2] + srow[0]
-
+
// RGs += {(srow[-bstep*2+2] + srow[0]); srow[-bstep+1]*2} * (T>gradNE)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask));
// GRs += {brow0[N6+1]; (srow[-bstep*2+1] + srow[1])} * (T>gradNE)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow0+N6+1)), _mm_adds_epi16(x4,x7)), mask));
// Bs += {srow[-bstep+1]*2; (srow[-bstep] + srow[-bstep+2])} * (T>gradNE)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(t0,_mm_adds_epi16(x3,x6)), mask));
-
+
// gradE ***********************************************
mask = _mm_cmpgt_epi16(T, gradE); // mask = T>gradE
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradE)
-
+
t0 = _mm_slli_epi16(x7, 1); // srow[1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, 2), x0); // srow[2] + srow[0]
GRs = _mm_adds_epi16(GRs, _mm_and_si128(t0, mask));
// Bs += {(srow[-bstep+1]+srow[bstep+1]); (srow[-bstep+2]+srow[bstep+2])} * (T>gradE)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epi16(x5,x9), _mm_adds_epi16(x6,x8)), mask));
-
+
// gradSE **********************************************
mask = _mm_cmpgt_epi16(T, gradSE); // mask = T>gradSE
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradSE)
-
+
t0 = _mm_slli_epi16(x9, 1); // srow[bstep+1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, bstep*2+2), x0); // srow[bstep*2+2] + srow[0]
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow2+N6+1)), _mm_adds_epi16(x7,x10)), mask));
// Bs += {srow[-bstep+1]*2; (srow[bstep+2]+srow[bstep])} * (T>gradSE)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_slli_epi16(x5, 1), _mm_adds_epi16(x8,x11)), mask));
-
+
// gradS ***********************************************
mask = _mm_cmpgt_epi16(T, gradS); // mask = T>gradS
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradS)
-
+
t0 = _mm_slli_epi16(x11, 1); // srow[bstep]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow,bstep*2), x0); // srow[bstep*2]+srow[0]
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(t0, _mm_adds_epi16(x10,x12)), mask));
// Bs += {(srow[bstep+1]+srow[bstep-1]); srow[bstep]*2} * (T>gradS)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epi16(x9,x13), t0), mask));
-
+
// gradSW **********************************************
mask = _mm_cmpgt_epi16(T, gradSW); // mask = T>gradSW
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradSW)
-
+
t0 = _mm_slli_epi16(x13, 1); // srow[bstep-1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, bstep*2-2), x0); // srow[bstep*2-2]+srow[0]
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow2+N6-1)), _mm_adds_epi16(x12,x15)), mask));
// Bs += {srow[bstep-1]*2; (srow[bstep]+srow[bstep-2])} * (T>gradSW)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(t0,_mm_adds_epi16(x11,x14)), mask));
-
+
// gradW ***********************************************
mask = _mm_cmpgt_epi16(T, gradW); // mask = T>gradW
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradW)
-
+
t0 = _mm_slli_epi16(x15, 1); // srow[-1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, -2), x0); // srow[-2]+srow[0]
GRs = _mm_adds_epi16(GRs, _mm_and_si128(t0, mask));
// Bs += {(srow[-bstep-1]+srow[bstep-1]); (srow[bstep-2]+srow[-bstep-2])} * (T>gradW)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epi16(x1,x13), _mm_adds_epi16(x14,x16)), mask));
-
+
// gradNW **********************************************
mask = _mm_cmpgt_epi16(T, gradNW); // mask = T>gradNW
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradNW)
-
+
t0 = _mm_slli_epi16(x1, 1); // srow[-bstep-1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow,-bstep*2-2), x0); // srow[-bstep*2-2]+srow[0]
__m128 ngf0, ngf1;
ngf0 = _mm_div_ps(_0_5, _mm_cvtloepi16_ps(ng));
ngf1 = _mm_div_ps(_0_5, _mm_cvthiepi16_ps(ng));
-
+
// now interpolate r, g & b
t0 = _mm_sub_epi16(GRs, RGs);
t1 = _mm_sub_epi16(Bs, RGs);
-
+
t0 = _mm_add_epi16(x0, _mm_packs_epi32(
_mm_cvtps_epi32(_mm_mul_ps(_mm_cvtloepi16_ps(t0), ngf0)),
_mm_cvtps_epi32(_mm_mul_ps(_mm_cvthiepi16_ps(t0), ngf1))));
-
+
t1 = _mm_add_epi16(x0, _mm_packs_epi32(
_mm_cvtps_epi32(_mm_mul_ps(_mm_cvtloepi16_ps(t1), ngf0)),
_mm_cvtps_epi32(_mm_mul_ps(_mm_cvthiepi16_ps(t1), ngf1))));
-
+
x1 = _mm_merge_epi16(x0, t0);
x2 = _mm_merge_epi16(t0, x0);
-
+
uchar R[8], G[8], B[8];
-
+
_mm_storel_epi64(blueIdx ? (__m128i*)B : (__m128i*)R, _mm_packus_epi16(x1, z));
_mm_storel_epi64((__m128i*)G, _mm_packus_epi16(x2, z));
_mm_storel_epi64(blueIdx ? (__m128i*)R : (__m128i*)B, _mm_packus_epi16(t1, z));
-
+
for( int j = 0; j < 8; j++, dstrow += 3 )
{
dstrow[0] = B[j]; dstrow[1] = G[j]; dstrow[2] = R[j];
}
}
#endif
-
+
limit = N - 2;
}
while( i < N - 2 );
-
+
for( i = 0; i < 6; i++ )
{
dst[dststep*y + 5 - i] = dst[dststep*y + 8 - i];
dst[dststep*y + (N - 2)*3 + i] = dst[dststep*y + (N - 3)*3 + i];
}
-
+
greenCell0 = !greenCell0;
blueIdx ^= 2;
}
-
+
for( i = 0; i < size.width*3; i++ )
{
dst[i] = dst[i + dststep] = dst[i + dststep*2];
{
const int rangeBegin = range.begin() * 2;
const int rangeEnd = range.end() * 2;
-
+
size_t uvsteps[2] = {width/2, stride - width/2};
int usIdx = ustepIdx, vsIdx = vstepIdx;
const uchar* y1 = my1 + rangeBegin * stride;
const uchar* u1 = mu + (range.begin() / 2) * stride;
const uchar* v1 = mv + (range.begin() / 2) * stride;
-
+
if(range.begin() % 2 == 1)
{
u1 += uvsteps[(usIdx++) & 1];
const uchar* y1 = my1 + rangeBegin * stride;
const uchar* u1 = mu + (range.begin() / 2) * stride;
const uchar* v1 = mv + (range.begin() / 2) * stride;
-
+
if(range.begin() % 2 == 1)
{
u1 += uvsteps[(usIdx++) & 1];
Mat src = _src.getMat(), dst;
Size sz = src.size();
int scn = src.channels(), depth = src.depth(), bidx;
-
+
CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_32F );
-
+
switch( code )
{
case CV_BGR2BGRA: case CV_RGB2BGRA: case CV_BGRA2BGR:
CV_Assert( scn == 3 || scn == 4 );
dcn = code == CV_BGR2BGRA || code == CV_RGB2BGRA || code == CV_BGRA2RGBA ? 4 : 3;
bidx = code == CV_BGR2BGRA || code == CV_BGRA2BGR ? 0 : 2;
-
+
_dst.create( sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
if( depth == CV_8U )
{
#ifdef HAVE_TEGRA_OPTIMIZATION
else
CvtColorLoop(src, dst, RGB2RGB<float>(scn, dcn, bidx));
break;
-
+
case CV_BGR2BGR565: case CV_BGR2BGR555: case CV_RGB2BGR565: case CV_RGB2BGR555:
case CV_BGRA2BGR565: case CV_BGRA2BGR555: case CV_RGBA2BGR565: case CV_RGBA2BGR555:
CV_Assert( (scn == 3 || scn == 4) && depth == CV_8U );
if(tegra::cvtRGB2RGB565(src, dst, code == CV_RGB2BGR565 || code == CV_RGBA2BGR565 ? 0 : 2))
break;
#endif
-
+
CvtColorLoop(src, dst, RGB2RGB5x5(scn,
code == CV_BGR2BGR565 || code == CV_BGR2BGR555 ||
code == CV_BGRA2BGR565 || code == CV_BGRA2BGR555 ? 0 : 2,
code == CV_BGRA2BGR565 || code == CV_RGBA2BGR565 ? 6 : 5 // green bits
));
break;
-
+
case CV_BGR5652BGR: case CV_BGR5552BGR: case CV_BGR5652RGB: case CV_BGR5552RGB:
case CV_BGR5652BGRA: case CV_BGR5552BGRA: case CV_BGR5652RGBA: case CV_BGR5552RGBA:
if(dcn <= 0) dcn = (code==CV_BGR5652BGRA || code==CV_BGR5552BGRA || code==CV_BGR5652RGBA || code==CV_BGR5552RGBA) ? 4 : 3;
CV_Assert( (dcn == 3 || dcn == 4) && scn == 2 && depth == CV_8U );
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
CvtColorLoop(src, dst, RGB5x52RGB(dcn,
code == CV_BGR5652BGR || code == CV_BGR5552BGR ||
code == CV_BGR5652BGRA || code == CV_BGR5552BGRA ? 0 : 2, // blue idx
code == CV_BGR5652BGRA || code == CV_BGR5652RGBA ? 6 : 5 // green bits
));
break;
-
+
case CV_BGR2GRAY: case CV_BGRA2GRAY: case CV_RGB2GRAY: case CV_RGBA2GRAY:
CV_Assert( scn == 3 || scn == 4 );
_dst.create(sz, CV_MAKETYPE(depth, 1));
dst = _dst.getMat();
-
+
bidx = code == CV_BGR2GRAY || code == CV_BGRA2GRAY ? 0 : 2;
-
+
if( depth == CV_8U )
{
#ifdef HAVE_TEGRA_OPTIMIZATION
else
CvtColorLoop(src, dst, RGB2Gray<float>(scn, bidx, 0));
break;
-
+
case CV_BGR5652GRAY: case CV_BGR5552GRAY:
CV_Assert( scn == 2 && depth == CV_8U );
_dst.create(sz, CV_8UC1);
dst = _dst.getMat();
-
+
CvtColorLoop(src, dst, RGB5x52Gray(code == CV_BGR5652GRAY ? 6 : 5));
break;
-
+
case CV_GRAY2BGR: case CV_GRAY2BGRA:
if( dcn <= 0 ) dcn = (code==CV_GRAY2BGRA) ? 4 : 3;
CV_Assert( scn == 1 && (dcn == 3 || dcn == 4));
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
if( depth == CV_8U )
{
#ifdef HAVE_TEGRA_OPTIMIZATION
else
CvtColorLoop(src, dst, Gray2RGB<float>(dcn));
break;
-
+
case CV_GRAY2BGR565: case CV_GRAY2BGR555:
CV_Assert( scn == 1 && depth == CV_8U );
_dst.create(sz, CV_8UC2);
dst = _dst.getMat();
-
+
CvtColorLoop(src, dst, Gray2RGB5x5(code == CV_GRAY2BGR565 ? 6 : 5));
break;
-
+
case CV_BGR2YCrCb: case CV_RGB2YCrCb:
case CV_BGR2YUV: case CV_RGB2YUV:
{
static const int yuv_i[] = { B2Y, G2Y, R2Y, 8061, 14369 };
const float* coeffs_f = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 0 : yuv_f;
const int* coeffs_i = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 0 : yuv_i;
-
+
_dst.create(sz, CV_MAKETYPE(depth, 3));
dst = _dst.getMat();
-
+
if( depth == CV_8U )
{
#ifdef HAVE_TEGRA_OPTIMIZATION
CvtColorLoop(src, dst, RGB2YCrCb_f<float>(scn, bidx, coeffs_f));
}
break;
-
+
case CV_YCrCb2BGR: case CV_YCrCb2RGB:
case CV_YUV2BGR: case CV_YUV2RGB:
{
CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) );
bidx = code == CV_YCrCb2BGR || code == CV_YUV2RGB ? 0 : 2;
static const float yuv_f[] = { 2.032f, -0.395f, -0.581f, 1.140f };
- static const int yuv_i[] = { 33292, -6472, -9519, 18678 };
+ static const int yuv_i[] = { 33292, -6472, -9519, 18678 };
const float* coeffs_f = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_f;
const int* coeffs_i = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_i;
-
+
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
if( depth == CV_8U )
CvtColorLoop(src, dst, YCrCb2RGB_i<uchar>(dcn, bidx, coeffs_i));
else if( depth == CV_16U )
CvtColorLoop(src, dst, YCrCb2RGB_f<float>(dcn, bidx, coeffs_f));
}
break;
-
+
case CV_BGR2XYZ: case CV_RGB2XYZ:
CV_Assert( scn == 3 || scn == 4 );
bidx = code == CV_BGR2XYZ ? 0 : 2;
-
+
_dst.create(sz, CV_MAKETYPE(depth, 3));
dst = _dst.getMat();
-
+
if( depth == CV_8U )
CvtColorLoop(src, dst, RGB2XYZ_i<uchar>(scn, bidx, 0));
else if( depth == CV_16U )
else
CvtColorLoop(src, dst, RGB2XYZ_f<float>(scn, bidx, 0));
break;
-
+
case CV_XYZ2BGR: case CV_XYZ2RGB:
if( dcn <= 0 ) dcn = 3;
CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) );
bidx = code == CV_XYZ2BGR ? 0 : 2;
-
+
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
if( depth == CV_8U )
CvtColorLoop(src, dst, XYZ2RGB_i<uchar>(dcn, bidx, 0));
else if( depth == CV_16U )
else
CvtColorLoop(src, dst, XYZ2RGB_f<float>(dcn, bidx, 0));
break;
-
+
case CV_BGR2HSV: case CV_RGB2HSV: case CV_BGR2HSV_FULL: case CV_RGB2HSV_FULL:
case CV_BGR2HLS: case CV_RGB2HLS: case CV_BGR2HLS_FULL: case CV_RGB2HLS_FULL:
{
code == CV_BGR2HSV_FULL || code == CV_BGR2HLS_FULL ? 0 : 2;
int hrange = depth == CV_32F ? 360 : code == CV_BGR2HSV || code == CV_RGB2HSV ||
code == CV_BGR2HLS || code == CV_RGB2HLS ? 180 : 256;
-
+
_dst.create(sz, CV_MAKETYPE(depth, 3));
dst = _dst.getMat();
-
+
if( code == CV_BGR2HSV || code == CV_RGB2HSV ||
code == CV_BGR2HSV_FULL || code == CV_RGB2HSV_FULL )
{
#ifdef HAVE_TEGRA_OPTIMIZATION
- if(tegra::cvtRGB2HSV(src, dst, bidx, hrange))
+ if(tegra::cvtRGB2HSV(src, dst, bidx, hrange))
break;
#endif
if( depth == CV_8U )
}
}
break;
-
+
case CV_HSV2BGR: case CV_HSV2RGB: case CV_HSV2BGR_FULL: case CV_HSV2RGB_FULL:
case CV_HLS2BGR: case CV_HLS2RGB: case CV_HLS2BGR_FULL: case CV_HLS2RGB_FULL:
{
code == CV_HSV2BGR_FULL || code == CV_HLS2BGR_FULL ? 0 : 2;
int hrange = depth == CV_32F ? 360 : code == CV_HSV2BGR || code == CV_HSV2RGB ||
code == CV_HLS2BGR || code == CV_HLS2RGB ? 180 : 255;
-
+
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
if( code == CV_HSV2BGR || code == CV_HSV2RGB ||
code == CV_HSV2BGR_FULL || code == CV_HSV2RGB_FULL )
{
}
}
break;
-
+
case CV_BGR2Lab: case CV_RGB2Lab: case CV_LBGR2Lab: case CV_LRGB2Lab:
case CV_BGR2Luv: case CV_RGB2Luv: case CV_LBGR2Luv: case CV_LRGB2Luv:
{
code == CV_LBGR2Lab || code == CV_LBGR2Luv ? 0 : 2;
bool srgb = code == CV_BGR2Lab || code == CV_RGB2Lab ||
code == CV_BGR2Luv || code == CV_RGB2Luv;
-
+
_dst.create(sz, CV_MAKETYPE(depth, 3));
dst = _dst.getMat();
-
+
if( code == CV_BGR2Lab || code == CV_RGB2Lab ||
code == CV_LBGR2Lab || code == CV_LRGB2Lab )
{
}
}
break;
-
+
case CV_Lab2BGR: case CV_Lab2RGB: case CV_Lab2LBGR: case CV_Lab2LRGB:
case CV_Luv2BGR: case CV_Luv2RGB: case CV_Luv2LBGR: case CV_Luv2LRGB:
{
code == CV_Lab2LBGR || code == CV_Luv2LBGR ? 0 : 2;
bool srgb = code == CV_Lab2BGR || code == CV_Lab2RGB ||
code == CV_Luv2BGR || code == CV_Luv2RGB;
-
+
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
if( code == CV_Lab2BGR || code == CV_Lab2RGB ||
code == CV_Lab2LBGR || code == CV_Lab2LRGB )
{
}
}
break;
-
+
case CV_BayerBG2GRAY: case CV_BayerGB2GRAY: case CV_BayerRG2GRAY: case CV_BayerGR2GRAY:
if(dcn <= 0) dcn = 1;
CV_Assert( scn == 1 && dcn == 1 );
-
+
_dst.create(sz, depth);
dst = _dst.getMat();
-
+
if( depth == CV_8U )
Bayer2Gray_<uchar, SIMDBayerInterpolator_8u>(src, dst, code);
else if( depth == CV_16U )
else
CV_Error(CV_StsUnsupportedFormat, "Bayer->Gray demosaicing only supports 8u and 16u types");
break;
-
+
case CV_BayerBG2BGR: case CV_BayerGB2BGR: case CV_BayerRG2BGR: case CV_BayerGR2BGR:
case CV_BayerBG2BGR_VNG: case CV_BayerGB2BGR_VNG: case CV_BayerRG2BGR_VNG: case CV_BayerGR2BGR_VNG:
if(dcn <= 0) dcn = 3;
CV_Assert( scn == 1 && dcn == 3 );
-
+
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
if( code == CV_BayerBG2BGR || code == CV_BayerGB2BGR ||
code == CV_BayerRG2BGR || code == CV_BayerGR2BGR )
{
{
// http://www.fourcc.org/yuv.php#NV21 == yuv420sp -> a plane of 8 bit Y samples followed by an interleaved V/U plane containing 8 bit 2x2 subsampled chroma samples
// http://www.fourcc.org/yuv.php#NV12 -> a plane of 8 bit Y samples followed by an interleaved U/V plane containing 8 bit 2x2 subsampled colour difference samples
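            // Layout example: a 4x4 NV21 frame is 16 Y bytes followed by the 8 bytes
            // V0 U0 V1 U1 V2 U2 V3 U3 (one V/U pair per 2x2 block of Y samples);
            // NV12 stores the same pairs in U,V order.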
-
+
if (dcn <= 0) dcn = (code==CV_YUV420sp2BGRA || code==CV_YUV420sp2RGBA || code==CV_YUV2BGRA_NV12 || code==CV_YUV2RGBA_NV12) ? 4 : 3;
- const int bidx = (code==CV_YUV2BGR_NV21 || code==CV_YUV2BGRA_NV21 || code==CV_YUV2BGR_NV12 || code==CV_YUV2BGRA_NV12) ? 0 : 2;
- const int uidx = (code==CV_YUV2BGR_NV21 || code==CV_YUV2BGRA_NV21 || code==CV_YUV2RGB_NV21 || code==CV_YUV2RGBA_NV21) ? 1 : 0;
-
+ const int bIdx = (code==CV_YUV2BGR_NV21 || code==CV_YUV2BGRA_NV21 || code==CV_YUV2BGR_NV12 || code==CV_YUV2BGRA_NV12) ? 0 : 2;
+ const int uIdx = (code==CV_YUV2BGR_NV21 || code==CV_YUV2BGRA_NV21 || code==CV_YUV2RGB_NV21 || code==CV_YUV2RGBA_NV21) ? 1 : 0;
+
CV_Assert( dcn == 3 || dcn == 4 );
CV_Assert( sz.width % 2 == 0 && sz.height % 3 == 0 && depth == CV_8U );
-
+
Size dstSz(sz.width, sz.height * 2 / 3);
_dst.create(dstSz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
int srcstep = (int)src.step;
const uchar* y = src.ptr();
const uchar* uv = y + srcstep * dstSz.height;
-
- switch(dcn*100 + bidx * 10 + uidx)
+
+ switch(dcn*100 + bIdx * 10 + uIdx)
{
case 300: cvtYUV420sp2RGB<0, 0> (dst, srcstep, y, uv); break;
case 301: cvtYUV420sp2RGB<0, 1> (dst, srcstep, y, uv); break;
{
//http://www.fourcc.org/yuv.php#YV12 == yuv420p -> It comprises an NxM Y plane followed by (N/2)x(M/2) V and U planes.
//http://www.fourcc.org/yuv.php#IYUV == I420 -> It comprises an NxN Y plane followed by (N/2)x(N/2) U and V planes
-
+
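            // Plane order: YV12 = Y plane, then V, then U; I420/IYUV = Y, then U, then V.
            // This is why u and v are swapped below when uIdx == 1.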
if (dcn <= 0) dcn = (code==CV_YUV2BGRA_YV12 || code==CV_YUV2RGBA_YV12 || code==CV_YUV2RGBA_IYUV || code==CV_YUV2BGRA_IYUV) ? 4 : 3;
- const int bidx = (code==CV_YUV2BGR_YV12 || code==CV_YUV2BGRA_YV12 || code==CV_YUV2BGR_IYUV || code==CV_YUV2BGRA_IYUV) ? 0 : 2;
- const int uidx = (code==CV_YUV2BGR_YV12 || code==CV_YUV2RGB_YV12 || code==CV_YUV2BGRA_YV12 || code==CV_YUV2RGBA_YV12) ? 1 : 0;
-
+ const int bIdx = (code==CV_YUV2BGR_YV12 || code==CV_YUV2BGRA_YV12 || code==CV_YUV2BGR_IYUV || code==CV_YUV2BGRA_IYUV) ? 0 : 2;
+ const int uIdx = (code==CV_YUV2BGR_YV12 || code==CV_YUV2RGB_YV12 || code==CV_YUV2BGRA_YV12 || code==CV_YUV2RGBA_YV12) ? 1 : 0;
+
CV_Assert( dcn == 3 || dcn == 4 );
CV_Assert( sz.width % 2 == 0 && sz.height % 3 == 0 && depth == CV_8U );
Size dstSz(sz.width, sz.height * 2 / 3);
_dst.create(dstSz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
int srcstep = (int)src.step;
const uchar* y = src.ptr();
const uchar* u = y + srcstep * dstSz.height;
const uchar* v = y + srcstep * (dstSz.height + dstSz.height/4) + (dstSz.width/2) * ((dstSz.height % 4)/2);
-
+
int ustepIdx = 0;
int vstepIdx = dstSz.height % 4 == 2 ? 1 : 0;
-
- if(uidx == 1) { std::swap(u ,v), std::swap(ustepIdx, vstepIdx); };
-
- switch(dcn*10 + bidx)
+
+ if(uIdx == 1) { std::swap(u ,v), std::swap(ustepIdx, vstepIdx); };
+
+ switch(dcn*10 + bIdx)
{
case 30: cvtYUV420p2RGB<0>(dst, srcstep, y, u, v, ustepIdx, vstepIdx); break;
case 32: cvtYUV420p2RGB<2>(dst, srcstep, y, u, v, ustepIdx, vstepIdx); break;
case CV_YUV2GRAY_420:
{
if (dcn <= 0) dcn = 1;
-
+
CV_Assert( dcn == 1 );
CV_Assert( sz.width % 2 == 0 && sz.height % 3 == 0 && depth == CV_8U );
Size dstSz(sz.width, sz.height * 2 / 3);
_dst.create(dstSz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
-
+
src(Range(0, dstSz.height), Range::all()).copyTo(dst);
}
break;
{
//http://www.fourcc.org/yuv.php#UYVY
//http://www.fourcc.org/yuv.php#YUY2
- //http://www.fourcc.org/yuv.php#YVYU
-
+ //http://www.fourcc.org/yuv.php#YVYU
+
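            // Macropixel byte order: UYVY = U0 Y0 V0 Y1, YUY2 = Y0 U0 Y1 V0,
            // YVYU = Y0 V0 Y1 U0; ycn is the offset of the first Y byte and
            // uIdx tells whether U or V comes first in the chroma pair.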
if (dcn <= 0) dcn = (code==CV_YUV2RGBA_UYVY || code==CV_YUV2BGRA_UYVY || code==CV_YUV2RGBA_YUY2 || code==CV_YUV2BGRA_YUY2 || code==CV_YUV2RGBA_YVYU || code==CV_YUV2BGRA_YVYU) ? 4 : 3;
- const int bidx = (code==CV_YUV2BGR_UYVY || code==CV_YUV2BGRA_UYVY || code==CV_YUV2BGR_YUY2 || code==CV_YUV2BGRA_YUY2 || code==CV_YUV2BGR_YVYU || code==CV_YUV2BGRA_YVYU) ? 0 : 2;
- const int ycn = (code==CV_YUV2RGB_UYVY || code==CV_YUV2BGR_UYVY || code==CV_YUV2RGBA_UYVY || code==CV_YUV2BGRA_UYVY) ? 1 : 0;
- const int uidx = (code==CV_YUV2RGB_YVYU || code==CV_YUV2BGR_YVYU || code==CV_YUV2RGBA_YVYU || code==CV_YUV2BGRA_YVYU) ? 1 : 0;
-
+ const int bIdx = (code==CV_YUV2BGR_UYVY || code==CV_YUV2BGRA_UYVY || code==CV_YUV2BGR_YUY2 || code==CV_YUV2BGRA_YUY2 || code==CV_YUV2BGR_YVYU || code==CV_YUV2BGRA_YVYU) ? 0 : 2;
+ const int ycn = (code==CV_YUV2RGB_UYVY || code==CV_YUV2BGR_UYVY || code==CV_YUV2RGBA_UYVY || code==CV_YUV2BGRA_UYVY) ? 1 : 0;
+ const int uIdx = (code==CV_YUV2RGB_YVYU || code==CV_YUV2BGR_YVYU || code==CV_YUV2RGBA_YVYU || code==CV_YUV2BGRA_YVYU) ? 1 : 0;
+
CV_Assert( dcn == 3 || dcn == 4 );
CV_Assert( scn == 2 && depth == CV_8U );
_dst.create(sz, CV_8UC(dcn));
dst = _dst.getMat();
-
- switch(dcn*1000 + bidx*100 + uidx*10 + ycn)
+
+ switch(dcn*1000 + bIdx*100 + uIdx*10 + ycn)
{
case 3000: cvtYUV422toRGB<0,0,0>(dst, (int)src.step, src.ptr<uchar>()); break;
case 3001: cvtYUV422toRGB<0,0,1>(dst, (int)src.step, src.ptr<uchar>()); break;
case CV_YUV2GRAY_UYVY: case CV_YUV2GRAY_YUY2:
{
if (dcn <= 0) dcn = 1;
-
+
CV_Assert( dcn == 1 );
CV_Assert( scn == 2 && depth == CV_8U );
CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" );
}
}
-
+
CV_IMPL void
cvCvtColor( const CvArr* srcarr, CvArr* dstarr, int code )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0;
CV_Assert( src.depth() == dst.depth() );
-
+
cv::cvtColor(src, dst, code, dst.channels());
CV_Assert( dst.data == dst0.data );
}
/*
Various border types, image boundaries are denoted with '|'
-
+
* BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh
* BORDER_REFLECT: fedcba|abcdefgh|hgfedcb
* BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba
- * BORDER_WRAP: cdefgh|abcdefgh|abcdefg
+ * BORDER_WRAP: cdefgh|abcdefgh|abcdefg
* BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i'
*/
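/*
 For example, with len == 8:
 borderInterpolate(-2, 8, BORDER_REFLECT_101) == 2, borderInterpolate(-2, 8, BORDER_REFLECT) == 1,
 borderInterpolate( 9, 8, BORDER_WRAP)        == 1, borderInterpolate(-3, 8, BORDER_REPLICATE) == 0,
 matching the diagrams above.
*/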
int cv::borderInterpolate( int p, int len, int borderType )
wholeSize = Size(-1,-1);
}
-
+
FilterEngine::FilterEngine( const Ptr<BaseFilter>& _filter2D,
const Ptr<BaseRowFilter>& _rowFilter,
init(_filter2D, _rowFilter, _columnFilter, _srcType, _dstType, _bufType,
_rowBorderType, _columnBorderType, _borderValue);
}
-
+
FilterEngine::~FilterEngine()
{
}
_srcType = CV_MAT_TYPE(_srcType);
_bufType = CV_MAT_TYPE(_bufType);
_dstType = CV_MAT_TYPE(_dstType);
-
+
srcType = _srcType;
int srcElemSize = (int)getElemSize(srcType);
dstType = _dstType;
bufType = _bufType;
-
+
filter2D = _filter2D;
rowFilter = _rowFilter;
columnFilter = _columnFilter;
if( _columnBorderType < 0 )
_columnBorderType = _rowBorderType;
-
+
rowBorderType = _rowBorderType;
columnBorderType = _columnBorderType;
-
+
CV_Assert( columnBorderType != BORDER_WRAP );
-
+
if( isSeparable() )
{
CV_Assert( !rowFilter.empty() && !columnFilter.empty() );
CV_Assert( 0 <= anchor.x && anchor.x < ksize.width &&
0 <= anchor.y && anchor.y < ksize.height );
- borderElemSize = srcElemSize/(CV_MAT_DEPTH(srcType) >= CV_32S ? sizeof(int) : 1);
+ borderElemSize = srcElemSize/(CV_MAT_DEPTH(srcType) >= CV_32S ? sizeof(int) : 1);
int borderLength = std::max(ksize.width - 1, 1);
borderTab.resize(borderLength*borderElemSize);
int FilterEngine::start(Size _wholeSize, Rect _roi, int _maxBufRows)
{
int i, j;
-
+
wholeSize = _wholeSize;
roi = _roi;
CV_Assert( roi.x >= 0 && roi.y >= 0 && roi.width >= 0 && roi.height >= 0 &&
int n = (int)constBorderValue.size(), N;
N = (maxWidth + ksize.width - 1)*esz;
tdst = isSeparable() ? &srcRow[0] : dst;
-
+
for( i = 0; i < N; i += n )
{
n = std::min( n, N - i );
if( isSeparable() )
(*rowFilter)(&srcRow[0], dst, maxWidth, cn);
}
-
+
int maxBufStep = bufElemSize*(int)alignSize(maxWidth +
(!isSeparable() ? ksize.width - 1 : 0),VEC_ALIGN);
ringBuf.resize(maxBufStep*rows.size()+VEC_ALIGN);
else
{
int xofs1 = std::min(roi.x, anchor.x) - roi.x;
-
+
int btab_esz = borderElemSize, wholeWidth = wholeSize.width;
int* btab = (int*)&borderTab[0];
-
+
for( i = 0; i < dx1; i++ )
{
int p0 = (borderInterpolate(i-dx1, wholeWidth, rowBorderType) + xofs1)*btab_esz;
bool isolated, int maxBufRows)
{
Rect srcRoi = _srcRoi;
-
+
if( srcRoi == Rect(0,0,-1,-1) )
srcRoi = Rect(0,0,src.cols,src.rows);
-
+
CV_Assert( srcRoi.x >= 0 && srcRoi.y >= 0 &&
srcRoi.width >= 0 && srcRoi.height >= 0 &&
srcRoi.x + srcRoi.width <= src.cols &&
srcRoi.y + srcRoi.height <= src.rows );
Point ofs;
- Size wholeSize(src.cols, src.rows);
+ Size wsz(src.cols, src.rows);
if( !isolated )
- src.locateROI( wholeSize, ofs );
- start( wholeSize, srcRoi + ofs, maxBufRows );
+ src.locateROI( wsz, ofs );
+ start( wsz, srcRoi + ofs, maxBufRows );
return startY - ofs.y;
}
uchar* dst, int dststep )
{
CV_Assert( wholeSize.width > 0 && wholeSize.height > 0 );
-
+
const int *btab = &borderTab[0];
int esz = (int)getElemSize(srcType), btab_esz = borderElemSize;
uchar** brows = &rows[0];
int bi = (startY - startY0 + rowCount) % bufRows;
uchar* brow = alignPtr(&ringBuf[0], VEC_ALIGN) + bi*bufStep;
uchar* row = isSep ? &srcRow[0] : brow;
-
+
if( ++rowCount > bufRows )
{
--rowCount;
row[i + (width1 - _dx2)*esz] = src[btab[i+_dx1*esz]];
}
}
-
+
if( isSep )
(*rowFilter)(row, brow, width, CV_MAT_CN(srcType));
}
const Rect& _srcRoi, Point dstOfs, bool isolated)
{
CV_Assert( src.type() == srcType && dst.type() == dstType );
-
+
Rect srcRoi = _srcRoi;
if( srcRoi == Rect(0,0,-1,-1) )
srcRoi = Rect(0,0,src.cols,src.rows);
-
+
if( srcRoi.area() == 0 )
return;
{
if( !checkHardwareSupport(CV_CPU_SSE2) )
return 0;
-
+
int i = 0, k, _ksize = kernel.rows + kernel.cols - 1;
int* dst = (int*)_dst;
const int* _kx = (const int*)kernel.data;
s2 = _mm_add_epi32(s2, _mm_unpacklo_epi16(x2, x3));
s3 = _mm_add_epi32(s3, _mm_unpackhi_epi16(x2, x3));
}
-
+
_mm_store_si128((__m128i*)(dst + i), s0);
_mm_store_si128((__m128i*)(dst + i + 4), s1);
_mm_store_si128((__m128i*)(dst + i + 8), s2);
{
if( !checkHardwareSupport(CV_CPU_SSE2) )
return 0;
-
+
int i = 0, j, k, _ksize = kernel.rows + kernel.cols - 1;
int* dst = (int*)_dst;
bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0;
{
if( !checkHardwareSupport(CV_CPU_SSE2) )
return 0;
-
+
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
const float* ky = (const float*)kernel.data + ksize2;
int i = 0, k;
{
if( !checkHardwareSupport(CV_CPU_SSE2) )
return 0;
-
+
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
const float* ky = (const float*)kernel.data + ksize2;
int i = 0;
Mat kernel;
};
-
+
/////////////////////////////////////// 16s //////////////////////////////////
-
+
struct RowVec_16s32f
{
RowVec_16s32f() {}
kernel = _kernel;
sse2_supported = checkHardwareSupport(CV_CPU_SSE2);
}
-
+
int operator()(const uchar* _src, uchar* _dst, int width, int cn) const
{
if( !sse2_supported )
return 0;
-
+
int i = 0, k, _ksize = kernel.rows + kernel.cols - 1;
float* dst = (float*)_dst;
const float* _kx = (const float*)kernel.data;
width *= cn;
-
+
for( ; i <= width - 8; i += 8 )
{
const short* src = (const short*)_src + i;
{
f = _mm_load_ss(_kx+k);
f = _mm_shuffle_ps(f, f, 0);
-
+
__m128i x0i = _mm_loadu_si128((const __m128i*)src);
__m128i x1i = _mm_srai_epi32(_mm_unpackhi_epi16(x0i, x0i), 16);
x0i = _mm_srai_epi32(_mm_unpacklo_epi16(x0i, x0i), 16);
}
return i;
}
-
+
Mat kernel;
bool sse2_supported;
};
-
-
+
+
struct SymmColumnVec_32f16s
{
SymmColumnVec_32f16s() { symmetryType=0; }
CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 );
sse2_supported = checkHardwareSupport(CV_CPU_SSE2);
}
-
+
int operator()(const uchar** _src, uchar* _dst, int width) const
{
if( !sse2_supported )
return 0;
-
+
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
const float* ky = (const float*)kernel.data + ksize2;
int i = 0, k;
const float *S, *S2;
short* dst = (short*)_dst;
__m128 d4 = _mm_set1_ps(delta);
-
+
if( symmetrical )
{
for( ; i <= width - 16; i += 16 )
s3 = _mm_load_ps(S+12);
s2 = _mm_add_ps(_mm_mul_ps(s2, f), d4);
s3 = _mm_add_ps(_mm_mul_ps(s3, f), d4);
-
+
for( k = 1; k <= ksize2; k++ )
{
S = src[k] + i;
s2 = _mm_add_ps(s2, _mm_mul_ps(x0, f));
s3 = _mm_add_ps(s3, _mm_mul_ps(x1, f));
}
-
+
__m128i s0i = _mm_cvtps_epi32(s0);
__m128i s1i = _mm_cvtps_epi32(s1);
__m128i s2i = _mm_cvtps_epi32(s2);
__m128i s3i = _mm_cvtps_epi32(s3);
-
+
_mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0i, s1i));
_mm_storeu_si128((__m128i*)(dst + i + 8), _mm_packs_epi32(s2i, s3i));
}
-
+
for( ; i <= width - 4; i += 4 )
{
__m128 f = _mm_load_ss(ky);
f = _mm_shuffle_ps(f, f, 0);
__m128 x0, s0 = _mm_load_ps(src[0] + i);
s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4);
-
+
for( k = 1; k <= ksize2; k++ )
{
f = _mm_load_ss(ky+k);
x0 = _mm_add_ps(_mm_load_ps(src[k]+i), _mm_load_ps(src[-k] + i));
s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
}
-
+
__m128i s0i = _mm_cvtps_epi32(s0);
_mm_storel_epi64((__m128i*)(dst + i), _mm_packs_epi32(s0i, s0i));
}
__m128 f, s0 = d4, s1 = d4, s2 = d4, s3 = d4;
__m128 x0, x1;
S = src[0] + i;
-
+
for( k = 1; k <= ksize2; k++ )
{
S = src[k] + i;
s2 = _mm_add_ps(s2, _mm_mul_ps(x0, f));
s3 = _mm_add_ps(s3, _mm_mul_ps(x1, f));
}
-
+
__m128i s0i = _mm_cvtps_epi32(s0);
__m128i s1i = _mm_cvtps_epi32(s1);
__m128i s2i = _mm_cvtps_epi32(s2);
__m128i s3i = _mm_cvtps_epi32(s3);
-
+
_mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0i, s1i));
_mm_storeu_si128((__m128i*)(dst + i + 8), _mm_packs_epi32(s2i, s3i));
}
-
+
for( ; i <= width - 4; i += 4 )
{
__m128 f, x0, s0 = d4;
-
+
for( k = 1; k <= ksize2; k++ )
{
f = _mm_load_ss(ky+k);
x0 = _mm_sub_ps(_mm_load_ps(src[k]+i), _mm_load_ps(src[-k] + i));
s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f));
}
-
+
__m128i s0i = _mm_cvtps_epi32(s0);
_mm_storel_epi64((__m128i*)(dst + i), _mm_packs_epi32(s0i, s0i));
}
}
-
+
return i;
}
-
+
int symmetryType;
float delta;
Mat kernel;
bool sse2_supported;
-};
-
+};
+
/////////////////////////////////////// 32f //////////////////////////////////
{
if( !checkHardwareSupport(CV_CPU_SSE) )
return 0;
-
+
int i = 0, k, _ksize = kernel.rows + kernel.cols - 1;
float* dst = (float*)_dst;
const float* _kx = (const float*)kernel.data;
{
if( !checkHardwareSupport(CV_CPU_SSE) )
return 0;
-
+
int i = 0, _ksize = kernel.rows + kernel.cols - 1;
float* dst = (float*)_dst;
const float* src = (const float*)_src + (_ksize/2)*cn;
y0 = _mm_mul_ps(_mm_add_ps(y0, y2), k1);
x0 = _mm_add_ps(x0, _mm_mul_ps(x1, k0));
y0 = _mm_add_ps(y0, _mm_mul_ps(y1, k0));
-
+
x2 = _mm_add_ps(_mm_loadu_ps(src + cn*2), _mm_loadu_ps(src - cn*2));
y2 = _mm_add_ps(_mm_loadu_ps(src + cn*2 + 4), _mm_loadu_ps(src - cn*2 + 4));
x0 = _mm_add_ps(x0, _mm_mul_ps(x2, k2));
y0 = _mm_add_ps(y0, _mm_mul_ps(y2, k2));
-
+
_mm_store_ps(dst + i, x0);
_mm_store_ps(dst + i + 4, y0);
}
x0 = _mm_mul_ps(_mm_sub_ps(x0, x2), k1);
y0 = _mm_mul_ps(_mm_sub_ps(y0, y2), k1);
-
+
x2 = _mm_sub_ps(_mm_loadu_ps(src + cn*2), _mm_loadu_ps(src - cn*2));
y2 = _mm_sub_ps(_mm_loadu_ps(src + cn*2 + 4), _mm_loadu_ps(src - cn*2 + 4));
x0 = _mm_add_ps(x0, _mm_mul_ps(x2, k2));
y0 = _mm_add_ps(y0, _mm_mul_ps(y2, k2));
-
+
_mm_store_ps(dst + i, x0);
_mm_store_ps(dst + i + 4, y0);
}
{
if( !checkHardwareSupport(CV_CPU_SSE) )
return 0;
-
+
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
const float* ky = (const float*)kernel.data + ksize2;
int i = 0, k;
{
if( !checkHardwareSupport(CV_CPU_SSE) )
return 0;
-
+
int ksize2 = (kernel.rows + kernel.cols - 1)/2;
const float* ky = (const float*)kernel.data + ksize2;
int i = 0;
{
if( !checkHardwareSupport(CV_CPU_SSE2) )
return 0;
-
+
const float* kf = (const float*)&coeffs[0];
int i = 0, k, nz = _nz;
__m128 d4 = _mm_set1_ps(delta);
{
if( !checkHardwareSupport(CV_CPU_SSE2) )
return 0;
-
+
const float* kf = (const float*)&coeffs[0];
short* dst = (short*)_dst;
int i = 0, k, nz = _nz;
{
if( !checkHardwareSupport(CV_CPU_SSE) )
return 0;
-
+
const float* kf = (const float*)&coeffs[0];
const float** src = (const float**)_src;
float* dst = (float*)_dst;
(kernel.rows == 1 || kernel.cols == 1));
vecOp = _vecOp;
}
-
+
void operator()(const uchar* src, uchar* dst, int width, int cn)
{
int _ksize = ksize;
s0 += f*S[0]; s1 += f*S[1];
s2 += f*S[2]; s3 += f*S[3];
}
-
+
D[i] = s0; D[i+1] = s1;
D[i+2] = s2; D[i+3] = s3;
}
symmetryType = _symmetryType;
CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 && this->ksize <= 5 );
}
-
+
void operator()(const uchar* src, uchar* dst, int width, int cn)
{
int ksize2 = this->ksize/2, ksize2n = ksize2*cn;
{
typedef typename CastOp::type1 ST;
typedef typename CastOp::rtype DT;
-
+
ColumnFilter( const Mat& _kernel, int _anchor,
double _delta, const CastOp& _castOp=CastOp(),
const VecOp& _vecOp=VecOp() )
{
DT* D = (DT*)dst;
i = vecOp(src, dst, width);
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
ST f = ky[0];
{
typedef typename CastOp::type1 ST;
typedef typename CastOp::rtype DT;
-
+
SymmColumnSmallFilter( const Mat& _kernel, int _anchor,
double _delta, int _symmetryType,
const CastOp& _castOp=CastOp(),
{
if( is_1_2_1 )
{
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
ST s0 = S0[i] + S1[i]*2 + S2[i] + _delta;
D[i+3] = castOp(s1);
}
#else
- for( ; i < width; i ++ )
+ for( ; i < width; i ++ )
{
ST s0 = S0[i] + S1[i]*2 + S2[i] + _delta;
D[i] = castOp(s0);
}
else if( is_1_m2_1 )
{
- #if CV_ENABLE_UNROLLED
+ #if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
ST s0 = S0[i] - S1[i]*2 + S2[i] + _delta;
D[i+3] = castOp(s1);
}
#else
- for( ; i < width; i ++ )
+ for( ; i < width; i ++ )
{
ST s0 = S0[i] - S1[i]*2 + S2[i] + _delta;
D[i] = castOp(s0);
D[i+3] = castOp(s1);
}
#else
- for( ; i < width; i ++ )
+ for( ; i < width; i ++ )
{
ST s0 = S2[i] - S0[i] + _delta;
D[i] = castOp(s0);
};
}
-
+
cv::Ptr<cv::BaseRowFilter> cv::getLinearRowFilter( int srcType, int bufType,
InputArray _kernel, int anchor,
int symmetryType )
return Ptr<BaseRowFilter>(new SymmRowSmallFilter<float, float, SymmRowSmallVec_32f>
(kernel, anchor, symmetryType, SymmRowSmallVec_32f(kernel, symmetryType)));
}
-
+
if( sdepth == CV_8U && ddepth == CV_32S )
return Ptr<BaseRowFilter>(new RowFilter<uchar, int, RowVec_8u32s>
(kernel, anchor, RowVec_8u32s(kernel)));
cv::Ptr<cv::BaseColumnFilter> cv::getLinearColumnFilter( int bufType, int dstType,
InputArray _kernel, int anchor,
- int symmetryType, double delta,
+ int symmetryType, double delta,
int bits )
{
Mat kernel = _kernel.getMat();
{
typedef typename CastOp::type1 KT;
typedef typename CastOp::rtype DT;
-
+
Filter2D( const Mat& _kernel, Point _anchor,
double _delta, const CastOp& _castOp=CastOp(),
const VecOp& _vecOp=VecOp() )
kernel = _kernel;
else
_kernel.convertTo(kernel, kdepth, _kernel.type() == CV_32S ? 1./(1 << bits) : 1.);
-
+
if( sdepth == CV_8U && ddepth == CV_8U )
return Ptr<BaseFilter>(new Filter2D<uchar, Cast<float, uchar>, FilterVec_8u>
(kernel, anchor, delta, Cast<float, uchar>(), FilterVec_8u(kernel, 0, delta)));
{
Mat _kernel = filter_kernel.getMat();
_srcType = CV_MAT_TYPE(_srcType);
- _dstType = CV_MAT_TYPE(_dstType);
+ _dstType = CV_MAT_TYPE(_dstType);
int cn = CV_MAT_CN(_srcType);
CV_Assert( cn == CV_MAT_CN(_dstType) );
int bits = 0;
/*int sdepth = CV_MAT_DEPTH(_srcType), ddepth = CV_MAT_DEPTH(_dstType);
- int ktype = _kernel.depth() == CV_32S ? KERNEL_INTEGER : getKernelType(_kernel, _anchor);
+ int ktype = _kernel.depth() == CV_32S ? KERNEL_INTEGER : getKernelType(_kernel, _anchor);
if( sdepth == CV_8U && (ddepth == CV_8U || ddepth == CV_16S) &&
_kernel.rows*_kernel.cols <= (1 << 10) )
{
bits = (ktype & KERNEL_INTEGER) ? 0 : 11;
_kernel.convertTo(kernel, CV_32S, 1 << bits);
}*/
-
+
Ptr<BaseFilter> _filter2D = getLinearFilter(_srcType, _dstType,
kernel, _anchor, _delta, bits);
double delta, int borderType )
{
Mat src = _src.getMat(), kernel = _kernel.getMat();
-
+
if( ddepth < 0 )
ddepth = src.depth();
double delta, int borderType )
{
Mat src = _src.getMat(), kernelX = _kernelX.getMat(), kernelY = _kernelY.getMat();
-
+
if( ddepth < 0 )
ddepth = src.depth();
typedef DiffC1<float> Diff32fC1;
typedef DiffC3<cv::Vec3f> Diff32fC3;
-cv::Vec3i& operator += (cv::Vec3i& a, const cv::Vec3b& b)
+static cv::Vec3i& operator += (cv::Vec3i& a, const cv::Vec3b& b)
{
a[0] += b[0];
a[1] += b[1];
{
cv::Ptr<CvMat> tempMask;
cv::AutoBuffer<CvFFillSegment> buffer;
-
+
if( comp )
memset( comp, 0, sizeof(*comp) );
{
/*int elem_size = CV_ELEM_SIZE(type);
const uchar* seed_ptr = img->data.ptr + img->step*seed_point.y + elem_size*seed_point.x;
-
+
// check if the new value is different from the current value at the seed point.
// if they are exactly the same, use the generic version with mask to avoid infinite loops.
for( i = 0; i < elem_size; i++ )
if( seed_ptr[i] != ((uchar*)nv_buf)[i] )
break;
-
+
if( i == elem_size )
return;*/
-
+
if( type == CV_8UC1 )
icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.b[0],
comp, flags, buffer, buffer_size);
}
int cv::floodFill( InputOutputArray _image, InputOutputArray _mask,
- Point seedPoint, Scalar newVal, Rect* rect,
+ Point seedPoint, Scalar newVal, Rect* rect,
Scalar loDiff, Scalar upDiff, int flags )
{
CvConnectedComp ccomp;
int ts;
int dist;
TWeight weight;
- uchar t;
+ uchar t;
};
class Edge
{
v->t = v->weight < 0;
}
else
- v->parent = 0;
+ v->parent = 0;
}
first = first->next;
last->next = nilNode;
curr_ts++;
while( !orphans.empty() )
{
- Vtx* v = orphans.back();
+ Vtx* v2 = orphans.back();
orphans.pop_back();
int d, minDist = INT_MAX;
e0 = 0;
- vt = v->t;
+ vt = v2->t;
- for( ei = v->first; ei != 0; ei = edgePtr[ei].next )
+ for( ei = v2->first; ei != 0; ei = edgePtr[ei].next )
{
if( edgePtr[ei^(vt^1)].weight == 0 )
continue;
}
}
- if( (v->parent = e0) > 0 )
+ if( (v2->parent = e0) > 0 )
{
- v->ts = curr_ts;
- v->dist = minDist;
+ v2->ts = curr_ts;
+ v2->dist = minDist;
continue;
}
/* no parent is found */
- v->ts = 0;
- for( ei = v->first; ei != 0; ei = edgePtr[ei].next )
+ v2->ts = 0;
+ for( ei = v2->first; ei != 0; ei = edgePtr[ei].next )
{
u = vtxPtr+edgePtr[ei].dst;
ej = u->parent;
u->next = nilNode;
last = last->next = u;
}
- if( ej > 0 && vtxPtr+edgePtr[ej].dst == v )
+ if( ej > 0 && vtxPtr+edgePtr[ej].dst == v2 )
{
orphans.push_back(u);
u->parent = ORPHAN;
Calculate beta - parameter of GrabCut algorithm.
beta = 1/(2*avg(sqr(||color[i] - color[j]||)))
*/
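/*
  With this choice an "average" colour difference gives beta*||color[i]-color[j]||^2 == 0.5,
  so the left/up N-link weight gamma*exp(-beta*||color[i]-color[j]||^2) computed in
  calcNWeights comes out around 0.61*gamma.
*/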
-double calcBeta( const Mat& img )
+static double calcBeta( const Mat& img )
{
double beta = 0;
for( int y = 0; y < img.rows; y++ )
  Calculate weights of non-terminal vertices of the graph.
beta and gamma - parameters of GrabCut algorithm.
*/
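/*
  Diagonal neighbours (upleft/upright) use gamma/sqrt(2): the smoothness term is
  scaled by 1/dist(p,q), and diagonally adjacent pixels are sqrt(2) apart.
*/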
-void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& uprightW, double beta, double gamma )
+static void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& uprightW, double beta, double gamma )
{
const double gammaDivSqrt2 = gamma / std::sqrt(2.0f);
leftW.create( img.rows, img.cols, CV_64FC1 );
/*
Check size, type and element values of mask matrix.
*/
-void checkMask( const Mat& img, const Mat& mask )
+static void checkMask( const Mat& img, const Mat& mask )
{
if( mask.empty() )
CV_Error( CV_StsBadArg, "mask is empty" );
/*
  Initialize mask using the rectangle.
*/
-void initMaskWithRect( Mat& mask, Size imgSize, Rect rect )
+static void initMaskWithRect( Mat& mask, Size imgSize, Rect rect )
{
mask.create( imgSize, CV_8UC1 );
mask.setTo( GC_BGD );
/*
Initialize GMM background and foreground models using kmeans algorithm.
*/
-void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM )
+static void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM )
{
const int kMeansItCount = 10;
const int kMeansType = KMEANS_PP_CENTERS;
/*
Assign GMMs components for each pixel.
*/
-void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, Mat& compIdxs )
+static void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, Mat& compIdxs )
{
Point p;
for( p.y = 0; p.y < img.rows; p.y++ )
/*
Learn GMMs parameters.
*/
-void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGMM, GMM& fgdGMM )
+static void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGMM, GMM& fgdGMM )
{
bgdGMM.initLearning();
fgdGMM.initLearning();
/*
Construct GCGraph
*/
-void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, double lambda,
+static void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, double lambda,
const Mat& leftW, const Mat& upleftW, const Mat& upW, const Mat& uprightW,
GCGraph<double>& graph )
{
/*
Estimate segmentation using MaxFlow algorithm
*/
-void estimateSegmentation( GCGraph<double>& graph, Mat& mask )
+static void estimateSegmentation( GCGraph<double>& graph, Mat& mask )
{
graph.maxFlow();
Point p;
Mat& mask = _mask.getMatRef();
Mat& bgdModel = _bgdModel.getMatRef();
Mat& fgdModel = _fgdModel.getMatRef();
-
+
if( img.empty() )
CV_Error( CV_StsBadArg, "image is empty" );
if( img.type() != CV_8UC3 )
// copy or use the software.
//
//
-// Intel License Agreement
-// For Open Source Computer Vision Library
+// Intel License Agreement
+// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
template<> void Ptr<CvHistogram>::delete_obj()
{ cvReleaseHist(&obj); }
-
+
////////////////// Helper functions //////////////////////
static const size_t OUT_OF_RANGE = (size_t)1 << (sizeof(size_t)*8 - 2);
int i, j;
_tab.resize((high-low)*dims);
size_t* tab = &_tab[0];
-
+
if( uniform )
{
for( i = 0; i < dims; i++ )
double b = uniranges[i*2+1];
int sz = !issparse ? hist.size[i] : shist.size(i);
size_t step = !issparse ? hist.step[i] : 1;
-
+
for( j = low; j < high; j++ )
{
int idx = cvFloor(j*a + b);
written_idx = idx*step;
else
written_idx = OUT_OF_RANGE;
-
+
tab[i*(high - low) + j - low] = written_idx;
}
}
int idx = -1, sz = !issparse ? hist.size[i] : shist.size(i);
size_t written_idx = OUT_OF_RANGE;
size_t step = !issparse ? hist.step[i] : 1;
-
+
for(j = low;;)
{
for( ; j < limit; j++ )
tab[i*(high - low) + j - low] = written_idx;
-
+
if( (unsigned)(++idx) < (unsigned)sz )
{
limit = std::min(cvCeil(ranges[i][idx+1]), high);
{
int i, j, c;
CV_Assert( channels != 0 || nimages == dims );
-
+
imsize = images[0].size();
int depth = images[0].depth(), esz1 = (int)images[0].elemSize1();
bool isContinuous = true;
-
+
ptrs.resize(dims + 1);
deltas.resize((dims + 1)*2);
-
+
for( i = 0; i < dims; i++ )
{
if(!channels)
break;
CV_Assert( j < nimages );
}
-
+
CV_Assert( images[j].size() == imsize && images[j].depth() == depth );
if( !images[j].isContinuous() )
isContinuous = false;
deltas[i*2] = images[j].channels();
deltas[i*2+1] = (int)(images[j].step/esz1 - imsize.width*deltas[i*2]);
}
-
+
if( mask.data )
{
CV_Assert( mask.size() == imsize && mask.channels() == 1 );
deltas[dims*2] = 1;
deltas[dims*2 + 1] = (int)(mask.step/mask.elemSize1());
}
-
+
if( isContinuous )
{
imsize.width *= imsize.height;
imsize.height = 1;
}
-
+
if( !ranges )
{
CV_Assert( depth == CV_8U );
-
+
uniranges.resize( dims*2 );
for( i = 0; i < dims; i++ )
{
{
for( i = 0; i < dims; i++ )
{
- size_t j, n = histSize[i];
- for( j = 0; j < n; j++ )
- CV_Assert( ranges[i][j] < ranges[i][j+1] );
+ size_t n = histSize[i];
+ for(size_t k = 0; k < n; k++ )
+ CV_Assert( ranges[i][k] < ranges[i][k+1] );
}
}
}
-
-
-////////////////////////////////// C A L C U L A T E H I S T O G R A M ////////////////////////////////////
-
+
+
+////////////////////////////////// C A L C U L A T E H I S T O G R A M ////////////////////////////////////
+
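/*
  Uniform-bin indexing used below: per dimension the prepared uniranges give
  a = histSize/(high - low) and b = -low*a, so a value v maps to bin cvFloor(v*a + b);
  e.g. 32 bins over [0,256) send v == 200 to bin 25.
*/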
template<typename T> static void
calcHist_( vector<uchar*>& _ptrs, const vector<int>& _deltas,
Size imsize, Mat& hist, int dims, const float** _ranges,
int mstep = _deltas[dims*2 + 1];
int size[CV_MAX_DIM];
size_t hstep[CV_MAX_DIM];
-
+
for( i = 0; i < dims; i++ )
{
size[i] = hist.size[i];
hstep[i] = hist.step[i];
}
-
+
if( uniform )
{
const double* uniranges = &_uniranges[0];
-
+
if( dims == 1 )
{
double a = uniranges[0], b = uniranges[1];
int sz = size[0], d0 = deltas[0], step0 = deltas[1];
const T* p0 = (const T*)ptrs[0];
-
+
for( ; imsize.height--; p0 += step0, mask += mstep )
{
if( !mask )
size_t hstep0 = hstep[0];
const T* p0 = (const T*)ptrs[0];
const T* p1 = (const T*)ptrs[1];
-
+
for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep )
{
if( !mask )
size_t hstep0 = hstep[0], hstep1 = hstep[1];
const T* p0 = (const T*)ptrs[0];
const T* p1 = (const T*)ptrs[1];
- const T* p2 = (const T*)ptrs[2];
-
+ const T* p2 = (const T*)ptrs[2];
+
for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep )
{
if( !mask )
ptrs[i] += deltas[i*2];
Hptr += idx*hstep[i];
}
-
+
if( i == dims )
++*((int*)Hptr);
else
ptrs[i] += deltas[i*2];
Hptr += idx*hstep[i];
}
-
+
if( i == dims )
++*((int*)Hptr);
else
const float* ranges[CV_MAX_DIM];
for( i = 0; i < dims; i++ )
ranges[i] = &_ranges[i][0];
-
+
for( ; imsize.height--; mask += mstep )
{
for( x = 0; x < imsize.width; x++ )
{
uchar* Hptr = H;
i = 0;
-
+
if( !mask || mask[x] )
for( ; i < dims; i++ )
{
float v = (float)*ptrs[i];
const float* R = ranges[i];
int idx = -1, sz = size[i];
-
+
while( v >= R[idx+1] && ++idx < sz )
; // nop
-
+
if( (unsigned)idx >= (unsigned)sz )
break;
ptrs[i] += deltas[i*2];
Hptr += idx*hstep[i];
}
-
+
if( i == dims )
++*((int*)Hptr);
else
for( ; i < dims; i++ )
ptrs[i] += deltas[i*2];
}
-
+
for( i = 0; i < dims; i++ )
ptrs[i] += deltas[i*2 + 1];
}
- }
+ }
}
-
-
+
+
static void
calcHist_8u( vector<uchar*>& _ptrs, const vector<int>& _deltas,
Size imsize, Mat& hist, int dims, const float** _ranges,
uchar** ptrs = &_ptrs[0];
const int* deltas = &_deltas[0];
uchar* H = hist.data;
- int i, x;
+ int x;
const uchar* mask = _ptrs[dims];
int mstep = _deltas[dims*2 + 1];
vector<size_t> _tab;
-
+
calcHistLookupTables_8u( hist, SparseMat(), dims, _ranges, _uniranges, uniform, false, _tab );
const size_t* tab = &_tab[0];
-
+
if( dims == 1 )
{
int d0 = deltas[0], step0 = deltas[1];
int matH[256] = {0};
const uchar* p0 = (const uchar*)ptrs[0];
-
+
for( ; imsize.height--; p0 += step0, mask += mstep )
{
if( !mask )
}
p0 += x;
}
- else
+ else
for( x = 0; x <= imsize.width - 4; x += 4 )
{
int t0 = p0[0], t1 = p0[d0];
matH[t0]++; matH[t1]++;
p0 += d0*2;
}
-
+
for( ; x < imsize.width; x++, p0 += d0 )
matH[*p0]++;
}
if( mask[x] )
matH[*p0]++;
}
-
- for( i = 0; i < 256; i++ )
+
+ for(int i = 0; i < 256; i++ )
{
size_t hidx = tab[i];
if( hidx < OUT_OF_RANGE )
d1 = deltas[2], step1 = deltas[3];
const uchar* p0 = (const uchar*)ptrs[0];
const uchar* p1 = (const uchar*)ptrs[1];
-
+
for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep )
{
if( !mask )
int d0 = deltas[0], step0 = deltas[1],
d1 = deltas[2], step1 = deltas[3],
d2 = deltas[4], step2 = deltas[5];
-
+
const uchar* p0 = (const uchar*)ptrs[0];
const uchar* p1 = (const uchar*)ptrs[1];
const uchar* p2 = (const uchar*)ptrs[2];
-
+
for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep )
{
if( !mask )
for( x = 0; x < imsize.width; x++ )
{
uchar* Hptr = H;
- for( i = 0; i < dims; i++ )
+ int i = 0;
+ for( ; i < dims; i++ )
{
size_t idx = tab[*ptrs[i] + i*256];
if( idx >= OUT_OF_RANGE )
Hptr += idx;
ptrs[i] += deltas[i*2];
}
-
+
if( i == dims )
++*((int*)Hptr);
else
Hptr += idx;
ptrs[i] += deltas[i*2];
}
-
+
if( i == dims )
++*((int*)Hptr);
else
for( ; i < dims; i++ )
ptrs[i] += deltas[i*2];
}
- for( i = 0; i < dims; i++ )
+ for(int i = 0; i < dims; i++ )
ptrs[i] += deltas[i*2 + 1];
}
}
const float** ranges, bool uniform, bool accumulate )
{
Mat mask = _mask.getMat();
-
+
CV_Assert(dims > 0 && histSize);
-
+
uchar* histdata = _hist.getMat().data;
_hist.create(dims, histSize, CV_32F);
Mat hist = _hist.getMat(), ihist = hist;
ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S;
-
+
if( !accumulate || histdata != hist.data )
hist = Scalar(0.);
else
hist.convertTo(ihist, CV_32S);
-
+
vector<uchar*> ptrs;
vector<int> deltas;
vector<double> uniranges;
Size imsize;
-
+
CV_Assert( !mask.data || mask.type() == CV_8UC1 );
histPrepareImages( images, nimages, channels, mask, dims, hist.size, ranges,
uniform, ptrs, deltas, imsize, uniranges );
const double* _uniranges = uniform ? &uniranges[0] : 0;
-
+
int depth = images[0].depth();
-
+
if( depth == CV_8U )
calcHist_8u(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform );
else if( depth == CV_16U )
calcHist_<float>(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform );
else
CV_Error(CV_StsUnsupportedFormat, "");
-
+
ihist.convertTo(hist, CV_32F);
}
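
The dense cv::calcHist path above accumulates into a CV_32S view of the output and converts back to CV_32F at the end. A minimal usage sketch of this entry point, assuming a single-channel 8-bit image and one uniform 256-bin dimension:

    #include <opencv2/imgproc/imgproc.hpp>

    // Build a 256-bin grayscale histogram; hist comes back as a 256x1 CV_32F Mat.
    cv::Mat grayHistogram(const cv::Mat& gray8u)
    {
        int channels[]  = { 0 };
        int histSize[]  = { 256 };
        float range[]   = { 0.f, 256.f };
        const float* ranges[] = { range };
        cv::Mat hist;
        cv::calcHist(&gray8u, 1, channels, cv::Mat(), hist, 1, histSize, ranges,
                     true /*uniform*/, false /*accumulate*/);
        return hist;
    }
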
int mstep = _deltas[dims*2 + 1];
const int* size = hist.hdr->size;
int idx[CV_MAX_DIM];
-
+
if( uniform )
{
const double* uniranges = &_uniranges[0];
-
+
for( ; imsize.height--; mask += mstep )
{
for( x = 0; x < imsize.width; x++ )
break;
ptrs[i] += deltas[i*2];
}
-
+
if( i == dims )
++*(int*)hist.ptr(idx, true);
else
const float* ranges[CV_MAX_DIM];
for( i = 0; i < dims; i++ )
ranges[i] = &_ranges[i][0];
-
+
for( ; imsize.height--; mask += mstep )
{
for( x = 0; x < imsize.width; x++ )
{
i = 0;
-
+
if( !mask || mask[x] )
for( ; i < dims; i++ )
{
float v = (float)*ptrs[i];
const float* R = ranges[i];
int j = -1, sz = size[i];
-
+
while( v >= R[j+1] && ++j < sz )
; // nop
-
+
if( (unsigned)j >= (unsigned)sz )
break;
- ptrs[i] += deltas[i*2];
+ ptrs[i] += deltas[i*2];
idx[i] = j;
}
-
+
if( i == dims )
++*(int*)hist.ptr(idx, true);
else
for( ; i < dims; i++ )
ptrs[i] += deltas[i*2];
}
-
+
for( i = 0; i < dims; i++ )
ptrs[i] += deltas[i*2 + 1];
}
- }
-}
-
+ }
+}
+
static void
calcSparseHist_8u( vector<uchar*>& _ptrs, const vector<int>& _deltas,
Size imsize, SparseMat& hist, int dims, const float** _ranges,
{
uchar** ptrs = (uchar**)&_ptrs[0];
const int* deltas = &_deltas[0];
- int i, x;
+ int x;
const uchar* mask = _ptrs[dims];
int mstep = _deltas[dims*2 + 1];
int idx[CV_MAX_DIM];
vector<size_t> _tab;
-
+
calcHistLookupTables_8u( Mat(), hist, dims, _ranges, _uniranges, uniform, true, _tab );
const size_t* tab = &_tab[0];
-
+
for( ; imsize.height--; mask += mstep )
{
for( x = 0; x < imsize.width; x++ )
ptrs[i] += deltas[i*2];
idx[i] = (int)hidx;
}
-
+
if( i == dims )
++*(int*)hist.ptr(idx,true);
else
for( ; i < dims; i++ )
ptrs[i] += deltas[i*2];
}
- for( i = 0; i < dims; i++ )
+ for(int i = 0; i < dims; i++ )
ptrs[i] += deltas[i*2 + 1];
}
-}
-
+}
+
static void calcHist( const Mat* images, int nimages, const int* channels,
const Mat& mask, SparseMat& hist, int dims, const int* histSize,
const float** ranges, bool uniform, bool accumulate, bool keepInt )
{
size_t i, N;
-
+
if( !accumulate )
hist.create(dims, histSize, CV_32F);
else
val->i = cvRound(val->f);
}
}
-
+
vector<uchar*> ptrs;
vector<int> deltas;
vector<double> uniranges;
Size imsize;
-
+
CV_Assert( !mask.data || mask.type() == CV_8UC1 );
histPrepareImages( images, nimages, channels, mask, dims, hist.hdr->size, ranges,
uniform, ptrs, deltas, imsize, uniranges );
const double* _uniranges = uniform ? &uniranges[0] : 0;
-
+
int depth = images[0].depth();
if( depth == CV_8U )
calcSparseHist_8u(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform );
calcSparseHist_<float>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform );
else
CV_Error(CV_StsUnsupportedFormat, "");
-
+
if( !keepInt )
{
SparseMatIterator it = hist.begin();
}
}
}
-
+
}
-
+
void cv::calcHist( const Mat* images, int nimages, const int* channels,
InputArray _mask, SparseMat& hist, int dims, const int* histSize,
const float** ranges, bool uniform, bool accumulate )
{
int i, dims = (int)histSize.size(), rsz = (int)ranges.size(), csz = (int)channels.size();
int nimages = (int)images.total();
-
+
CV_Assert(nimages > 0 && dims > 0);
CV_Assert(rsz == dims*2 || (rsz == 0 && images.depth(0) == CV_8U));
CV_Assert(csz == 0 || csz == dims);
for( i = 0; i < rsz/2; i++ )
_ranges[i] = (float*)&ranges[i*2];
}
-
+
AutoBuffer<Mat> buf(nimages);
for( i = 0; i < nimages; i++ )
buf[i] = images.getMat(i);
-
+
calcHist(&buf[0], nimages, csz ? &channels[0] : 0,
mask, hist, dims, &histSize[0], rsz ? (const float**)_ranges : 0,
true, accumulate);
}
-/////////////////////////////////////// B A C K P R O J E C T ////////////////////////////////////
-
+/////////////////////////////////////// B A C K P R O J E C T ////////////////////////////////////
+
namespace cv
{
int bpstep = _deltas[dims*2 + 1];
int size[CV_MAX_DIM];
size_t hstep[CV_MAX_DIM];
-
+
for( i = 0; i < dims; i++ )
{
size[i] = hist.size[i];
hstep[i] = hist.step[i];
}
-
+
if( uniform )
{
const double* uniranges = &_uniranges[0];
-
+
if( dims == 1 )
{
double a = uniranges[0], b = uniranges[1];
int sz = size[0], d0 = deltas[0], step0 = deltas[1];
const T* p0 = (const T*)ptrs[0];
-
+
for( ; imsize.height--; p0 += step0, bproj += bpstep )
{
for( x = 0; x < imsize.width; x++, p0 += d0 )
size_t hstep0 = hstep[0];
const T* p0 = (const T*)ptrs[0];
const T* p1 = (const T*)ptrs[1];
-
+
for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep )
{
for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
size_t hstep0 = hstep[0], hstep1 = hstep[1];
const T* p0 = (const T*)ptrs[0];
const T* p1 = (const T*)ptrs[1];
- const T* p2 = (const T*)ptrs[2];
-
+ const T* p2 = (const T*)ptrs[2];
+
for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep )
{
for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
ptrs[i] += deltas[i*2];
Hptr += idx*hstep[i];
}
-
+
if( i == dims )
bproj[x] = saturate_cast<BT>(*(float*)Hptr*scale);
else
const float* ranges[CV_MAX_DIM];
for( i = 0; i < dims; i++ )
ranges[i] = &_ranges[i][0];
-
+
for( ; imsize.height--; bproj += bpstep )
{
for( x = 0; x < imsize.width; x++ )
float v = (float)*ptrs[i];
const float* R = ranges[i];
int idx = -1, sz = size[i];
-
+
while( v >= R[idx+1] && ++idx < sz )
; // nop
-
+
if( (unsigned)idx >= (unsigned)sz )
break;
ptrs[i] += deltas[i*2];
Hptr += idx*hstep[i];
}
-
+
if( i == dims )
bproj[x] = saturate_cast<BT>(*(float*)Hptr*scale);
else
ptrs[i] += deltas[i*2];
}
}
-
+
for( i = 0; i < dims; i++ )
ptrs[i] += deltas[i*2 + 1];
}
- }
+ }
}
uchar* bproj = _ptrs[dims];
int bpstep = _deltas[dims*2 + 1];
vector<size_t> _tab;
-
+
calcHistLookupTables_8u( hist, SparseMat(), dims, _ranges, _uniranges, uniform, false, _tab );
const size_t* tab = &_tab[0];
-
+
if( dims == 1 )
{
int d0 = deltas[0], step0 = deltas[1];
uchar matH[256] = {0};
const uchar* p0 = (const uchar*)ptrs[0];
-
+
for( i = 0; i < 256; i++ )
{
size_t hidx = tab[i];
if( hidx < OUT_OF_RANGE )
matH[i] = saturate_cast<uchar>(*(float*)(H + hidx)*scale);
}
-
+
for( ; imsize.height--; p0 += step0, bproj += bpstep )
{
if( d0 == 1 )
}
p0 += x;
}
- else
+ else
for( x = 0; x <= imsize.width - 4; x += 4 )
{
uchar t0 = matH[p0[0]], t1 = matH[p0[d0]];
bproj[x+2] = t0; bproj[x+3] = t1;
p0 += d0*2;
}
-
+
for( ; x < imsize.width; x++, p0 += d0 )
bproj[x] = matH[*p0];
}
d1 = deltas[2], step1 = deltas[3];
const uchar* p0 = (const uchar*)ptrs[0];
const uchar* p1 = (const uchar*)ptrs[1];
-
+
for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep )
{
for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
const uchar* p0 = (const uchar*)ptrs[0];
const uchar* p1 = (const uchar*)ptrs[1];
const uchar* p2 = (const uchar*)ptrs[2];
-
+
for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep )
{
for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
ptrs[i] += deltas[i*2];
Hptr += idx;
}
-
+
if( i == dims )
bproj[x] = saturate_cast<uchar>(*(float*)Hptr*scale);
else
ptrs[i] += deltas[i*2 + 1];
}
}
-}
+}
}
-
+
void cv::calcBackProject( const Mat* images, int nimages, const int* channels,
InputArray _hist, OutputArray _backProject,
const float** ranges, double scale, bool uniform )
vector<double> uniranges;
Size imsize;
int dims = hist.dims == 2 && hist.size[1] == 1 ? 1 : hist.dims;
-
+
CV_Assert( dims > 0 && hist.data );
_backProject.create( images[0].size(), images[0].depth() );
Mat backProject = _backProject.getMat();
histPrepareImages( images, nimages, channels, backProject, dims, hist.size, ranges,
uniform, ptrs, deltas, imsize, uniranges );
const double* _uniranges = uniform ? &uniranges[0] : 0;
-
+
int depth = images[0].depth();
if( depth == CV_8U )
calcBackProj_8u(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform);
const int* size = hist.hdr->size;
int idx[CV_MAX_DIM];
const SparseMat_<float>& hist_ = (const SparseMat_<float>&)hist;
-
+
if( uniform )
{
const double* uniranges = &_uniranges[0];
break;
ptrs[i] += deltas[i*2];
}
-
+
if( i == dims )
bproj[x] = saturate_cast<BT>(hist_(idx)*scale);
else
const float* ranges[CV_MAX_DIM];
for( i = 0; i < dims; i++ )
ranges[i] = &_ranges[i][0];
-
+
for( ; imsize.height--; bproj += bpstep )
{
for( x = 0; x < imsize.width; x++ )
float v = (float)*ptrs[i];
const float* R = ranges[i];
int j = -1, sz = size[i];
-
+
while( v >= R[j+1] && ++j < sz )
; // nop
-
+
if( (unsigned)j >= (unsigned)sz )
break;
idx[i] = j;
ptrs[i] += deltas[i*2];
}
-
+
if( i == dims )
bproj[x] = saturate_cast<BT>(hist_(idx)*scale);
else
ptrs[i] += deltas[i*2];
}
}
-
+
for( i = 0; i < dims; i++ )
ptrs[i] += deltas[i*2 + 1];
}
- }
+ }
}
int bpstep = _deltas[dims*2 + 1];
vector<size_t> _tab;
int idx[CV_MAX_DIM];
-
+
calcHistLookupTables_8u( Mat(), hist, dims, _ranges, _uniranges, uniform, true, _tab );
const size_t* tab = &_tab[0];
-
+
for( ; imsize.height--; bproj += bpstep )
{
for( x = 0; x < imsize.width; x++ )
idx[i] = (int)hidx;
ptrs[i] += deltas[i*2];
}
-
+
if( i == dims )
bproj[x] = saturate_cast<uchar>(hist.value<float>(idx)*scale);
else
for( i = 0; i < dims; i++ )
ptrs[i] += deltas[i*2 + 1];
}
-}
+}
}
vector<double> uniranges;
Size imsize;
int dims = hist.dims();
-
+
CV_Assert( dims > 0 );
_backProject.create( images[0].size(), images[0].depth() );
Mat backProject = _backProject.getMat();
dims, hist.hdr->size, ranges,
uniform, ptrs, deltas, imsize, uniranges );
const double* _uniranges = uniform ? &uniranges[0] : 0;
-
+
int depth = images[0].depth();
if( depth == CV_8U )
calcSparseBackProj_8u(ptrs, deltas, imsize, hist, dims, ranges,
CV_Error(CV_StsUnsupportedFormat, "");
}
-
+
void cv::calcBackProject( InputArrayOfArrays images, const vector<int>& channels,
InputArray hist, OutputArray dst,
const vector<float>& ranges,
for( i = 0; i < rsz/2; i++ )
_ranges[i] = (float*)&ranges[i*2];
}
-
+
AutoBuffer<Mat> buf(nimages);
for( i = 0; i < nimages; i++ )
buf[i] = images.getMat(i);
-
+
calcBackProject(&buf[0], nimages, csz ? &channels[0] : 0,
hist, dst, rsz ? (const float**)_ranges : 0, scale, true);
}
-
-////////////////// C O M P A R E H I S T O G R A M S ////////////////////////
+
+////////////////// C O M P A R E H I S T O G R A M S ////////////////////////
double cv::compareHist( InputArray _H1, InputArray _H2, int method )
{
NAryMatIterator it(arrays, planes);
double result = 0;
int j, len = (int)it.size;
-
+
CV_Assert( H1.type() == H2.type() && H1.type() == CV_32F );
-
+
double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0;
-
+
CV_Assert( it.planes[0].isContinuous() && it.planes[1].isContinuous() );
-
+
for( size_t i = 0; i < it.nplanes; i++, ++it )
{
const float* h1 = (const float*)it.planes[0].data;
const float* h2 = (const float*)it.planes[1].data;
len = it.planes[0].rows*it.planes[0].cols;
-
+
if( method == CV_COMP_CHISQR )
{
for( j = 0; j < len; j++ )
{
double a = h1[j];
double b = h2[j];
-
+
s12 += a*b;
s1 += a;
s11 += a*a;
else
CV_Error( CV_StsBadArg, "Unknown comparison method" );
}
-
+
if( method == CV_COMP_CORREL )
{
size_t total = H1.total();
s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.;
result = std::sqrt(std::max(1. - result*s1, 0.));
}
-
+
return result;
}
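
Written out, the accumulators above (s1, s2, s11, s12, s22 and result) reduce to the distance measures documented for compareHist; with N the total number of bins:

    d_{\mathrm{correl}}(H_1,H_2) = \frac{\sum_I (H_1(I)-\bar H_1)(H_2(I)-\bar H_2)}
                                        {\sqrt{\sum_I (H_1(I)-\bar H_1)^2 \sum_I (H_2(I)-\bar H_2)^2}},
    \qquad \bar H_k = \tfrac{1}{N}\sum_I H_k(I)

    d_{\chi^2}(H_1,H_2) = \sum_I \frac{(H_1(I)-H_2(I))^2}{H_1(I)}
    \quad (\text{terms with } H_1(I)=0 \text{ are skipped})

    d_{\mathrm{bhatt}}(H_1,H_2) = \sqrt{\,1 - \frac{1}{\sqrt{\bar H_1 \bar H_2 N^2}}\sum_I \sqrt{H_1(I)\,H_2(I)}\,}
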
-
+
double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method )
{
double result = 0;
int i, dims = H1.dims();
-
+
CV_Assert( dims > 0 && dims == H2.dims() && H1.type() == H2.type() && H1.type() == CV_32F );
for( i = 0; i < dims; i++ )
CV_Assert( H1.size(i) == H2.size(i) );
-
+
const SparseMat *PH1 = &H1, *PH2 = &H2;
if( PH1->nzcount() > PH2->nzcount() && method != CV_COMP_CHISQR )
std::swap(PH1, PH2);
-
+
SparseMatConstIterator it = PH1->begin();
int N1 = (int)PH1->nzcount(), N2 = (int)PH2->nzcount();
-
+
if( method == CV_COMP_CHISQR )
{
for( i = 0; i < N1; i++, ++it )
else if( method == CV_COMP_CORREL )
{
double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0;
-
+
for( i = 0; i < N1; i++, ++it )
{
double v1 = it.value<float>();
s1 += v1;
s11 += v1*v1;
}
-
+
it = PH2->begin();
for( i = 0; i < N2; i++, ++it )
{
s2 += v2;
s22 += v2*v2;
}
-
+
size_t total = 1;
for( i = 0; i < H1.dims(); i++ )
total *= H1.size(i);
else if( method == CV_COMP_BHATTACHARYYA )
{
double s1 = 0, s2 = 0;
-
+
for( i = 0; i < N1; i++, ++it )
{
double v1 = it.value<float>();
result += std::sqrt(v1*v2);
s1 += v1;
}
-
+
it = PH2->begin();
for( i = 0; i < N2; i++, ++it )
s2 += it.value<float>();
-
+
s1 *= s2;
s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.;
result = std::sqrt(std::max(1. - result*s1, 0.));
}
else
CV_Error( CV_StsBadArg, "Unknown comparison method" );
-
+
return result;
}
-
+
const int CV_HIST_DEFAULT_TYPE = CV_32F;
/* Creates new histogram */
hist = (CvHistogram *)cvAlloc( sizeof( CvHistogram ));
hist->type = CV_HIST_MAGIC_VAL + ((int)type & 1);
- if (uniform) hist->type|= CV_HIST_UNIFORM_FLAG;
+ if (uniform) hist->type|= CV_HIST_UNIFORM_FLAG;
hist->thresh2 = 0;
hist->bins = 0;
if( type == CV_HIST_ARRAY )
cvReleaseData( temp->bins );
temp->bins = 0;
}
-
+
if( temp->thresh2 )
cvFree( &temp->thresh2 );
cvFree( &temp );
CvSparseMat* mat = (CvSparseMat*)hist->bins;
CvSparseMatIterator iterator;
CvSparseNode *node;
-
+
for( node = cvInitSparseMatIterator( mat, &iterator );
node != 0; node = cvGetNextSparseNode( &iterator ))
{
CvSparseMatIterator iterator;
CvSparseNode *node;
float scale;
-
+
for( node = cvInitSparseMatIterator( mat, &iterator );
node != 0; node = cvGetNextSparseNode( &iterator ))
{
int* idx_min, int* idx_max )
{
double minVal, maxVal;
- int i, dims, size[CV_MAX_DIM];
+ int dims, size[CV_MAX_DIM];
if( !CV_IS_HIST(hist) )
CV_Error( CV_StsBadArg, "Invalid histogram header" );
{
int imin = minPt.y*mat.cols + minPt.x;
int imax = maxPt.y*mat.cols + maxPt.x;
- int i;
-
- for( i = dims - 1; i >= 0; i-- )
+
+ for(int i = dims - 1; i >= 0; i-- )
{
if( idx_min )
{
minVal = maxVal = 0;
}
- for( i = 0; i < dims; i++ )
+ for(int i = 0; i < dims; i++ )
{
if( idx_min )
idx_min[i] = _idx_min ? _idx_min[i] : -1;
{
int i;
int size1[CV_MAX_DIM], size2[CV_MAX_DIM], total = 1;
-
+
if( !CV_IS_HIST(hist1) || !CV_IS_HIST(hist2) )
CV_Error( CV_StsBadArg, "Invalid histogram header[s]" );
cv::Mat H1((const CvMatND*)hist1->bins), H2((const CvMatND*)hist2->bins);
return cv::compareHist(H1, H2, method);
}
-
+
int dims1 = cvGetDims( hist1->bins, size1 );
int dims2 = cvGetDims( hist2->bins, size2 );
-
+
if( dims1 != dims2 )
CV_Error( CV_StsUnmatchedSizes,
"The histograms have different numbers of dimensions" );
-
+
for( i = 0; i < dims1; i++ )
{
if( size1[i] != size2[i] )
double s2 = 0, s22 = 0;
double s12 = 0;
double num, denom2, scale = 1./total;
-
+
for( node1 = cvInitSparseMatIterator( mat1, &iterator );
node1 != 0; node1 = cvGetNextSparseNode( &iterator ))
{
else if( method == CV_COMP_BHATTACHARYYA )
{
double s1 = 0, s2 = 0;
-
+
for( node1 = cvInitSparseMatIterator( mat1, &iterator );
node1 != 0; node1 = cvGetNextSparseNode( &iterator ))
{
}
else
CV_Error( CV_StsBadArg, "Unknown comparison method" );
-
+
return result;
}
float* ranges[CV_MAX_DIM];
float** thresh = 0;
CvHistogram* dst;
-
+
if( !_dst )
CV_Error( CV_StsNullPtr, "Destination double pointer is NULL" );
if( dst && is_sparse == CV_IS_SPARSE_MAT(dst->bins))
{
dims2 = cvGetDims( dst->bins, size2 );
-
+
if( dims1 == dims2 )
{
for( i = 0; i < dims1; i++ )
dims = cvGetDims( hist->bins, size );
for( i = 0; i < dims; i++ )
total += size[i]+1;
-
+
if( uniform )
{
for( i = 0; i < dims; i++ )
if( !ranges[i] )
CV_Error( CV_StsNullPtr, "One of <ranges> elements is NULL" );
-
+
for( j = 0; j <= size[i]; j++ )
{
float val = ranges[i][j];
int size[CV_MAX_DIM];
int i, dims = cvGetDims( hist->bins, size);
bool uniform = CV_IS_UNIFORM_HIST(hist);
-
+
cv::vector<cv::Mat> images(dims);
for( i = 0; i < dims; i++ )
images[i] = cv::cvarrToMat(img[i]);
-
+
cv::Mat _mask;
if( mask )
_mask = cv::cvarrToMat(mask);
-
+
const float* uranges[CV_MAX_DIM] = {0};
const float** ranges = 0;
-
+
if( hist->type & CV_HIST_RANGES_FLAG )
{
ranges = (const float**)hist->thresh2;
ranges = uranges;
}
}
-
+
if( !CV_IS_SPARSE_HIST(hist) )
{
cv::Mat H((const CvMatND*)hist->bins);
else
{
CvSparseMat* sparsemat = (CvSparseMat*)hist->bins;
-
+
if( !accumulate )
cvZero( hist->bins );
cv::SparseMat sH(sparsemat);
cv::calcHist( &images[0], (int)images.size(), 0, _mask, sH, sH.dims(),
sH.dims() > 0 ? sH.hdr->size : 0, ranges, uniform, accumulate != 0, true );
-
+
if( accumulate )
cvZero( sparsemat );
-
+
cv::SparseMatConstIterator it = sH.begin();
int nz = (int)sH.nzcount();
for( i = 0; i < nz; i++, ++it )
int size[CV_MAX_DIM];
int i, dims = cvGetDims( hist->bins, size );
-
+
bool uniform = CV_IS_UNIFORM_HIST(hist);
const float* uranges[CV_MAX_DIM] = {0};
const float** ranges = 0;
-
+
if( hist->type & CV_HIST_RANGES_FLAG )
{
ranges = (const float**)hist->thresh2;
ranges = uranges;
}
}
-
+
cv::vector<cv::Mat> images(dims);
for( i = 0; i < dims; i++ )
images[i] = cv::cvarrToMat(img[i]);
-
+
cv::Mat _dst = cv::cvarrToMat(dst);
-
+
CV_Assert( _dst.size() == images[0].size() && _dst.depth() == images[0].depth() );
-
+
if( !CV_IS_SPARSE_HIST(hist) )
{
cv::Mat H((const CvMatND*)hist->bins);
cvCalcBayesianProb( CvHistogram** src, int count, CvHistogram** dst )
{
int i;
-
+
if( !src || !dst )
CV_Error( CV_StsNullPtr, "NULL histogram array pointer" );
if( count < 2 )
CV_Error( CV_StsOutOfRange, "Too small number of histograms" );
-
+
for( i = 0; i < count; i++ )
{
if( !CV_IS_HIST(src[i]) || !CV_IS_HIST(dst[i]) )
if( !CV_IS_MATND(src[i]->bins) || !CV_IS_MATND(dst[i]->bins) )
CV_Error( CV_StsBadArg, "The function supports dense histograms only" );
}
-
+
cvZero( dst[0]->bins );
// dst[0] = src[0] + ... + src[count-1]
for( i = 0; i < count; i++ )
{
CvMat sstub, *src = cvGetMat(srcarr, &sstub);
CvMat dstub, *dst = cvGetMat(dstarr, &dstub);
-
+
CV_Assert( CV_ARE_SIZES_EQ(src, dst) && CV_ARE_TYPES_EQ(src, dst) &&
CV_MAT_TYPE(src->type) == CV_8UC1 );
CvSize size = cvGetMatSize(src);
const int hist_sz = 256;
int hist[hist_sz];
memset(hist, 0, sizeof(hist));
-
+
for( y = 0; y < size.height; y++ )
{
const uchar* sptr = src->data.ptr + src->step*y;
for( x = 0; x < size.width; x++ )
hist[sptr[x]]++;
}
-
+
float scale = 255.f/(size.width*size.height);
int sum = 0;
uchar lut[hist_sz+1];
h->mat.refcount = mat->refcount;
// increase refcount so freeing temp header doesn't free data
- cvIncRefData( mat );
+ cvIncRefData( mat );
// free temporary header
cvReleaseMatND( &mat );
int step, width, height;
int numangle, numrho;
int total = 0;
- float ang;
- int r, n;
int i, j;
float irho = 1 / rho;
double scale;
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
-
+
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
- for( ang = 0, n = 0; n < numangle; ang += theta, n++ )
+ float ang = 0;
+ for(int n = 0; n < numangle; ang += theta, n++ )
{
tabSin[n] = (float)(sin(ang) * irho);
tabCos[n] = (float)(cos(ang) * irho);
for( j = 0; j < width; j++ )
{
if( image[i * step + j] != 0 )
- for( n = 0; n < numangle; n++ )
+ for(int n = 0; n < numangle; n++ )
{
- r = cvRound( j * tabCos[n] + i * tabSin[n] );
+ int r = cvRound( j * tabCos[n] + i * tabSin[n] );
r += (numrho - 1) / 2;
accum[(n+1) * (numrho+2) + r+1]++;
}
}
// stage 2. find local maximums
- for( r = 0; r < numrho; r++ )
- for( n = 0; n < numangle; n++ )
+ for(int r = 0; r < numrho; r++ )
+ for(int n = 0; n < numangle; n++ )
{
int base = (n+1) * (numrho+2) + r+1;
if( accum[base] > threshold &&
* Multi-Scale variant of Classical Hough Transform *
\****************************************************************************************/
-#if defined _MSC_VER && _MSC_VER >= 1200
-#pragma warning( disable: 4714 )
-#endif
-
//DECLARE_AND_IMPLEMENT_LIST( _index, h_ );
IMPLEMENT_LIST( _index, h_ )
/* Precalculating sin */
_sinTable.resize( 5 * tn * stn );
sinTable = &_sinTable[0];
-
+
for( index = 0; index < 5 * tn * stn; index++ )
sinTable[index] = (float)cos( stheta * index * 0.2f );
h_get_next__index( &pos );
}
}
-
+
h_destroy_list__index(list);
}
// choose random point out of the remaining ones
int idx = cvRandInt(&rng) % count;
int max_val = threshold-1, max_n = 0;
- CvPoint* pt = (CvPoint*)cvGetSeqElem( seq, idx );
+ CvPoint* point = (CvPoint*)cvGetSeqElem( seq, idx );
CvPoint line_end[2] = {{0,0}, {0,0}};
float a, b;
int* adata = (int*)accum.data;
int good_line;
const int shift = 16;
- i = pt->y;
- j = pt->x;
+ i = point->y;
+ j = point->x;
// "remove" it by overriding it with the last element
- *pt = *(CvPoint*)cvGetSeqElem( seq, count-1 );
+ *point = *(CvPoint*)cvGetSeqElem( seq, count-1 );
// check if it has been excluded already (i.e. belongs to some other line)
if( !mdata0[i*width + j] )
}
else
CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
-
+
iparam1 = cvRound(param1);
iparam2 = cvRound(param2);
acols = accum->cols - 2;
adata = accum->data.i;
astep = accum->step/sizeof(adata[0]);
- // Accumulate circle evidence for each edge pixel
+ // Accumulate circle evidence for each edge pixel
for( y = 0; y < rows; y++ )
{
const uchar* edges_row = edges->data.ptr + y*edges->step;
for( x = 0; x < cols; x++ )
{
float vx, vy;
- int sx, sy, x0, y0, x1, y1, r, k;
+ int sx, sy, x0, y0, x1, y1, r;
CvPoint pt;
vx = dx_row[x];
x0 = cvRound((x*idp)*ONE);
y0 = cvRound((y*idp)*ONE);
- // Step from min_radius to max_radius in both directions of the gradient
- for( k = 0; k < 2; k++ )
+ // Step from min_radius to max_radius in both directions of the gradient
+ for(int k1 = 0; k1 < 2; k1++ )
{
x1 = x0 + min_radius * sx;
y1 = y0 + min_radius * sy;
nz_count = nz->total;
if( !nz_count )
return;
- //Find possible circle centers
+ //Find possible circle centers
for( y = 1; y < arows - 1; y++ )
{
for( x = 1; x < acols - 1; x++ )
dr = dp;
min_dist = MAX( min_dist, dp );
min_dist *= min_dist;
- // For each found possible center
- // Estimate radius and check support
+ // For each found possible center
+ // Estimate radius and check support
for( i = 0; i < centers->total; i++ )
{
int ofs = *(int*)cvGetSeqElem( centers, i );
y = ofs/(acols+2);
x = ofs - (y)*(acols+2);
- //Calculate circle's center in pixels
+ //Calculate circle's center in pixels
float cx = (float)((x + 0.5f)*dp), cy = (float)(( y + 0.5f )*dp);
float start_dist, dist_sum;
- float r_best = 0, c[3];
+ float r_best = 0;
int max_count = 0;
- // Check distance with previously detected circles
+ // Check distance with previously detected circles
for( j = 0; j < circles->total; j++ )
{
float* c = (float*)cvGetSeqElem( circles, j );
if( j < circles->total )
continue;
- // Estimate best radius
+ // Estimate best radius
cvStartReadSeq( nz, &reader );
for( j = k = 0; j < nz_count; j++ )
{
{
float r_cur = ddata[sort_buf[(j + start_idx)/2]];
if( (start_idx - j)*r_best >= max_count*r_cur ||
- (r_best < FLT_EPSILON && start_idx - j >= max_count) )
+ (r_best < FLT_EPSILON && start_idx - j >= max_count) )
{
r_best = r_cur;
max_count = start_idx - j;
}
dist_sum += d;
}
- // Check if the circle has enough support
+ // Check if the circle has enough support
if( max_count > acc_threshold )
{
+ float c[3];
c[0] = cx;
c[1] = cy;
c[2] = (float)r_best;
else
_arr.release();
}
-
+
}
-
+
void cv::HoughLines( InputArray _image, OutputArray _lines,
double rho, double theta, int threshold,
double srn, double stn )
static const double cs[][2]=
{{1, 0}, {-s45, -s45}, {0, 1}, {s45, -s45}, {-1, 0}, {s45, s45}, {0, -1}, {-s45, s45}};
- int i;
if( x < FLT_EPSILON )
{
for( int i = 0; i < 8; i++ )
float sum = 0;
double y0=-(x+3)*CV_PI*0.25, s0 = sin(y0), c0=cos(y0);
- for( i = 0; i < 8; i++ )
+ for(int i = 0; i < 8; i++ )
{
double y = -(x+3-i)*CV_PI*0.25;
coeffs[i] = (float)((cs[i][0]*s0 + cs[i][1]*c0)/(y*y));
}
sum = 1.f/sum;
- for( i = 0; i < 8; i++ )
+ for(int i = 0; i < 8; i++ )
coeffs[i] *= sum;
}
const T* srows[MAX_ESIZE]={0};
WT* rows[MAX_ESIZE]={0};
int prev_sy[MAX_ESIZE];
- int k, dy;
+ int dy;
xmin *= cn;
xmax *= cn;
HResize hresize;
VResize vresize;
- for( k = 0; k < ksize; k++ )
+ for(int k = 0; k < ksize; k++ )
{
prev_sy[k] = -1;
rows[k] = (WT*)_buffer + bufstep*k;
// image resize is a separable operation. In case of not too strong
for( dy = 0; dy < dsize.height; dy++, beta += ksize )
{
- int sy0 = yofs[dy], k, k0=ksize, k1=0, ksize2 = ksize/2;
- for( k = 0; k < ksize; k++ )
+ int sy0 = yofs[dy], k0=ksize, k1=0, ksize2 = ksize/2;
+ for(int k = 0; k < ksize; k++ )
{
int sy = clip(sy0 - ksize2 + 1 + k, 0, ssize.height);
for( k1 = std::max(k1, k); k1 < ksize; k1++ )
assert( k < ssize.width*2 );
xofs[k].di = dx*cn;
xofs[k].si = sx*cn;
- xofs[k++].alpha = 1.f / min(scale_x, src.cols - fsx1);
+ xofs[k++].alpha = float(1.0 / min(scale_x, src.cols - fsx1));
}
if( fsx2 - sx2 > 1e-3 )
for( i = 0; i < 8; i++, w += 8 )
{
int yi = y[i];
- const T* S = S0 + yi*sstep;
+ const T* S1 = S0 + yi*sstep;
if( yi < 0 )
continue;
if( x[0] >= 0 )
- sum += (S[x[0]] - cv)*w[0];
+ sum += (S1[x[0]] - cv)*w[0];
if( x[1] >= 0 )
- sum += (S[x[1]] - cv)*w[1];
+ sum += (S1[x[1]] - cv)*w[1];
if( x[2] >= 0 )
- sum += (S[x[2]] - cv)*w[2];
+ sum += (S1[x[2]] - cv)*w[2];
if( x[3] >= 0 )
- sum += (S[x[3]] - cv)*w[3];
+ sum += (S1[x[3]] - cv)*w[3];
if( x[4] >= 0 )
- sum += (S[x[4]] - cv)*w[4];
+ sum += (S1[x[4]] - cv)*w[4];
if( x[5] >= 0 )
- sum += (S[x[5]] - cv)*w[5];
+ sum += (S1[x[5]] - cv)*w[5];
if( x[6] >= 0 )
- sum += (S[x[6]] - cv)*w[6];
+ sum += (S1[x[6]] - cv)*w[6];
if( x[7] >= 0 )
- sum += (S[x[7]] - cv)*w[7];
+ sum += (S1[x[7]] - cv)*w[7];
}
D[k] = castOp(sum);
}
remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
else
{
- Mat matA(bh, bw, CV_16U, A);
- remap( src, dpart, _XY, matA, interpolation, borderType, borderValue );
+ Mat _matA(bh, bw, CV_16U, A);
+ remap( src, dpart, _XY, _matA, interpolation, borderType, borderValue );
}
}
}
remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
else
{
- Mat matA(bh, bw, CV_16U, A);
- remap( src, dpart, _XY, matA, interpolation, borderType, borderValue );
+ Mat _matA(bh, bw, CV_16U, A);
+ remap( src, dpart, _XY, _matA, interpolation, borderType, borderValue );
}
}
}
yi_1 = ((CvPoint2D32f*)(reader.ptr))->y;
}
CV_NEXT_SEQ_ELEM( contour->elem_size, reader );
-
+
xi_12 = xi_1 * xi_1;
yi_12 = yi_1 * yi_1;
const T* ptr = (const T*)(img.data + y*img.step);
WT x0 = 0, x1 = 0, x2 = 0;
MT x3 = 0;
-
+
for( x = 0; x < size.width; x++ )
{
WT p = ptr[x];
typedef int WT;
typedef int MT;
cv::Size size = img.size();
- int x, y;
+ int y;
MT mom[10] = {0,0,0,0,0,0,0,0,0,0};
bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2);
-
+
for( y = 0; y < size.height; y++ )
{
const T* ptr = img.ptr<T>(y);
int x0 = 0, x1 = 0, x2 = 0, x3 = 0, x = 0;
-
+
if( useSIMD )
{
__m128i qx_init = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
__m128i dx = _mm_set1_epi16(8);
__m128i z = _mm_setzero_si128(), qx0 = z, qx1 = z, qx2 = z, qx3 = z, qx = qx_init;
-
+
for( ; x <= size.width - 8; x += 8 )
{
__m128i p = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr + x)), z);
qx1 = _mm_add_epi32(qx1, _mm_madd_epi16(p, qx));
qx2 = _mm_add_epi32(qx2, _mm_madd_epi16(p, sx));
qx3 = _mm_add_epi32(qx3, _mm_madd_epi16(px, sx));
-
+
qx = _mm_add_epi16(qx, dx);
}
int CV_DECL_ALIGNED(16) buf[4];
_mm_store_si128((__m128i*)buf, qx0);
x0 = buf[0] + buf[1] + buf[2] + buf[3];
_mm_store_si128((__m128i*)buf, qx1);
- x1 = buf[0] + buf[1] + buf[2] + buf[3];
+ x1 = buf[0] + buf[1] + buf[2] + buf[3];
_mm_store_si128((__m128i*)buf, qx2);
x2 = buf[0] + buf[1] + buf[2] + buf[3];
_mm_store_si128((__m128i*)buf, qx3);
x3 = buf[0] + buf[1] + buf[2] + buf[3];
}
-
+
for( ; x < size.width; x++ )
{
WT p = ptr[x];
WT xp = x * p, xxp;
-
+
x0 += p;
x1 += xp;
xxp = xp * x;
x2 += xxp;
x3 += xxp * x;
}
-
+
WT py = y * x0, sy = y*y;
-
+
mom[9] += ((MT)py) * sy; // m03
mom[8] += ((MT)x1) * sy; // m12
mom[7] += ((MT)x2) * y; // m21
mom[1] += x1; // m10
mom[0] += x0; // m00
}
-
- for( x = 0; x < 10; x++ )
+
+ for(int x = 0; x < 10; x++ )
moments[x] = (double)mom[x];
}
type = CV_MAT_TYPE( mat->type );
depth = CV_MAT_DEPTH( type );
cn = CV_MAT_CN( type );
-
+
cv::Size size = cvGetMatSize( mat );
if( cn > 1 && coi == 0 )
func = momentsInTile<double, double, double>;
else
CV_Error( CV_StsUnsupportedFormat, "" );
-
+
cv::Mat src0(mat);
for( int y = 0; y < size.height; y += TILE_SIZE )
{
cv::Size tileSize;
tileSize.height = std::min(TILE_SIZE, size.height - y);
-
+
for( int x = 0; x < size.width; x += TILE_SIZE )
{
tileSize.width = std::min(TILE_SIZE, size.width - x);
cv::compare( src, 0, tmp, CV_CMP_NE );
src = tmp;
}
-
+
double mom[10];
func( src, mom );
-
+
if(binary)
{
double s = 1./255;
for( int k = 0; k < 10; k++ )
mom[k] *= s;
}
-
+
double xm = x * mom[0], ym = y * mom[0];
- // accumulate moments computed in each tile
+ // accumulate moments computed in each tile
// + m00 ( = m00' )
moments->m00 += mom[0];
// + m21 ( = m21' + x*(2*m11' + 2*y*m10' + x*m01' + x*y*m00') + y*m20')
moments->m21 += mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3];
-
+
// + m12 ( = m12' + y*(2*m11' + 2*x*m01' + y*m10' + x*y*m00') + x*m02')
moments->m12 += mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5];
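
The m21/m12 updates above are the binomial shift of the tile's raw moments m'_{pq} to the image origin; with the tile's top-left corner at (x, y) the general identity is

    m_{pq} = \sum_{u,v}(u+x)^p (v+y)^q I(u,v)
           = \sum_{k=0}^{p}\sum_{l=0}^{q}\binom{p}{k}\binom{q}{l}\,x^{p-k}\,y^{q-l}\,m'_{kl},

so, for example, m_{21} = m'_{21} + 2x\,m'_{11} + 2xy\,m'_{10} + x^2 m'_{01} + x^2 y\,m'_{00} + y\,m'_{20}, which is exactly the expression accumulated into moments->m21.
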
return m;
}
-
+
}
-
+
cv::Moments cv::moments( InputArray _array, bool binaryImage )
{
CvMoments om;
merge(planes, out);
}
-Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weightBoxSize)
+static Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weightBoxSize)
{
Mat src = _src.getMat();
-
+
int type = src.type();
CV_Assert( type == CV_32FC1 || type == CV_64FC1 );
-
+
int minr = peakLocation.y - (weightBoxSize.height >> 1);
int maxr = peakLocation.y + (weightBoxSize.height >> 1);
int minc = peakLocation.x - (weightBoxSize.width >> 1);
int maxc = peakLocation.x + (weightBoxSize.width >> 1);
-
+
Point2d centroid;
double sumIntensity = 0.0;
-
+
// clamp the values to min and max if needed.
if(minr < 0)
{
minr = 0;
}
-
+
if(minc < 0)
{
minc = 0;
}
-
+
if(maxr > src.rows - 1)
{
maxr = src.rows - 1;
}
-
+
if(maxc > src.cols - 1)
{
maxc = src.cols - 1;
}
-
+
if(type == CV_32FC1)
{
const float* dataIn = (const float*)src.data;
centroid.y += (double)y*dataIn[x];
sumIntensity += (double)dataIn[x];
}
-
+
dataIn += src.cols;
}
}
centroid.y += (double)y*dataIn[x];
sumIntensity += dataIn[x];
}
-
+
dataIn += src.cols;
}
}
-
+
sumIntensity += DBL_EPSILON; // prevent div0 problems...
-
+
centroid.x /= sumIntensity;
centroid.y /= sumIntensity;
-
+
return centroid;
}
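
In effect weightedCentroid refines the integer peak of the correlation surface to sub-pixel accuracy with an intensity-weighted mean over the clamped weightBoxSize window W centred on peakLocation:

    \hat c = \frac{\sum_{(x,y)\in W} I(x,y)\,(x,y)}{\varepsilon + \sum_{(x,y)\in W} I(x,y)},

where \varepsilon is the DBL_EPSILON guard against an all-zero window.
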
-
+
}
cv::Point2d cv::phaseCorrelate(InputArray _src1, InputArray _src2, InputArray _window)
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if defined _MSC_VER && _MSC_VER >= 1200
- // disable warnings related to inline functions
- #pragma warning( disable: 4251 4711 4710 4514 )
-#endif
-
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
// Parameters:
// points - convex hull vertices ( any orientation )
// n - number of vertices
-// mode - concrete application of algorithm
-// can be CV_CALIPERS_MAXDIST or
-// CV_CALIPERS_MINAREARECT
+// mode - concrete application of algorithm
+// can be CV_CALIPERS_MAXDIST or
+// CV_CALIPERS_MINAREARECT
// left, bottom, right, top - indexes of extremal points
// out - output info.
-// In case CV_CALIPERS_MAXDIST it points to float value -
+// In case CV_CALIPERS_MAXDIST it points to float value -
// maximal height of polygon.
// In case CV_CALIPERS_MINAREARECT
-// ((CvPoint2D32f*)out)[0] - corner
+// ((CvPoint2D32f*)out)[0] - corner
// ((CvPoint2D32f*)out)[1] - vector1
// ((CvPoint2D32f*)out)[0] - corner2
-//
+//
// ^
// |
// vector2 |
{
float minarea = FLT_MAX;
float max_dist = 0;
- char buffer[32];
+ char buffer[32] = {};
int i, k;
CvPoint2D32f* vect = (CvPoint2D32f*)cvAlloc( n * sizeof(vect[0]) );
float* inv_vect_length = (float*)cvAlloc( n * sizeof(inv_vect_length[0]) );
int left = 0, bottom = 0, right = 0, top = 0;
int seq[4] = { -1, -1, -1, -1 };
- /* rotating calipers sides will always have coordinates
- (a,b) (-b,a) (-a,-b) (b, -a)
+ /* rotating calipers sides will always have coordinates
+ (a,b) (-b,a) (-a,-b) (b, -a)
*/
/* this is the first base vector (a,b) initialized by (1,0) */
float orientation = 0;
float left_x, right_x, top_y, bottom_y;
CvPoint2D32f pt0 = points[0];
-
+
left_x = right_x = pt0.x;
top_y = bottom_y = pt0.y;
-
+
for( i = 0; i < n; i++ )
{
double dx, dy;
-
+
if( pt0.x < left_x )
left_x = pt0.x, left = i;
bottom_y = pt0.y, bottom = i;
CvPoint2D32f pt = points[(i+1) & (i+1 < n ? -1 : 0)];
-
+
dx = pt.x - pt0.x;
dy = pt.y - pt0.y;
{
double ax = vect[n-1].x;
double ay = vect[n-1].y;
-
+
for( i = 0; i < n; i++ )
{
double bx = vect[i].x;
base_b = lead_y;
break;
case 1:
- base_a = lead_y;
+ base_a = lead_y;
base_b = -lead_x;
break;
case 2:
break;
default: assert(0);
}
- }
+ }
/* change base point of main edge */
seq[main_element] += 1;
seq[main_element] = (seq[main_element] == n) ? 0 : seq[main_element];
-
+
switch (mode)
{
case CV_CALIPERS_MAXHEIGHT:
CvBox2D box;
cv::AutoBuffer<CvPoint2D32f> _points;
CvPoint2D32f* points;
-
+
memset(&box, 0, sizeof(box));
int i, n;
CV_READ_SEQ_ELEM( points[i], reader );
}
}
-
+
if( n > 2 )
{
icvRotatingCalipers( points, n, CV_CALIPERS_MINAREARECT, (float*)out );
int i, j = 0, count;
const int N = 16;
float buf[N];
- CvMat buffer = cvMat( 1, N, CV_32F, buf );
+ CvMat buffer = cvMat( 1, N, CV_32F, buf );
CvSeqReader reader;
CvContour contour_header;
CvSeq* contour = 0;
if( contour->total > 1 )
{
int is_float = CV_SEQ_ELTYPE( contour ) == CV_32FC2;
-
+
cvStartReadSeq( contour, &reader, 0 );
cvSetSeqReaderPos( &reader, slice.start_index );
count = cvSliceLength( slice, contour );
CV_NEXT_SEQ_ELEM( contour->elem_size, reader );
// Bugfix by Axel at rubico.com 2010-03-22, affects closed slices only
// wraparound not handled by CV_NEXT_SEQ_ELEM
- if( is_closed && i == count - 2 )
+ if( is_closed && i == count - 2 )
cvSetSeqReaderPos( &reader, slice.start_index );
buffer.data.fl[j] = dx * dx + dy * dy;
*_radius = 0;
CvSeqReader reader;
- int i, k, count;
+ int k, count;
CvPoint2D32f pts[8];
CvContour contour_header;
CvSeqBlock block;
pt_left = pt_right = pt_top = pt_bottom = (CvPoint *)(reader.ptr);
CV_READ_SEQ_ELEM( pt, reader );
- for( i = 1; i < count; i++ )
+ for(int i = 1; i < count; i++ )
{
CvPoint* pt_ptr = (CvPoint*)reader.ptr;
CV_READ_SEQ_ELEM( pt, reader );
pt_left = pt_right = pt_top = pt_bottom = (CvPoint2D32f *) (reader.ptr);
CV_READ_SEQ_ELEM( pt, reader );
- for( i = 1; i < count; i++ )
+ for(int i = 1; i < count; i++ )
{
CvPoint2D32f* pt_ptr = (CvPoint2D32f*)reader.ptr;
CV_READ_SEQ_ELEM( pt, reader );
for( k = 0; k < max_iters; k++ )
{
double min_delta = 0, delta;
- CvPoint2D32f ptfl, farAway = { 0, 0};
- /*only for first iteration because the alg is repared at the loop's foot*/
- if(k==0)
- icvFindEnslosingCicle4pts_32f( pts, &center, &radius );
+ CvPoint2D32f ptfl, farAway = { 0, 0};
+ /*only for first iteration because the alg is repaired at the loop's foot*/
+ if(k==0)
+ icvFindEnslosingCicle4pts_32f( pts, &center, &radius );
cvStartReadSeq( sequence, &reader, 0 );
- for( i = 0; i < count; i++ )
+ for(int i = 0; i < count; i++ )
{
if( !is_float )
{
if( result )
break;
- CvPoint2D32f ptsCopy[4];
- /* find good replacement partner for the point which is at most far away,
- starting with the one that lays in the actual circle (i=3) */
- for(int i = 3; i >=0; i-- )
- {
- for(int j = 0; j < 4; j++ )
- {
- ptsCopy[j]=(i != j)? pts[j]: farAway;
- }
-
- icvFindEnslosingCicle4pts_32f(ptsCopy, &center, &radius );
- if( icvIsPtInCircle( pts[i], center, radius )>=0){ // replaced one again in the new circle?
- pts[i] = farAway;
- break;
- }
- }
+ CvPoint2D32f ptsCopy[4];
+ /* find good replacement partner for the point which is at most far away,
+ starting with the one that lays in the actual circle (i=3) */
+ for(int i = 3; i >=0; i-- )
+ {
+ for(int j = 0; j < 4; j++ )
+ {
+ ptsCopy[j]=(i != j)? pts[j]: farAway;
+ }
+
+ icvFindEnslosingCicle4pts_32f(ptsCopy, &center, &radius );
+ if( icvIsPtInCircle( pts[i], center, radius )>=0){ // replaced one again in the new circle?
+ pts[i] = farAway;
+ break;
+ }
+ }
}
if( !result )
cvStartReadSeq( sequence, &reader, 0 );
radius = 0.f;
- for( i = 0; i < count; i++ )
+ for(int i = 0; i < count; i++ )
{
CvPoint2D32f ptfl;
float t, dx, dy;
yi_1 = ((CvPoint2D32f*)(reader.ptr))->y;
}
CV_NEXT_SEQ_ELEM( contour->elem_size, reader );
-
+
while( lpt-- > 0 )
{
double dxy, xi, yi;
/****************************************************************************************\
- copy data from one buffer to other buffer
+ copy data from one buffer to another
\****************************************************************************************/
n = ptseq->total;
if( n < 5 )
CV_Error( CV_StsBadSize, "Number of points should be >= 5" );
-
+
/*
- * New fitellipse algorithm, contributed by Dr. Daniel Weiss
+ * New fitellipse algorithm, contributed by Dr. Daniel Weiss
*/
CvPoint2D32f c = {0,0};
double gfp[5], rp[5], t;
cvStartReadSeq( ptseq, &reader );
is_float = CV_SEQ_ELTYPE(ptseq) == CV_32FC2;
-
+
for( i = 0; i < n; i++ )
{
CvPoint2D32f p;
Ad[i*5 + 3] = p.x;
Ad[i*5 + 4] = p.y;
}
-
+
cvSolve( &A, &b, &x, CV_SVD );
// now use general-form parameters A - E to find the ellipse center:
xmin = ymin = 0;
}
else if( ptseq->total )
- {
+ {
int is_float = CV_SEQ_ELTYPE(ptseq) == CV_32FC2;
cvStartReadSeq( ptseq, &reader, 0 );
ymin = ymax = pt.y;
for( i = 1; i < ptseq->total; i++ )
- {
+ {
CV_READ_SEQ_ELEM( pt, reader );
-
+
if( xmin > pt.x )
xmin = pt.x;
-
+
if( xmax < pt.x )
xmax = pt.x;
ymin = ymax = CV_TOGGLE_FLT(pt.y);
for( i = 1; i < ptseq->total; i++ )
- {
+ {
CV_READ_SEQ_ELEM( pt, reader );
pt.x = CV_TOGGLE_FLT(pt.x);
pt.y = CV_TOGGLE_FLT(pt.y);
-
+
if( xmin > pt.x )
xmin = pt.x;
-
+
if( xmax < pt.x )
xmax = pt.x;
if( update )
((CvContour*)ptseq)->rect = rect;
-
+
return rect;
}
ksize = _ksize;
anchor = _anchor;
}
-
+
void operator()(const uchar* src, uchar* dst, int width, int cn)
{
const T* S = (const T*)src;
ST* D = (ST*)dst;
int i = 0, k, ksz_cn = ksize*cn;
-
+
width = (width - 1)*cn;
for( k = 0; k < cn; k++, S++, D++ )
{
}
void reset() { sumCount = 0; }
-
+
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
{
int i;
}
-
+
cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
Size ksize, Point anchor, int borderType )
{
boxFilter( src, dst, -1, ksize, anchor, true, borderType );
-}
+}
/****************************************************************************************\
Gaussian Blur
Mat src = _src.getMat();
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
-
+
if( borderType != BORDER_CONSTANT )
{
if( src.rows == 1 )
namespace cv
{
-
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4244 )
-#endif
-
typedef ushort HT;
/**
#if CV_SSE2
#define MEDIAN_HAVE_SIMD 1
-
+
static inline void histogram_add_simd( const HT x[16], HT y[16] )
{
const __m128i* rx = (const __m128i*)x;
_mm_store_si128(ry+0, r0);
_mm_store_si128(ry+1, r1);
}
-
+
#else
#define MEDIAN_HAVE_SIMD 0
#endif
-
+
static inline void histogram_add( const HT x[16], HT y[16] )
{
int i;
for( c = 0; c < cn; c++ )
{
for( j = 0; j < n; j++ )
- COP( c, j, src[cn*j+c], += r+2 );
+ COP( c, j, src[cn*j+c], += (cv::HT)(r+2) );
for( i = 1; i < r; i++ )
{
if ( luc[c][k] <= j-r )
{
memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
- for ( luc[c][k] = j-r; luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
+ for ( luc[c][k] = cv::HT(j-r); luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
histogram_add_simd( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
if ( luc[c][k] < j+r+1 )
{
for( j = 0; j < 2*r; ++j )
histogram_add( &h_coarse[16*(n*c+j)], H[c].coarse );
-
+
for( j = r; j < n-r; j++ )
{
int t = 2*r*r + 2*r, b, sum = 0;
HT* segment;
-
+
histogram_add( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
-
+
// Find median at coarse level
for ( k = 0; k < 16 ; ++k )
{
}
}
assert( k < 16 );
-
+
/* Update corresponding histogram segment */
if ( luc[c][k] <= j-r )
{
memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
- for ( luc[c][k] = j-r; luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
+ for ( luc[c][k] = cv::HT(j-r); luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
histogram_add( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
-
+
if ( luc[c][k] < j+r+1 )
{
histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
histogram_add( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
}
}
-
+
histogram_sub( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
-
+
/* Find median in segment */
segment = H[c].fine[k];
for ( b = 0; b < 16 ; b++ )
#undef COP
}
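
The coarse/fine bookkeeping above maintains, per channel, a 16-bin coarse histogram plus sixteen 16-bin fine histograms of the sliding (2r+1)x(2r+1) window, so the median can be read off without re-sorting. A minimal sketch of the selection step on a plain 256-bin histogram, using the same half-window threshold t = 2*r*r + 2*r as the code:

    // Walk the histogram until the cumulative count exceeds half the window
    // area; the bin index reached at that point is the window's median value.
    static inline int medianFromHistogram(const int hist[256], int r)
    {
        const int half = 2*r*r + 2*r;      // floor((2r+1)^2 / 2)
        int cum = 0;
        for( int v = 0; v < 256; v++ )
        {
            cum += hist[v];
            if( cum > half )
                return v;
        }
        return 255;                        // not reached for a fully populated window
    }
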
-
-#if _MSC_VER >= 1200
-#pragma warning( default: 4244 )
-#endif
-
static void
medianBlur_8u_Om( const Mat& _src, Mat& _dst, int m )
{
b = std::max(b, t);
}
};
-
+
struct MinMax16s
{
typedef short value_type;
}
};
-
+
struct MinMaxVec16s
{
typedef short value_type;
a = _mm_min_epi16(a, b);
b = _mm_max_epi16(b, t);
}
-};
-
+};
+
struct MinMaxVec32f
{
typedef float value_type;
Op op;
VecOp vop;
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
-
+
if( m == 3 )
{
if( size.width == 1 || size.height == 1 )
}
return;
}
-
+
size.width *= cn;
for( i = 0; i < size.height; i++, dst += dstep )
{
p[k*5+2] = rowk[j]; p[k*5+3] = rowk[j3];
p[k*5+4] = rowk[j4];
}
-
+
op(p[1], p[2]); op(p[0], p[1]); op(p[1], p[2]); op(p[4], p[5]); op(p[3], p[4]);
op(p[4], p[5]); op(p[0], p[3]); op(p[2], p[5]); op(p[2], p[3]); op(p[1], p[4]);
op(p[1], p[2]); op(p[3], p[4]); op(p[7], p[8]); op(p[6], p[7]); op(p[7], p[8]);
p[k*5+2] = vop.load(rowk+j); p[k*5+3] = vop.load(rowk+j+cn);
p[k*5+4] = vop.load(rowk+j+cn*2);
}
-
+
vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]);
vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]);
vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]);
}
}
-
+
void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
{
Mat src0 = _src0.getMat();
_dst.create( src0.size(), src0.type() );
Mat dst = _dst.getMat();
-
+
if( ksize <= 1 )
{
src0.copyTo(dst);
if (tegra::medianBlur(src0, dst, ksize))
return;
#endif
-
+
bool useSortNet = ksize == 3 || (ksize == 5
#if !CV_SSE2
&& src0.depth() > CV_8U
#endif
);
-
+
Mat src;
if( useSortNet )
{
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
-
+
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
-
+
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
radius = MAX(radius, 1);
d = radius*2 + 1;
// compute the min/max range for the input image (even if multichannel)
-
+
minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
-
+
// temporary copy of the image with borders for easy processing
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
float* expLUT = &_expLUT[0];
scale_index = kExpNumBins/len;
-
+
// initialize the exp LUT
for( i = 0; i < kExpNumBins+2; i++ )
{
else
expLUT[i] = 0.f;
}
-
+
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
for( i = 0; i < size.height; i++ )
{
- const float* sptr = (const float*)(temp.data + (i+radius)*temp.step) + radius*cn;
+ const float* sptr = (const float*)(temp.data + (i+radius)*temp.step) + radius*cn;
float* dptr = (float*)(dst.data + i*dst.step);
if( cn == 1 )
for( k = 0; k < maxk; k++ )
{
float val = sptr[j + space_ofs[k]];
- float alpha = (float)(std::abs(val - val0)*scale_index);
+ float alpha = (float)(std::abs(val - val0)*scale_index);
int idx = cvFloor(alpha);
alpha -= idx;
float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
- sum += val*w;
+ sum += val*w;
wsum += w;
}
dptr[j] = (float)(sum/wsum);
{
const float* sptr_k = sptr + j + space_ofs[k];
float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
- float alpha = (float)((std::abs(b - b0) +
+ float alpha = (float)((std::abs(b - b0) +
std::abs(g - g0) + std::abs(r - r0))*scale_index);
int idx = cvFloor(alpha);
alpha -= idx;
Mat src = _src.getMat();
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
-
+
if( src.depth() == CV_8U )
bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
else if( src.depth() == CV_32F )
if( size.width == cn )
buf[cn] = 0;
-
+
if( sqsum )
{
sqsum[-cn] = 0;
sum += sumstep - cn;
tilted += tiltedstep - cn;
buf += -cn;
-
+
if( sqsum )
sqsum += sqsumstep - cn;
tilted[x] = t0 + t1 + tilted[x - tiltedstep - cn];
buf[x] = t0;
}
-
+
if( sqsum )
sqsum++;
}
}
}
-
+
#define DEF_INTEGRAL_FUNC(suffix, T, ST, QT) \
-void integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep, QT* sqsum, size_t sqsumstep, \
- ST* tilted, size_t tiltedstep, Size size, int cn ) \
+static void integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep, QT* sqsum, size_t sqsumstep, \
+ ST* tilted, size_t tiltedstep, Size size, int cn ) \
{ integral_(src, srcstep, sum, sumstep, sqsum, sqsumstep, tilted, tiltedstep, size, cn); }
DEF_INTEGRAL_FUNC(8u32s, uchar, int, double)
DEF_INTEGRAL_FUNC(32f, float, float, double)
DEF_INTEGRAL_FUNC(32f64f, float, double, double)
DEF_INTEGRAL_FUNC(64f, double, double, double)
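
All of the instantiations above compute the same prefix sums, only at different input/accumulator depths; for a W x H source the outputs are (W+1) x (H+1) and follow

    \mathrm{sum}(X,Y) = \sum_{x<X,\;y<Y}\mathrm{src}(x,y), \qquad
    \mathrm{sqsum}(X,Y) = \sum_{x<X,\;y<Y}\mathrm{src}(x,y)^2,

with the optional tilted output being the analogous sum taken over the 45-degree rotated (triangular) region above (X,Y).
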
-
+
typedef void (*IntegralFunc)(const uchar* src, size_t srcstep, uchar* sum, size_t sumstep,
uchar* sqsum, size_t sqsumstep, uchar* tilted, size_t tstep,
Size size, int cn );
sdepth = CV_MAT_DEPTH(sdepth);
_sum.create( isize, CV_MAKETYPE(sdepth, cn) );
sum = _sum.getMat();
-
+
if( _tilted.needed() )
{
_tilted.create( isize, CV_MAKETYPE(sdepth, cn) );
tilted = _tilted.getMat();
}
-
+
if( _sqsum.needed() )
{
_sqsum.create( isize, CV_MAKETYPE(CV_64F, cn) );
sqsum = _sqsum.getMat();
}
-
+
IntegralFunc func = 0;
if( depth == CV_8U && sdepth == CV_32S )
func( src.data, src.step, sum.data, sum.step, sqsum.data, sqsum.step,
tilted.data, tilted.step, src.size(), cn );
}
-
+
void cv::integral( InputArray src, OutputArray sum, int sdepth )
{
integral( src, sum, noArray(), noArray(), sdepth );
Mat cameraMatrix = _cameraMatrix.getMat();
if( !centerPrincipalPoint && cameraMatrix.type() == CV_64F )
return cameraMatrix;
-
+
Mat newCameraMatrix;
cameraMatrix.convertTo(newCameraMatrix, CV_64F);
if( centerPrincipalPoint )
{
Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
Mat matR = _matR.getMat(), newCameraMatrix = _newCameraMatrix.getMat();
-
+
if( m1type <= 0 )
m1type = CV_16SC2;
CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 );
double u0 = A(0, 2), v0 = A(1, 2);
double fx = A(0, 0), fy = A(1, 1);
- CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) ||
+ CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) ||
distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) ||
distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1));
{
Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
Mat distCoeffs = _distCoeffs.getMat(), newCameraMatrix = _newCameraMatrix.getMat();
-
+
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
-
+
CV_Assert( dst.data != src.data );
int stripe_size0 = std::min(std::max(1, (1 << 12) / std::max(src.cols, 1)), src.rows);
(_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
(_distCoeffs->rows*_distCoeffs->cols == 4 ||
_distCoeffs->rows*_distCoeffs->cols == 5 ||
- _distCoeffs->rows*_distCoeffs->cols == 8));
+ _distCoeffs->rows*_distCoeffs->cols == 8));
_Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);
-
+
cvConvert( _distCoeffs, &_Dk );
iters = 5;
}
{
Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();
-
+
CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));
-
+
_dst.create(src.size(), src.type(), -1, true);
Mat dst = _dst.getMat();
-
+
CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix;
CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0;
if( R.data )
double beta = 1 + 2*alpha;
double v = x*x + y*y + 1, iv = 1/v;
double u = sqrt(beta*v + alpha*alpha);
-
+
double k = (u - alpha)*iv;
double kv = (v*beta/u - (u - alpha)*2)*iv*iv;
double kx = kv*x, ky = kv*y;
-
+
if( projType == PROJ_SPHERICAL_ORTHO )
{
if(J)
double iR = 1/(alpha + 1);
double x1 = std::max(std::min(x*k*iR, 1.), -1.);
double y1 = std::max(std::min(y*k*iR, 1.), -1.);
-
+
if(J)
{
double fx1 = iR/sqrt(1 - x1*x1);
return Point2f();
}
-
+
static Point2f invMapPointSpherical(Point2f _p, float alpha, int projType)
{
static int avgiter = 0, avgn = 0;
-
+
double eps = 1e-12;
Vec2d p(_p.x, _p.y), q(_p.x, _p.y), err;
Vec4d J;
int i, maxiter = 5;
-
+
for( i = 0; i < maxiter; i++ )
{
Point2f p1 = mapPointSpherical(Point2f((float)q[0], (float)q[1]), alpha, &J, projType);
err = Vec2d(p1.x, p1.y) - p;
if( err[0]*err[0] + err[1]*err[1] < eps )
break;
-
+
Vec4d JtJ(J[0]*J[0] + J[2]*J[2], J[0]*J[1] + J[2]*J[3],
J[0]*J[1] + J[2]*J[3], J[1]*J[1] + J[3]*J[3]);
double d = JtJ[0]*JtJ[3] - JtJ[1]*JtJ[2];
d = d ? 1./d : 0;
Vec4d iJtJ(JtJ[3]*d, -JtJ[1]*d, -JtJ[2]*d, JtJ[0]*d);
Vec2d JtErr(J[0]*err[0] + J[2]*err[1], J[1]*err[0] + J[3]*err[1]);
-
+
q -= Vec2d(iJtJ[0]*JtErr[0] + iJtJ[1]*JtErr[1], iJtJ[2]*JtErr[0] + iJtJ[3]*JtErr[1]);
//Matx22d J(kx*x + k, kx*y, ky*x, ky*y + k);
//q -= Vec2d((J.t()*J).inv()*(J.t()*err));
}
-
+
if( i < maxiter )
{
avgiter += i;
if( avgn == 1500 )
printf("avg iters = %g\n", (double)avgiter/avgn);
}
-
+
return i < maxiter ? Point2f((float)q[0], (float)q[1]) : Point2f(-FLT_MAX, -FLT_MAX);
}
}
-
+
float cv::initWideAngleProjMap( InputArray _cameraMatrix0, InputArray _distCoeffs0,
Size imageSize, int destImageWidth, int m1type,
OutputArray _map1, OutputArray _map2, int projType, double _alpha )
Point2f dcenter((destImageWidth-1)*0.5f, 0.f);
float xmin = FLT_MAX, xmax = -FLT_MAX, ymin = FLT_MAX, ymax = -FLT_MAX;
int N = 9;
- std::vector<Point2f> u(1), v(1);
- Mat _u(u), I = Mat::eye(3,3,CV_64F);
+ std::vector<Point2f> uvec(1), vvec(1);
+ Mat I = Mat::eye(3,3,CV_64F);
float alpha = (float)_alpha;
-
+
int ndcoeffs = distCoeffs0.cols*distCoeffs0.rows*distCoeffs0.channels();
CV_Assert((distCoeffs0.cols == 1 || distCoeffs0.rows == 1) &&
(ndcoeffs == 4 || ndcoeffs == 5 || ndcoeffs == 8));
CV_Assert(cameraMatrix0.size() == Size(3,3));
distCoeffs0.convertTo(distCoeffs,CV_64F);
cameraMatrix0.convertTo(cameraMatrix,CV_64F);
-
+
alpha = std::min(alpha, 0.999f);
-
+
for( int i = 0; i < N; i++ )
for( int j = 0; j < N; j++ )
{
Point2f p((float)j*imageSize.width/(N-1), (float)i*imageSize.height/(N-1));
- u[0] = p;
- undistortPoints(_u, v, cameraMatrix, distCoeffs, I, I);
- Point2f q = mapPointSpherical(v[0], alpha, 0, projType);
+ uvec[0] = p;
+ undistortPoints(uvec, vvec, cameraMatrix, distCoeffs, I, I);
+ Point2f q = mapPointSpherical(vvec[0], alpha, 0, projType);
if( xmin > q.x ) xmin = q.x;
if( xmax < q.x ) xmax = q.x;
if( ymin > q.y ) ymin = q.y;
if( ymax < q.y ) ymax = q.y;
}
-
+
float scale = (float)std::min(dcenter.x/fabs(xmax), dcenter.x/fabs(xmin));
Size dsize(destImageWidth, cvCeil(std::max(scale*fabs(ymin)*2, scale*fabs(ymax)*2)));
dcenter.y = (dsize.height - 1)*0.5f;
-
+
Mat mapxy(dsize, CV_32FC2);
double k1 = k[0], k2 = k[1], k3 = k[2], p1 = k[3], p2 = k[4], k4 = k[5], k5 = k[6], k6 = k[7];
double fx = cameraMatrix.at<double>(0,0), fy = cameraMatrix.at<double>(1,1), cx = scenter.x, cy = scenter.y;
-
+
for( int y = 0; y < dsize.height; y++ )
{
Point2f* mxy = mapxy.ptr<Point2f>(y);
double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
double u = fx*(q.x*kr + p1*_2xy + p2*(r2 + 2*x2)) + cx;
double v = fy*(q.y*kr + p1*(r2 + 2*y2) + p2*_2xy) + cy;
-
+
mxy[x] = Point2f((float)u, (float)v);
}
}
-
+
if(m1type == CV_32FC2)
{
_map1.create(mapxy.size(), mapxy.type());
}
else
convertMaps(mapxy, Mat(), _map1, _map2, m1type, false);
-
+
return scale;
}
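
A minimal usage sketch of the maps produced above; the call relies on the default projection-type and alpha arguments declared for initWideAngleProjMap in the public header, so treat those defaults as an assumption:

    #include <opencv2/imgproc/imgproc.hpp>

    // Build the wide-angle projection maps once, then warp frames with remap().
    // K is the 3x3 camera matrix, dist the 4/5/8-element distortion vector.
    void buildAndApplyWideAngleMap(const cv::Mat& frame, const cv::Mat& K,
                                   const cv::Mat& dist, int destImageWidth, cv::Mat& dst)
    {
        cv::Mat map1, map2;
        float scale = cv::initWideAngleProjMap(K, dist, frame.size(), destImageWidth,
                                               CV_16SC2, map1, map2);
        (void)scale; // focal scale of the spherical projection, returned above
        cv::remap(frame, dst, map1, map2, cv::INTER_LINEAR);
    }
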
// the whole testing is done here, run_func() is not utilized in this test
int CV_FindContourTest::validate_test_results( int /*test_case_idx*/ )
{
- int i, code = cvtest::TS::OK;
+ int code = cvtest::TS::OK;
cvCmpS( img[0], 0, img[0], CV_CMP_GT );
Mat _img[4];
for( int i = 0; i < 4; i++ )
_img[i] = cvarrToMat(img[i]);
-
+
code = cvtest::cmpEps2(ts, _img[0], _img[3], 0, true, "Comparing original image with the map of filled contours" );
if( code < 0 )
CvTreeNodeIterator iterator2;
int count3;
- for( i = 0; i < 2; i++ )
+ for(int i = 0; i < 2; i++ )
{
CvTreeNodeIterator iterator;
cvInitTreeNodeIterator( &iterator, i == 0 ? contours : contours2, INT_MAX );
goto _exit_;
}
- for( i = 0; i < seq1->total; i++ )
+ for(int i = 0; i < seq1->total; i++ )
{
CvPoint pt1;
CvPoint pt2;
void* result;
double low_high_range;
CvScalar low, high;
-
+
bool test_cpp;
};
}
-void CV_BaseShapeDescrTest::generate_point_set( void* points )
+void CV_BaseShapeDescrTest::generate_point_set( void* pointsSet )
{
RNG& rng = ts->get_rng();
int i, k, n, total, point_type;
}
memset( &reader, 0, sizeof(reader) );
- if( CV_IS_SEQ(points) )
+ if( CV_IS_SEQ(pointsSet) )
{
- CvSeq* ptseq = (CvSeq*)points;
+ CvSeq* ptseq = (CvSeq*)pointsSet;
total = ptseq->total;
point_type = CV_SEQ_ELTYPE(ptseq);
cvStartReadSeq( ptseq, &reader );
}
else
{
- CvMat* ptm = (CvMat*)points;
+ CvMat* ptm = (CvMat*)pointsSet;
assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
total = ptm->rows + ptm->cols - 1;
point_type = CV_MAT_TYPE(ptm->type);
}
generate_point_set( points );
-
+
test_cpp = (cvtest::randInt(rng) & 16) == 0;
return 1;
}
for( i = 0; i < point_count; i++ )
{
int idx = 0, on_edge = 0;
- double result = cvTsPointPolygonTest( p[i], h, hull_count, &idx, &on_edge );
+ double pptresult = cvTsPointPolygonTest( p[i], h, hull_count, &idx, &on_edge );
- if( result < 0 )
+ if( pptresult < 0 )
{
ts->printf( cvtest::TS::LOG, "The point #%d is outside of the convex hull\n", i );
code = cvtest::TS::FAIL_BAD_ACCURACY;
goto _exit_;
}
- if( result < FLT_EPSILON && !on_edge )
+ if( pptresult < FLT_EPSILON && !on_edge )
mask->data.ptr[idx] = (uchar)1;
}
for( i = 0; i < point_count; i++ )
{
int idx = 0, on_edge = 0;
- double result = cvTsPointPolygonTest( p[i], box_pt, 4, &idx, &on_edge );
- if( result < -eps )
+ double pptresult = cvTsPointPolygonTest( p[i], box_pt, 4, &idx, &on_edge );
+ if( pptresult < -eps )
{
ts->printf( cvtest::TS::LOG, "The point #%d is outside of the box\n", i );
code = cvtest::TS::FAIL_BAD_ACCURACY;
goto _exit_;
}
- if( result < eps )
+ if( pptresult < eps )
{
for( j = 0; j < 4; j++ )
{
}
-void CV_FitEllipseTest::generate_point_set( void* points )
+void CV_FitEllipseTest::generate_point_set( void* pointsSet )
{
RNG& rng = ts->get_rng();
int i, total, point_type;
}
memset( &reader, 0, sizeof(reader) );
- if( CV_IS_SEQ(points) )
+ if( CV_IS_SEQ(pointsSet) )
{
- CvSeq* ptseq = (CvSeq*)points;
+ CvSeq* ptseq = (CvSeq*)pointsSet;
total = ptseq->total;
point_type = CV_SEQ_ELTYPE(ptseq);
cvStartReadSeq( ptseq, &reader );
}
else
{
- CvMat* ptm = (CvMat*)points;
+ CvMat* ptm = (CvMat*)pointsSet;
assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
total = ptm->rows + ptm->cols - 1;
point_type = CV_MAT_TYPE(ptm->type);
{
public:
CV_FitEllipseSmallTest() {}
- ~CV_FitEllipseSmallTest() {}
+ ~CV_FitEllipseSmallTest() {}
protected:
void run(int)
{
c[0].push_back(Point(8, 6)*scale+ofs);
c[0].push_back(Point(8, 2)*scale+ofs);
c[0].push_back(Point(6, 0)*scale+ofs);
-
+
RotatedRect e = fitEllipse(c[0]);
CV_Assert( fabs(e.center.x - 4) <= 1. &&
fabs(e.center.y - 4) <= 1. &&
}
-void CV_FitLineTest::generate_point_set( void* points )
+void CV_FitLineTest::generate_point_set( void* pointsSet )
{
RNG& rng = ts->get_rng();
int i, k, n, total, point_type;
memset( &reader, 0, sizeof(reader) );
- if( CV_IS_SEQ(points) )
+ if( CV_IS_SEQ(pointsSet) )
{
- CvSeq* ptseq = (CvSeq*)points;
+ CvSeq* ptseq = (CvSeq*)pointsSet;
total = ptseq->total;
point_type = CV_MAT_DEPTH(CV_SEQ_ELTYPE(ptseq));
cvStartReadSeq( ptseq, &reader );
}
else
{
- CvMat* ptm = (CvMat*)points;
+ CvMat* ptm = (CvMat*)pointsSet;
assert( CV_IS_MAT(ptm) && CV_IS_MAT_CONT(ptm->type) );
total = ptm->rows + ptm->cols - 1;
point_type = CV_MAT_DEPTH(CV_MAT_TYPE(ptm->type));
}
-void CV_ContourMomentsTest::generate_point_set( void* points )
+void CV_ContourMomentsTest::generate_point_set( void* pointsSet )
{
RNG& rng = ts->get_rng();
float max_sz;
max_r_scale = cvtest::randReal(rng)*max_max_r_scale*0.01;
angle = cvtest::randReal(rng)*360;
- cvTsGenerateTousledBlob( center, axes, max_r_scale, angle, points, rng );
+ cvTsGenerateTousledBlob( center, axes, max_r_scale, angle, pointsSet, rng );
if( points1 )
points1->flags = CV_SEQ_MAGIC_VAL + CV_SEQ_POLYGON;
{
public:
CV_PerimeterAreaSliceTest();
- ~CV_PerimeterAreaSliceTest();
-protected:
+ ~CV_PerimeterAreaSliceTest();
+protected:
void run(int);
};
Ptr<CvMemStorage> storage = cvCreateMemStorage();
RNG& rng = theRNG();
const double min_r = 90, max_r = 120;
-
+
for( int i = 0; i < 100; i++ )
{
ts->update_context( this, i, true );
CvPoint center;
center.x = rng.uniform(cvCeil(max_r), cvFloor(640-max_r));
center.y = rng.uniform(cvCeil(max_r), cvFloor(480-max_r));
-
+
for( int j = 0; j < n; j++ )
{
CvPoint pt;
pt.y = cvRound(center.y - r*sin(phi));
cvSeqPush(contour, &pt);
}
-
+
CvSlice slice;
for(;;)
{
/*printf( "%d. (%d, %d) of %d, length = %d, length1 = %d\n",
i, slice.start_index, slice.end_index,
contour->total, cvSliceLength(slice, contour), cslice->total );
-
+
double area0 = cvContourArea(cslice);
- double area1 = cvContourArea(contour, slice);
+ double area1 = cvContourArea(contour, slice);
if( area0 != area1 )
{
ts->printf(cvtest::TS::LOG,
"The contour area slice is computed differently (%g vs %g)\n", area0, area1 );
- ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
+ ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
return;
}*/
{
ts->printf(cvtest::TS::LOG,
"The contour arc length is computed differently (%g vs %g)\n", len0, len1 );
- ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
+ ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
return;
}
}
Mat _ielement(element->nRows, element->nCols, CV_32S, element->values);
Mat _element;
_ielement.convertTo(_element, CV_8U);
- Point anchor(element->anchorX, element->anchorY);
- int border = BORDER_REPLICATE;
+ Point _anchor(element->anchorX, element->anchorY);
+ int _border = BORDER_REPLICATE;
if( optype == CV_MOP_ERODE )
{
- cvtest::erode( src, dst, _element, anchor, border );
+ cvtest::erode( src, dst, _element, _anchor, _border );
}
else if( optype == CV_MOP_DILATE )
{
- cvtest::dilate( src, dst, _element, anchor, border );
+ cvtest::dilate( src, dst, _element, _anchor, _border );
}
else
{
Mat temp;
if( optype == CV_MOP_OPEN )
{
- cvtest::erode( src, temp, _element, anchor, border );
- cvtest::dilate( temp, dst, _element, anchor, border );
+ cvtest::erode( src, temp, _element, _anchor, _border );
+ cvtest::dilate( temp, dst, _element, _anchor, _border );
}
else if( optype == CV_MOP_CLOSE )
{
- cvtest::dilate( src, temp, _element, anchor, border );
- cvtest::erode( temp, dst, _element, anchor, border );
+ cvtest::dilate( src, temp, _element, _anchor, _border );
+ cvtest::erode( temp, dst, _element, _anchor, _border );
}
else if( optype == CV_MOP_GRADIENT )
{
- cvtest::erode( src, temp, _element, anchor, border );
- cvtest::dilate( src, dst, _element, anchor, border );
+ cvtest::erode( src, temp, _element, _anchor, _border );
+ cvtest::dilate( src, dst, _element, _anchor, _border );
cvtest::add( dst, 1, temp, -1, Scalar::all(0), dst, dst.type() );
}
else if( optype == CV_MOP_TOPHAT )
{
- cvtest::erode( src, temp, _element, anchor, border );
- cvtest::dilate( temp, dst, _element, anchor, border );
+ cvtest::erode( src, temp, _element, _anchor, _border );
+ cvtest::dilate( temp, dst, _element, _anchor, _border );
cvtest::add( src, 1, dst, -1, Scalar::all(0), dst, dst.type() );
}
else if( optype == CV_MOP_BLACKHAT )
{
- cvtest::dilate( src, temp, _element, anchor, border );
- cvtest::erode( temp, dst, _element, anchor, border );
+ cvtest::dilate( src, temp, _element, _anchor, _border );
+ cvtest::erode( temp, dst, _element, _anchor, _border );
cvtest::add( dst, 1, src, -1, Scalar::all(0), dst, dst.type() );
}
else
void prepare_to_validation( int );
void fill_array( int test_case_idx, int i, int j, Mat& arr );
-
+
/*int write_default_params(CvFileStorage* fs);
void get_timing_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types
CvSize** whole_sizes, bool *are_images );
RNG& rng = ts->get_rng();
int depth, cn;
int i;
- double buf[8];
+ double buff[8];
cvtest::ArrayTest::get_test_array_types_and_sizes( test_case_idx, sizes, types );
depth = cvtest::randInt(rng) % 3;
types[INPUT_OUTPUT][1] = types[REF_INPUT_OUTPUT][1] = CV_8UC1;
types[OUTPUT][0] = types[REF_OUTPUT][0] = CV_64FC1;
sizes[OUTPUT][0] = sizes[REF_OUTPUT][0] = cvSize(9,1);
-
+
if( !use_mask )
sizes[INPUT_OUTPUT][1] = sizes[REF_INPUT_OUTPUT][1] = cvSize(0,0);
else
CvSize sz = sizes[INPUT_OUTPUT][0];
sizes[INPUT_OUTPUT][1] = sizes[REF_INPUT_OUTPUT][1] = cvSize(sz.width+2,sz.height+2);
}
-
+
seed_pt.x = cvtest::randInt(rng) % sizes[INPUT_OUTPUT][0].width;
seed_pt.y = cvtest::randInt(rng) % sizes[INPUT_OUTPUT][0].height;
l_diff = u_diff = Scalar::all(0.);
else
{
- Mat m( 1, 8, CV_16S, buf );
+ Mat m( 1, 8, CV_16S, buff );
rng.fill( m, RNG::NORMAL, Scalar::all(0), Scalar::all(32) );
for( i = 0; i < 4; i++ )
{
new_val = Scalar::all(0.);
for( i = 0; i < cn; i++ )
new_val.val[i] = cvtest::randReal(rng)*255;
-
+
test_cpp = (cvtest::randInt(rng) & 256) == 0;
}
void CV_FloodFillTest::fill_array( int test_case_idx, int i, int j, Mat& arr )
{
RNG& rng = ts->get_rng();
-
+
if( i != INPUT && i != INPUT_OUTPUT )
{
cvtest::ArrayTest::fill_array( test_case_idx, i, j, arr );
return;
}
-
+
if( j == 0 )
{
Mat tmp = arr;
int flags = connectivity + (mask_only ? CV_FLOODFILL_MASK_ONLY : 0) +
(range_type == 1 ? CV_FLOODFILL_FIXED_RANGE : 0) + (new_mask_val << 8);
double* odata = test_mat[OUTPUT][0].ptr<double>();
-
+
if(!test_cpp)
{
CvConnectedComp comp;
int cols = _img->cols, rows = _img->rows;
int u0 = 0, u1 = 0, u2 = 0;
double s0 = 0, s1 = 0, s2 = 0;
-
+
if( CV_MAT_DEPTH(_img->type) == CV_8U || CV_MAT_DEPTH(_img->type) == CV_32S )
{
tmp = cvCreateMat( rows, cols, CV_MAKETYPE(CV_32F,CV_MAT_CN(_img->type)) );
cvSeqPush( seq, &p );
}
}
- }
+ }
}
r.x = r.width = seed_pt.x;
int prepare_test_case( int test_case_idx );
int validate_test_results( int test_case_idx );
virtual void init_hist( int test_case_idx, int i );
-
+
virtual void get_hist_params( int test_case_idx );
virtual float** get_hist_ranges( int test_case_idx );
int uniform;
int gen_random_hist;
double gen_hist_max_val, gen_hist_sparse_nz_ratio;
-
+
int init_ranges;
int img_type;
int img_max_log_size;
max_log_size = cvtest::clipInt( max_log_size, 1, 20 );
img_max_log_size = cvReadInt( find_param( fs, "max_log_array_size" ), img_max_log_size );
img_max_log_size = cvtest::clipInt( img_max_log_size, 1, 9 );
-
+
max_cdims = cvReadInt( find_param( fs, "max_cdims" ), max_cdims );
max_cdims = cvtest::clipInt( max_cdims, 1, 6 );
max_dim_size = cvRound(pow(hist_size,1./cdims));
total_size = 1;
uniform = cvtest::randInt(rng) % 2;
- hist_type = cvtest::randInt(rng) % 2 ? CV_HIST_SPARSE : CV_HIST_ARRAY;
-
+ hist_type = cvtest::randInt(rng) % 2 ? CV_HIST_SPARSE : CV_HIST_ARRAY;
+
for( i = 0; i < cdims; i++ )
{
dims[i] = cvtest::randInt(rng) % (max_dim_size + 2) + 2;
if( !uniform )
- dims[i] = MIN(dims[i], max_ni_dim_size);
+ dims[i] = MIN(dims[i], max_ni_dim_size);
total_size *= dims[i];
}
float** CV_BaseHistTest::get_hist_ranges( int /*test_case_idx*/ )
{
double _low = low + range_delta, _high = high - range_delta;
-
+
if( !init_ranges )
return 0;
-
+
ranges.resize(cdims);
-
+
if( uniform )
{
_ranges.resize(cdims*2);
for( i = 0; i < cdims; i++ )
dims_sum += dims[i] + 1;
_ranges.resize(dims_sum);
-
+
for( i = 0; i < cdims; i++ )
{
int j, n = dims[i];
if( (pow(q,(double)n)-1)/(q-1.) >= _high-_low )
break;
}
-
+
if( j == 0 )
{
delta = (_high-_low)/n;
q = 1 + j*0.1;
delta = cvFloor((_high-_low)*(q-1)/(pow(q,(double)n) - 1));
delta = MAX(delta, 1.);
- }
+ }
val = _low;
-
+
for( j = 0; j <= n; j++ )
{
_ranges[j+ofs] = (float)MIN(val,_high);
ofs += n + 1;
}
}
-
+
return &ranges[0];
}
if( gen_random_hist )
{
RNG& rng = ts->get_rng();
-
+
if( hist_type == CV_HIST_ARRAY )
{
Mat h = cvarrToMat(hist[hist_i]->bins);
else
{
CvArr* arr = hist[hist_i]->bins;
- int i, j, total_size = 1, nz_count;
+ int i, j, totalSize = 1, nz_count;
int idx[CV_MAX_DIM];
for( i = 0; i < cdims; i++ )
- total_size *= dims[i];
+ totalSize *= dims[i];
- nz_count = cvtest::randInt(rng) % MAX( total_size/4, 100 );
- nz_count = MIN( nz_count, total_size );
+ nz_count = cvtest::randInt(rng) % MAX( totalSize/4, 100 );
+ nz_count = MIN( nz_count, totalSize );
// a zero number of non-zero elements should be allowed
for( i = 0; i < nz_count; i++ )
get_hist_params( test_case_idx );
r = get_hist_ranges( test_case_idx );
hist.resize(hist_count);
-
+
for( i = 0; i < hist_count; i++ )
{
hist[i] = cvCreateHist( cdims, dims, hist_type, r, uniform );
int prepare_test_case( int test_case_idx );
int validate_test_results( int test_case_idx );
void init_hist( int test_case_idx, int i );
-
+
CvMat* indices;
CvMat* values;
CvMat* values0;
iters = (cvtest::randInt(rng) % MAX(total_size/10,100)) + 1;
iters = MIN( iters, total_size*9/10 + 1 );
-
+
indices = cvCreateMat( 1, iters*cdims, CV_32S );
values = cvCreateMat( 1, iters, CV_32F );
values0 = cvCreateMat( 1, iters, CV_32F );
if( GET_BIT(lin_idx) )
values0->data.fl[i] = (float)(lin_idx+1);
}
-
+
cvReleaseMat( &bit_mask );
}
{
int code = cvtest::TS::OK;
int i, j, iters = values->cols;
-
+
for( i = 0; i < iters; i++ )
{
float v = values->data.fl[i], v0 = values0->data.fl[i];
}
if( !eq || total_size == 1 )
break;
- }
+ }
min_val0 = (float)(-cvtest::randReal(rng)*10 - FLT_EPSILON);
max_val0 = (float)(cvtest::randReal(rng)*10 + FLT_EPSILON + gen_hist_max_val);
int CV_MinMaxHistTest::validate_test_results( int /*test_case_idx*/ )
{
int code = cvtest::TS::OK;
-
+
if( cvIsNaN(min_val) || cvIsInf(min_val) ||
cvIsNaN(max_val) || cvIsInf(max_val) )
{
if( hist_type != CV_HIST_ARRAY && test_cpp )
{
cv::SparseMat h((CvSparseMat*)hist[0]->bins);
- cv::normalize(h, h, factor, CV_L1);
+ cv::normalize(h, h, factor, CV_L1);
cvReleaseSparseMat((CvSparseMat**)&hist[0]->bins);
hist[0]->bins = (CvSparseMat*)h;
}
{
int code = cvtest::TS::OK;
double sum = 0;
-
+
if( hist_type == CV_HIST_ARRAY )
{
int i;
CvSparseMat* sparse = (CvSparseMat*)hist[0]->bins;
CvSparseMatIterator iterator;
CvSparseNode *node;
-
+
for( node = cvInitSparseMatIterator( sparse, &iterator );
node != 0; node = cvGetNextSparseNode( &iterator ))
{
if( hist_type == CV_HIST_ARRAY )
{
orig_nz_count = total_size;
-
+
values = cvCreateMat( 1, total_size, CV_32F );
memcpy( values->data.fl, cvPtr1D( hist[0]->bins, 0 ), total_size*sizeof(float) );
}
node != 0; node = cvGetNextSparseNode( &iterator ), i++ )
{
const int* idx = CV_NODE_IDX(sparse,node);
-
+
OPENCV_ASSERT( i < orig_nz_count, "CV_ThreshHistTest::prepare_test_case", "Buffer overflow" );
values->data.fl[i] = *(float*)CV_NODE_VAL(sparse,node);
}
}
}
-
+
if( code > 0 && hist_type == CV_HIST_SPARSE )
{
if( sparse->heap->active_count > 0 )
{
float* ptr0 = (float*)cvPtr1D( hist[0]->bins, 0 );
float* ptr1 = (float*)cvPtr1D( hist[1]->bins, 0 );
-
+
for( i = 0; i < total_size; i++ )
{
double v0 = ptr0[i], v1 = ptr1[i];
const int* idx = CV_NODE_IDX(sparse0, node);
double v0 = *(float*)CV_NODE_VAL(sparse0, node);
double v1 = (float)cvGetRealND(sparse1, idx);
-
+
result0[CV_COMP_CORREL] += v0*v1;
result0[CV_COMP_INTERSECT] += MIN(v0,v1);
if( fabs(v0) > DBL_EPSILON )
void CV_CalcHistTest::clear()
{
int i;
-
+
for( i = 0; i <= CV_MAX_DIM; i++ )
cvReleaseImage( &images[i] );
img_type == CV_8U ? IPL_DEPTH_8U : IPL_DEPTH_32F, nch );
channels[i] = cvtest::randInt(rng) % nch;
Mat images_i = cvarrToMat(images[i]);
-
+
cvtest::randUni( rng, images_i, Scalar::all(low), Scalar::all(high) );
}
else if( i == CV_MAX_DIM && cvtest::randInt(rng) % 2 )
// create mask
images[i] = cvCreateImage( img_size, IPL_DEPTH_8U, 1 );
Mat images_i = cvarrToMat(images[i]);
-
+
// make ~25% pixels in the mask non-zero
cvtest::randUni( rng, images_i, Scalar::all(-2), Scalar::all(2) );
}
{
float val[CV_MAX_DIM];
int idx[CV_MAX_DIM];
-
+
if( mptr && !mptr[x] )
continue;
if( img_depth == IPL_DEPTH_8U )
{
ts->printf( cvtest::TS::LOG, "The histogram does not match to the reference one\n" );
code = cvtest::TS::FAIL_BAD_ACCURACY;
-
+
}
if( code < 0 )
void CV_CalcBackProjectTest::clear()
{
int i;
-
+
for( i = 0; i < CV_MAX_DIM+3; i++ )
cvReleaseImage( &images[i] );
{
int idx = cvtest::randInt(rng) % img_len;
double val = cvtest::randReal(rng)*(high - low) + low;
-
+
if( img_type == CV_8U )
((uchar*)data)[idx] = (uchar)cvRound(val);
else
float val[CV_MAX_DIM];
float bin_val = 0;
int idx[CV_MAX_DIM];
-
+
if( img_depth == IPL_DEPTH_8U )
for( k = 0; k < cdims; k++ )
val[k] = plane[k].ptr[x*nch[k]];
void CV_CalcBackProjectPatchTest::clear()
{
int i;
-
+
for( i = 0; i < CV_MAX_DIM+2; i++ )
cvReleaseImage( &images[i] );
{
int idx = cvtest::randInt(rng) % img_len;
double val = cvtest::randReal(rng)*(high - low) + low;
-
+
if( img_type == CV_8U )
((uchar*)data)[idx] = (uchar)cvRound(val);
else
double factor, int* channels )
{
CvHistogram* model = 0;
-
+
IplImage imgstub[CV_MAX_DIM], *img[CV_MAX_DIM];
IplROI roi;
int i, dims;
for( x = 0; x < size.width; x++ )
{
double result;
-
+
roi.xOffset = x;
roi.yOffset = y;
roi.width = patch_size.width;
cvTsCalcBackProjectPatch( images, images[CV_MAX_DIM+1],
patch_size, hist[0], method, factor, channels );
-
+
Mat a = cvarrToMat(images[CV_MAX_DIM]), b = cvarrToMat(images[CV_MAX_DIM+1]);
code = cvtest::cmpEps2( ts, a, b, err_level, true, "BackProjectPatch result" );
int CV_BayesianProbTest::prepare_test_case( int test_case_idx )
{
RNG& rng = ts->get_rng();
-
+
hist_count = (cvtest::randInt(rng) % (MAX_HIST/2-1) + 2)*2;
hist_count = MIN( hist_count, MAX_HIST );
int code = CV_BaseHistTest::prepare_test_case( test_case_idx );
TEST(Imgproc_Hist_CalcBackProject, accuracy) { CV_CalcBackProjectTest test; test.safe_run(); }
TEST(Imgproc_Hist_CalcBackProjectPatch, accuracy) { CV_CalcBackProjectPatchTest test; test.safe_run(); }
TEST(Imgproc_Hist_BayesianProb, accuracy) { CV_BayesianProbTest test; test.safe_run(); }
-
+
/* End Of File */
if( test_mat[INPUT_OUTPUT][0].cols >= img.cols &&
test_mat[INPUT_OUTPUT][0].rows >= img.rows )
space_scale = spatial_scale_zoom;
-
+
for( i = 0; i < img.rows; i++ )
{
uchar* ptr = img.ptr(i);
}*/
cv::Mat src(1, cols*cn, CV_32F, &buffer[0]);
cv::Mat dst(1, cols*cn, depth, ptr);
- src.convertTo(dst, dst.type());
+ src.convertTo(dst, dst.type());
}
return code;
CvMat* x_idx = cvCreateMat( 1, dst->cols, CV_32SC1 );
CvMat* y_idx = cvCreateMat( 1, dst->rows, CV_32SC1 );
int* x_tab = x_idx->data.i;
- int elem_size = CV_ELEM_SIZE(src->type);
+ int elem_size = CV_ELEM_SIZE(src->type);
int drows = dst->rows, dcols = dst->cols;
if( interpolation == CV_INTER_NN )
{
double scale_x = (double)src->cols/dcols;
double scale_y = (double)src->rows/drows;
-
+
for( j = 0; j < dcols; j++ )
{
double f = ((j+0.5)*scale_x - 0.5);
{
uchar* dptr = dst->data.ptr + dst->step*i;
const uchar* sptr0 = src->data.ptr + src->step*y_idx->data.i[i];
-
+
for( j = 0; j < dcols; j++, dptr += elem_size )
{
const uchar* sptr = sptr0 + x_tab[j];
xs -= ixs;
ys -= iys;
-
+
switch( depth )
{
case CV_8U:
RNG& rng = ts->get_rng();
int code = CV_ImgWarpBaseTest::prepare_test_case( test_case_idx );
const Mat& src = test_mat[INPUT][0];
- const Mat& dst = test_mat[INPUT_OUTPUT][0];
+ const Mat& dst = test_mat[INPUT_OUTPUT][0];
Mat& mat = test_mat[INPUT][1];
CvPoint2D32f center;
double scale, angle;
if( code <= 0 )
return code;
- double buf[6];
- Mat tmp( 2, 3, mat.type(), buf );
+ double buffer[6];
+ Mat tmp( 2, 3, mat.type(), buffer );
center.x = (float)((cvtest::randReal(rng)*1.2 - 0.1)*src.cols);
center.y = (float)((cvtest::randReal(rng)*1.2 - 0.1)*src.rows);
RNG& rng = ts->get_rng();
int code = CV_ImgWarpBaseTest::prepare_test_case( test_case_idx );
const CvMat& src = test_mat[INPUT][0];
- const CvMat& dst = test_mat[INPUT_OUTPUT][0];
+ const CvMat& dst = test_mat[INPUT_OUTPUT][0];
Mat& mat = test_mat[INPUT][1];
Point2f s[4], d[4];
int i;
s[3] = Point2f(0,src.rows-1.f);
d[3] = Point2f(0,dst.rows-1.f);
- float buf[16];
- Mat tmp( 1, 16, CV_32FC1, buf );
+    float buffer[16];
+    Mat tmp( 1, 16, CV_32FC1, buffer );
rng.fill( tmp, CV_RAND_NORMAL, Scalar::all(0.), Scalar::all(0.1) );
for( i = 0; i < 4; i++ )
{
- s[i].x += buf[i*4]*src.cols/2;
- s[i].y += buf[i*4+1]*src.rows/2;
- d[i].x += buf[i*4+2]*dst.cols/2;
- d[i].y += buf[i*4+3]*dst.rows/2;
+        s[i].x += buffer[i*4]*src.cols/2;
+        s[i].y += buffer[i*4+1]*src.rows/2;
+        d[i].x += buffer[i*4+2]*dst.cols/2;
+        d[i].y += buffer[i*4+3]*dst.rows/2;
}
cv::getPerspectiveTransform( s, d ).convertTo( mat, mat.depth() );
double xs = x*m[0] + y*m[1] + m[2];
double ys = x*m[3] + y*m[4] + m[5];
double ds = x*m[6] + y*m[7] + m[8];
-
+
ds = ds ? 1./ds : 0;
xs *= ds;
ys *= ds;
-
+
mapx.at<float>(y, x) = (float)xs;
mapy.at<float>(y, x) = (float)ys;
}
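// A standalone sketch of the perspective mapping computed above: a point (x, y)
// is pushed through a row-major 3x3 homography m and dehomogenized, with the
// same guard that turns a zero denominator into a zero result. The matrix
// entries below are arbitrary example values.
#include <cstdio>

int main()
{
    double m[9] = { 1.05,  0.02,  3.0,
                   -0.01,  0.98, -2.0,
                    1e-4,  2e-4,  1.0 };
    double x = 100, y = 50;

    double xs = x*m[0] + y*m[1] + m[2];
    double ys = x*m[3] + y*m[4] + m[5];
    double ds = x*m[6] + y*m[7] + m[8];

    ds = ds ? 1./ds : 0;
    printf("mapped point: (%g, %g)\n", xs*ds, ys*ds);
    return 0;
}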
void fill_array( int test_case_idx, int i, int j, Mat& arr );
private:
- bool useCPlus;
- cv::Mat input0;
- cv::Mat input1;
- cv::Mat input2;
- cv::Mat input_new_cam;
- cv::Mat input_output;
-
- bool zero_new_cam;
- bool zero_distortion;
+ bool useCPlus;
+ cv::Mat input0;
+ cv::Mat input1;
+ cv::Mat input2;
+ cv::Mat input_new_cam;
+ cv::Mat input_output;
+
+ bool zero_new_cam;
+ bool zero_distortion;
};
//spatial_scale_zoom = spatial_scale_decimate;
test_array[INPUT].push_back(NULL);
test_array[INPUT].push_back(NULL);
- test_array[INPUT].push_back(NULL);
+ test_array[INPUT].push_back(NULL);
spatial_scale_decimate = spatial_scale_zoom;
}
RNG& rng = ts->get_rng();
CV_ImgWarpBaseTest::get_test_array_types_and_sizes( test_case_idx, sizes, types );
int type = types[INPUT][0];
- type = CV_MAKETYPE( CV_8U, CV_MAT_CN(type) );
+ type = CV_MAKETYPE( CV_8U, CV_MAT_CN(type) );
types[INPUT][0] = types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = type;
types[INPUT][1] = cvtest::randInt(rng)%2 ? CV_64F : CV_32F;
types[INPUT][2] = cvtest::randInt(rng)%2 ? CV_64F : CV_32F;
sizes[INPUT][1] = cvSize(3,3);
sizes[INPUT][2] = cvtest::randInt(rng)%2 ? cvSize(4,1) : cvSize(1,4);
- types[INPUT][3] = types[INPUT][1];
- sizes[INPUT][3] = sizes[INPUT][1];
+ types[INPUT][3] = types[INPUT][1];
+ sizes[INPUT][3] = sizes[INPUT][1];
interpolation = CV_INTER_LINEAR;
}
void CV_UndistortTest::run_func()
{
- if (!useCPlus)
- {
+ if (!useCPlus)
+ {
CvMat a = test_mat[INPUT][1], k = test_mat[INPUT][2];
- cvUndistort2( test_array[INPUT][0], test_array[INPUT_OUTPUT][0], &a, &k);
- }
- else
- {
- if (zero_distortion)
- {
- cv::undistort(input0,input_output,input1,cv::Mat());
- }
- else
- {
- cv::undistort(input0,input_output,input1,input2);
- }
- }
+ cvUndistort2( test_array[INPUT][0], test_array[INPUT_OUTPUT][0], &a, &k);
+ }
+ else
+ {
+ if (zero_distortion)
+ {
+ cv::undistort(input0,input_output,input1,cv::Mat());
+ }
+ else
+ {
+ cv::undistort(input0,input_output,input1,input2);
+ }
+ }
}
const Mat& src = test_mat[INPUT][0];
double k[4], a[9] = {0,0,0,0,0,0,0,0,1};
- double new_cam[9] = {0,0,0,0,0,0,0,0,1};
+ double new_cam[9] = {0,0,0,0,0,0,0,0,1};
double sz = MAX(src.rows, src.cols);
-
- Mat& _new_cam0 = test_mat[INPUT][3];
+
+ Mat& _new_cam0 = test_mat[INPUT][3];
Mat _new_cam(test_mat[INPUT][3].rows,test_mat[INPUT][3].cols,CV_64F,new_cam);
Mat& _a0 = test_mat[INPUT][1];
Mat _a(3,3,CV_64F,a);
_a.convertTo(_a0, _a0.depth());
- zero_distortion = (cvtest::randInt(rng)%2) == 0 ? false : true;
+ zero_distortion = (cvtest::randInt(rng)%2) == 0 ? false : true;
_k.convertTo(_k0, _k0.depth());
- zero_new_cam = (cvtest::randInt(rng)%2) == 0 ? false : true;
+ zero_new_cam = (cvtest::randInt(rng)%2) == 0 ? false : true;
_new_cam.convertTo(_new_cam0, _new_cam0.depth());
- //Testing C++ code
- useCPlus = ((cvtest::randInt(rng) % 2)!=0);
- if (useCPlus)
- {
- input0 = test_mat[INPUT][0];
- input1 = test_mat[INPUT][1];
- input2 = test_mat[INPUT][2];
- input_new_cam = test_mat[INPUT][3];
- }
+ //Testing C++ code
+ useCPlus = ((cvtest::randInt(rng) % 2)!=0);
+ if (useCPlus)
+ {
+ input0 = test_mat[INPUT][0];
+ input1 = test_mat[INPUT][1];
+ input2 = test_mat[INPUT][2];
+ input_new_cam = test_mat[INPUT][3];
+ }
return code;
}
void CV_UndistortTest::prepare_to_validation( int /*test_case_idx*/ )
{
- if (useCPlus)
- {
+ if (useCPlus)
+ {
Mat& output = test_mat[INPUT_OUTPUT][0];
input_output.convertTo(output, output.type());
- }
+ }
Mat& src = test_mat[INPUT][0];
Mat& dst = test_mat[REF_INPUT_OUTPUT][0];
Mat& dst0 = test_mat[INPUT_OUTPUT][0];
void fill_array( int test_case_idx, int i, int j, Mat& arr );
private:
- bool dualChannel;
+ bool dualChannel;
};
CvSize sz = sizes[OUTPUT][0];
types[INPUT][0] = types[INPUT][1] = depth;
- dualChannel = cvtest::randInt(rng)%2 == 0;
- types[OUTPUT][0] = types[OUTPUT][1] =
+ dualChannel = cvtest::randInt(rng)%2 == 0;
+ types[OUTPUT][0] = types[OUTPUT][1] =
types[REF_OUTPUT][0] = types[REF_OUTPUT][1] = dualChannel ? CV_32FC2 : CV_32F;
sizes[INPUT][0] = cvSize(3,3);
sizes[INPUT][1] = cvtest::randInt(rng)%2 ? cvSize(4,1) : cvSize(1,4);
void CV_UndistortMapTest::run_func()
{
CvMat a = test_mat[INPUT][0], k = test_mat[INPUT][1];
-
- if (!dualChannel )
- cvInitUndistortMap( &a, &k, test_array[OUTPUT][0], test_array[OUTPUT][1] );
- else
- cvInitUndistortMap( &a, &k, test_array[OUTPUT][0], 0 );
+
+ if (!dualChannel )
+ cvInitUndistortMap( &a, &k, test_array[OUTPUT][0], test_array[OUTPUT][1] );
+ else
+ cvInitUndistortMap( &a, &k, test_array[OUTPUT][0], 0 );
}
_a.convertTo(_a0, _a0.depth());
_k.convertTo(_k0, _k0.depth());
- if (dualChannel)
- {
+ if (dualChannel)
+ {
test_mat[REF_OUTPUT][1] = Scalar::all(0);
- test_mat[OUTPUT][1] = Scalar::all(0);
- }
+ test_mat[OUTPUT][1] = Scalar::all(0);
+ }
return code;
}
{
int sstep = (int)(src.step / sizeof(float));
int scols = src.cols, srows = src.rows;
-
+
CV_Assert( src.depth() == CV_32F && src.type() == dst.type() );
int cn = dst.channels();
int src_depth = cvtest::randInt(rng) % 2, dst_depth;
int cn = cvtest::randInt(rng) % 2 ? 3 : 1;
CvSize src_size, dst_size;
-
+
dst_depth = src_depth = src_depth == 0 ? CV_8U : CV_32F;
if( src_depth < CV_32F && cvtest::randInt(rng) % 2 )
dst_depth = CV_32F;
-
+
types[INPUT][0] = CV_MAKETYPE(src_depth,cn);
types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = CV_MAKETYPE(dst_depth,cn);
dst_size.width = MIN(dst_size.width,src_size.width);
dst_size.height = MIN(dst_size.width,src_size.height);
sizes[INPUT_OUTPUT][0] = sizes[REF_INPUT_OUTPUT][0] = dst_size;
-
+
center.x = (float)(cvtest::randReal(rng)*src_size.width);
center.y = (float)(cvtest::randReal(rng)*src_size.height);
interpolation = CV_INTER_LINEAR;
-
+
test_cpp = (cvtest::randInt(rng) & 256) == 0;
}
RNG& rng = ts->get_rng();
int msz, src_depth = cvtest::randInt(rng) % 2, dst_depth;
int cn = cvtest::randInt(rng) % 2 ? 3 : 1;
-
+
dst_depth = src_depth = src_depth == 0 ? CV_8U : CV_32F;
if( src_depth < CV_32F && cvtest::randInt(rng) % 2 )
dst_depth = CV_32F;
-
+
types[INPUT][0] = CV_MAKETYPE(src_depth,cn);
types[INPUT_OUTPUT][0] = types[REF_INPUT_OUTPUT][0] = CV_MAKETYPE(dst_depth,cn);
center.y = (float)((cvtest::randReal(rng)*1.2 - 0.1)*src.rows);
angle = cvtest::randReal(rng)*360;
scale = cvtest::randReal(rng)*0.2 + 0.9;
-
+
// y = Ax + b -> x = A^-1(y - b) = A^-1*y - A^-1*b
scale = 1./scale;
angle = angle*(CV_PI/180.);
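// A minimal sketch of the inversion noted in the comment above: for y = A*x + b
// the inverse map is x = A^-1*y - A^-1*b. The 2x3 matrix below is a generic
// [A | b] example, not the rotation matrix the test actually builds.
#include <cstdio>

int main()
{
    double m[6] = { 0.9, -0.1, 10.0,    // [ a00 a01 | b0 ]
                    0.2,  1.1, -5.0 };  // [ a10 a11 | b1 ]

    double det = m[0]*m[4] - m[1]*m[3];
    double id = det ? 1./det : 0;

    double inv[6];
    inv[0] =  m[4]*id; inv[1] = -m[1]*id;        // A^-1 via the 2x2 adjugate
    inv[3] = -m[3]*id; inv[4] =  m[0]*id;
    inv[2] = -(inv[0]*m[2] + inv[1]*m[5]);       // translation part: -A^-1*b
    inv[5] = -(inv[3]*m[2] + inv[4]*m[5]);

    printf("inverse affine: [%g %g %g; %g %g %g]\n",
           inv[0], inv[1], inv[2], inv[3], inv[4], inv[5]);
    return 0;
}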
points_vector.push_back(p21);
points_vector.push_back(p22);
- points_vector.push_back(p23);
+ points_vector.push_back(p23);
std::vector<float> line;
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
}
else if( depth == CV_16S )
{
- float min_val = SHRT_MIN-100.f, max_val = SHRT_MAX+100.f;
+ float min_val = SHRT_MIN-100.f;
+ max_val = SHRT_MAX+100.f;
thresh_val = (float)(cvtest::randReal(rng)*(max_val - min_val) + min_val);
max_val = (float)(cvtest::randReal(rng)*(max_val - min_val) + min_val);
if( cvtest::randInt(rng)%4 == 0 )
endif()
add_dependencies(${the_module} ${api_target})
+ocv_warnings_disable(CMAKE_CXX_FLAGS -Wmissing-declarations)
+
# Additional target properties
set_target_properties(${the_module} PROPERTIES
OUTPUT_NAME "${the_module}"
#include <jni.h>
+#include "opencv2/opencv_modules.hpp"
+
+#ifdef HAVE_OPENCV_NONFREE
+# include "opencv2/nonfree/nonfree.hpp"
+#endif
+
+#ifdef HAVE_OPENCV_FEATURES2D
+# include "opencv2/features2d/features2d.hpp"
+#endif
+
+#ifdef HAVE_OPENCV_VIDEO
+# include "opencv2/video/video.hpp"
+#endif
+
+#ifdef HAVE_OPENCV_ML
+# include "opencv2/ml/ml.hpp"
+#endif
+
extern "C" {
JNIEXPORT jint JNICALL
if (vm->GetEnv((void**) &env, JNI_VERSION_1_6) != JNI_OK)
return -1;
+ bool init = true;
+#ifdef HAVE_OPENCV_NONFREE
+ init &= cv::initModule_nonfree();
+#endif
+#ifdef HAVE_OPENCV_FEATURES2D
+ init &= cv::initModule_features2d();
+#endif
+#ifdef HAVE_OPENCV_VIDEO
+ init &= cv::initModule_video();
+#endif
+#ifdef HAVE_OPENCV_ML
+ init &= cv::initModule_ml();
+#endif
+
+ if(!init)
+ return -1;
+
/* get class with (*env)->FindClass */
/* register methods with (*env)->RegisterNatives */
//do nothing
}
-} // extern "C"
-
-#include "opencv2/opencv_modules.hpp"
-
-#if HAVE_OPENCV_MODULES_NONFREE
-#include "opencv2/nonfree/nonfree.hpp"
-static bool makeUseOfNonfree = initModule_nonfree();
-#endif
-
-#if HAVE_OPENCV_MODULES_FEATURES2D
-#include "opencv2/features2d/features2d.hpp"
-static bool makeUseOfNonfree = initModule_features2d();
-#endif
\ No newline at end of file
+} // extern "C"
\ No newline at end of file
#include "opencv2/core/core_c.h"
#include <stdio.h>
-#if _MSC_VER >= 1200 || defined __BORLANDC__
+#if (defined _MSC_VER && _MSC_VER >= 1200) || defined __BORLANDC__
#define cv_stricmp stricmp
#define cv_strnicmp strnicmp
#if defined WINCE
{
public:
CvImage() : image(0), refcount(0) {}
- CvImage( CvSize size, int depth, int channels )
+ CvImage( CvSize _size, int _depth, int _channels )
{
- image = cvCreateImage( size, depth, channels );
+ image = cvCreateImage( _size, _depth, _channels );
refcount = image ? new int(1) : 0;
}
CvImage clone() { return CvImage(image ? cvCloneImage(image) : 0); }
- void create( CvSize size, int depth, int channels )
+ void create( CvSize _size, int _depth, int _channels )
{
if( !image || !refcount ||
- image->width != size.width || image->height != size.height ||
- image->depth != depth || image->nChannels != channels )
- attach( cvCreateImage( size, depth, channels ));
+ image->width != _size.width || image->height != _size.height ||
+ image->depth != _depth || image->nChannels != _channels )
+ attach( cvCreateImage( _size, _depth, _channels ));
}
void release() { detach(); }
int coi() const { return !image || !image->roi ? 0 : image->roi->coi; }
- void set_roi(CvRect roi) { cvSetImageROI(image,roi); }
+ void set_roi(CvRect _roi) { cvSetImageROI(image,_roi); }
void reset_roi() { cvResetImageROI(image); }
- void set_coi(int coi) { cvSetImageCOI(image,coi); }
+ void set_coi(int _coi) { cvSetImageCOI(image,_coi); }
int depth() const { return image ? image->depth : 0; }
int channels() const { return image ? image->nChannels : 0; }
int pix_size() const { return image ? ((image->depth & 255)>>3)*image->nChannels : 0; }
{
public:
CvMatrix() : matrix(0) {}
- CvMatrix( int rows, int cols, int type )
- { matrix = cvCreateMat( rows, cols, type ); }
+ CvMatrix( int _rows, int _cols, int _type )
+ { matrix = cvCreateMat( _rows, _cols, _type ); }
- CvMatrix( int rows, int cols, int type, CvMat* hdr,
- void* data=0, int step=CV_AUTOSTEP )
- { matrix = cvInitMatHeader( hdr, rows, cols, type, data, step ); }
+ CvMatrix( int _rows, int _cols, int _type, CvMat* hdr,
+ void* _data=0, int _step=CV_AUTOSTEP )
+ { matrix = cvInitMatHeader( hdr, _rows, _cols, _type, _data, _step ); }
CvMatrix( int rows, int cols, int type, CvMemStorage* storage, bool alloc_data=true );
- CvMatrix( int rows, int cols, int type, void* data, int step=CV_AUTOSTEP )
- { matrix = cvCreateMatHeader( rows, cols, type );
- cvSetData( matrix, data, step ); }
+ CvMatrix( int _rows, int _cols, int _type, void* _data, int _step=CV_AUTOSTEP )
+ { matrix = cvCreateMatHeader( _rows, _cols, _type );
+ cvSetData( matrix, _data, _step ); }
CvMatrix( CvMat* m )
{ matrix = m; }
addref();
}
- void create( int rows, int cols, int type )
+ void create( int _rows, int _cols, int _type )
{
if( !matrix || !matrix->refcount ||
- matrix->rows != rows || matrix->cols != cols ||
- CV_MAT_TYPE(matrix->type) != type )
- set( cvCreateMat( rows, cols, type ), false );
+ matrix->rows != _rows || matrix->cols != _cols ||
+ CV_MAT_TYPE(matrix->type) != _type )
+ set( cvCreateMat( _rows, _cols, _type ), false );
}
void addref() const
const uchar* data() const { return matrix ? matrix->data.ptr : 0; }
int step() const { return matrix ? matrix->step : 0; }
- void set_data( void* data, int step=CV_AUTOSTEP )
- { cvSetData( matrix, data, step ); }
+ void set_data( void* _data, int _step=CV_AUTOSTEP )
+ { cvSetData( matrix, _data, _step ); }
uchar* row(int i) { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; }
const uchar* row(int i) const
: x(0), y(0), image(NULL)
{}
- BaseKeypoint(int x, int y, IplImage* image)
- : x(x), y(y), image(image)
+ BaseKeypoint(int _x, int _y, IplImage* _image)
+ : x(_x), y(_y), image(_image)
{}
};
class CV_EXPORTS BruteForceMatcher : public BFMatcher
{
public:
- BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {}
+ BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {(void)d;}
virtual ~BruteForceMatcher() {}
};
CvBGCodeBookElem* freeList;
} CvBGCodeBookModel;
-CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel();
+CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel( void );
CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model );
CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
#include "precomp.hpp"
-#if _MSC_VER >= 1200
-#pragma warning(disable:4786) // Disable MSVC warnings in the standard library.
-#pragma warning(disable:4100)
-#pragma warning(disable:4512)
-#endif
#include <stdio.h>
#include <map>
#include <algorithm>
-#if _MSC_VER >= 1200
-#pragma warning(default:4100)
-#pragma warning(default:4512)
-#endif
#define ARRAY_SIZEOF(a) (sizeof(a)/sizeof((a)[0]))
cvReleaseImage(&gray_img);
CV_CALL(gray_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
}
-
+
CV_CALL(cvCvtColor(samples[c], gray_img, CV_BGR2GRAY));
img = gray_img;
etalon_size, points, &count) != 0;
if (count == 0)
continue;
-
+
// If found is true, it means all the points were found (count = num_points).
// If found is false but count is non-zero, it means that not all points were found.
{ 0.f, 1.f, 0.f, 0.f },
{ 0.f, 0.f, 1.f, 0.f },
{ transVect[0], transVect[1], transVect[2], 1.f } };
-
+
float rmat[4][4] = { { rotMatr[0], rotMatr[1], rotMatr[2], 0.f },
{ rotMatr[3], rotMatr[4], rotMatr[5], 0.f },
{ rotMatr[6], rotMatr[7], rotMatr[8], 0.f },
MultMatrix(camera_info[c].mat, tmat, rmat);
- // change the transformation of the cameras to put them in the world coordinate
+ // change the transformation of the cameras to put them in the world coordinate
// system we want to work with.
// Start with an identity matrix; then fill in the values to accomplish
#include "assert.h"
#include "math.h"
-#if _MSC_VER >= 1400
-#pragma warning(disable: 4512) // suppress "assignment operator could not be generated"
-#endif
-
-// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
-// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
-// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
+// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
+// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
+// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
#undef __deref
#undef __valuetype
private:
struct node {
- int dim; // split dimension; >=0 for nodes, -1 for leaves
- __valuetype value; // if leaf, value of leaf
- int left, right; // node indices of left and right branches
- scalar_type boundary; // left if deref(value,dim)<=boundary, otherwise right
+ int dim; // split dimension; >=0 for nodes, -1 for leaves
+ __valuetype value; // if leaf, value of leaf
+ int left, right; // node indices of left and right branches
+ scalar_type boundary; // left if deref(value,dim)<=boundary, otherwise right
};
typedef std::vector < node > node_array;
- __deref deref; // requires operator() (__valuetype lhs,int dim)
+ __deref deref; // requires operator() (__valuetype lhs,int dim)
- node_array nodes; // node storage
- int point_dim; // dimension of points (the k in kd-tree)
- int root_node; // index of root node, -1 if empty tree
+ node_array nodes; // node storage
+ int point_dim; // dimension of points (the k in kd-tree)
+ int root_node; // index of root node, -1 if empty tree
// for given set of point indices, compute dimension of highest variance
template < class __instype, class __valuector >
int dimension_of_highest_variance(__instype * first, __instype * last,
- __valuector ctor) {
+ __valuector ctor) {
assert(last - first > 0);
accum_type maxvar = -std::numeric_limits < accum_type >::max();
for (int j = 0; j < point_dim; ++j) {
accum_type mean = 0;
for (__instype * k = first; k < last; ++k)
- mean += deref(ctor(*k), j);
+ mean += deref(ctor(*k), j);
mean /= last - first;
accum_type var = 0;
for (__instype * k = first; k < last; ++k) {
- accum_type diff = accum_type(deref(ctor(*k), j)) - mean;
- var += diff * diff;
+ accum_type diff = accum_type(deref(ctor(*k), j)) - mean;
+ var += diff * diff;
}
var /= last - first;
assert(maxj != -1 || var >= maxvar);
if (var >= maxvar) {
- maxvar = var;
- maxj = j;
+ maxvar = var;
+ maxj = j;
}
}
return maxj;
}
- // given point indices and dimension, find index of median; (almost) modifies [first,last)
+ // given point indices and dimension, find index of median; (almost) modifies [first,last)
// such that points_in[first,median]<=point[median], points_in(median,last)>point[median].
// implemented as partial quicksort; expected linear perf.
template < class __instype, class __valuector >
__instype * median_partition(__instype * first, __instype * last,
- int dim, __valuector ctor) {
+ int dim, __valuector ctor) {
assert(last - first > 0);
__instype *k = first + (last - first) / 2;
median_partition(first, last, k, dim, ctor);
bool operator() (const __instype & lhs) const {
return deref(ctor(lhs), dim) <= deref(ctor(pivot), dim);
}
+ private:
+ median_pr& operator=(const median_pr&);
};
template < class __instype, class __valuector >
- void median_partition(__instype * first, __instype * last,
- __instype * k, int dim, __valuector ctor) {
+ void median_partition(__instype * first, __instype * last,
+ __instype * k, int dim, __valuector ctor) {
int pivot = (int)((last - first) / 2);
std::swap(first[pivot], last[-1]);
__instype *middle = std::partition(first, last - 1,
- median_pr < __instype, __valuector >
- (last[-1], dim, deref, ctor));
+ median_pr < __instype, __valuector >
+ (last[-1], dim, deref, ctor));
std::swap(*middle, last[-1]);
if (middle < k)
__instype *median = median_partition(first, last, dim, ctor);
__instype *split = median;
- for (; split != last && deref(ctor(*split), dim) ==
- deref(ctor(*median), dim); ++split);
+ for (; split != last && deref(ctor(*split), dim) ==
+ deref(ctor(*median), dim); ++split);
if (split == last) { // leaf
- int nexti = -1;
- for (--split; split >= first; --split) {
- int i = (int)nodes.size();
- node & n = *nodes.insert(nodes.end(), node());
- n.dim = -1;
- n.value = ctor(*split);
- n.left = -1;
- n.right = nexti;
- nexti = i;
- }
-
- return nexti;
+ int nexti = -1;
+ for (--split; split >= first; --split) {
+ int i = (int)nodes.size();
+ node & n = *nodes.insert(nodes.end(), node());
+ n.dim = -1;
+ n.value = ctor(*split);
+ n.left = -1;
+ n.right = nexti;
+ nexti = i;
+ }
+
+ return nexti;
} else { // node
- int i = (int)nodes.size();
- // note that recursive insert may invalidate this ref
- node & n = *nodes.insert(nodes.end(), node());
+ int i = (int)nodes.size();
+ // note that recursive insert may invalidate this ref
+ node & n = *nodes.insert(nodes.end(), node());
- n.dim = dim;
- n.boundary = deref(ctor(*median), dim);
+ n.dim = dim;
+ n.boundary = deref(ctor(*median), dim);
- int left = insert(first, split, ctor);
- nodes[i].left = left;
- int right = insert(split, last, ctor);
- nodes[i].right = right;
+ int left = insert(first, split, ctor);
+ nodes[i].left = left;
+ int right = insert(split, last, ctor);
+ nodes[i].right = right;
- return i;
+ return i;
}
}
}
if (n.dim >= 0) { // node
if (deref(p, n.dim) <= n.boundary) // left
- r = remove(&n.left, p);
+ r = remove(&n.left, p);
else // right
- r = remove(&n.right, p);
+ r = remove(&n.right, p);
// if terminal, remove this node
if (n.left == -1 && n.right == -1)
- *i = -1;
+ *i = -1;
return r;
} else { // leaf
if (n.value == p) {
- *i = n.right;
- return true;
+ *i = n.right;
+ return true;
} else
- return remove(&n.right, p);
+ return remove(&n.right, p);
}
}
}
// given points, initialize a balanced tree
CvKDTree(__valuetype * first, __valuetype * last, int _point_dim,
- __deref _deref = __deref())
+ __deref _deref = __deref())
: deref(_deref) {
set_data(first, last, _point_dim, identity_ctor());
}
// given points, initialize a balanced tree
template < class __instype, class __valuector >
CvKDTree(__instype * first, __instype * last, int _point_dim,
- __valuector ctor, __deref _deref = __deref())
+ __valuector ctor, __deref _deref = __deref())
: deref(_deref) {
set_data(first, last, _point_dim, ctor);
}
}
template < class __instype, class __valuector >
void set_data(__instype * first, __instype * last, int _point_dim,
- __valuector ctor) {
+ __valuector ctor) {
point_dim = _point_dim;
nodes.clear();
nodes.reserve(last - first);
std::cout << " ";
const node & n = nodes[i];
if (n.dim >= 0) {
- std::cout << "node " << i << ", left " << nodes[i].left << ", right " <<
- nodes[i].right << ", dim " << nodes[i].dim << ", boundary " <<
- nodes[i].boundary << std::endl;
+ std::cout << "node " << i << ", left " << nodes[i].left << ", right " <<
+ nodes[i].right << ", dim " << nodes[i].dim << ", boundary " <<
+ nodes[i].boundary << std::endl;
print(n.left, indent + 3);
print(n.right, indent + 3);
} else
////////////////////////////////////////////////////////////////////////////////////////
// bbf search
public:
- struct bbf_nn { // info on found neighbors (approx k nearest)
- const __valuetype *p; // nearest neighbor
- accum_type dist; // distance from d to query point
+ struct bbf_nn { // info on found neighbors (approx k nearest)
+ const __valuetype *p; // nearest neighbor
+ accum_type dist; // distance from d to query point
bbf_nn(const __valuetype & _p, accum_type _dist)
: p(&_p), dist(_dist) {
}
};
typedef std::vector < bbf_nn > bbf_nn_pqueue;
private:
- struct bbf_node { // info on branches not taken
- int node; // corresponding node
- accum_type dist; // minimum distance from bounds to query point
+ struct bbf_node { // info on branches not taken
+ int node; // corresponding node
+ accum_type dist; // minimum distance from bounds to query point
bbf_node(int _node, accum_type _dist)
: node(_node), dist(_dist) {
}
int bbf_branch(int i, const __desctype * d, bbf_pqueue & pq) const {
const node & n = nodes[i];
// push bbf_node with bounds of alternate branch, then branch
- if (d[n.dim] <= n.boundary) { // left
+ if (d[n.dim] <= n.boundary) { // left
pq_alternate(n.right, pq, n.boundary - d[n.dim]);
return n.left;
- } else { // right
+ } else { // right
pq_alternate(n.left, pq, d[n.dim] - n.boundary);
return n.right;
}
}
// called per candidate nearest neighbor; constructs new bbf_nn for
- // candidate and adds it to priority queue of all candidates; if
+ // candidate and adds it to priority queue of all candidates; if
// queue len exceeds k, drops the point furthest from query point d.
template < class __desctype >
- void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k,
- const __desctype * d, const __valuetype & p) const {
+ void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k,
+ const __desctype * d, const __valuetype & p) const {
bbf_nn nn(p, distance(d, p));
if ((int) nn_pq.size() < k) {
nn_pq.push_back(nn);
}
public:
- // finds (with high probability) the k nearest neighbors of d,
+ // finds (with high probability) the k nearest neighbors of d,
// searching at most emax leaves/bins.
- // ret_nn_pq is an array containing the (at most) k nearest neighbors
+ // ret_nn_pq is an array containing the (at most) k nearest neighbors
// (see bbf_nn structure def above).
template < class __desctype >
- int find_nn_bbf(const __desctype * d,
- int k, int emax,
- bbf_nn_pqueue & ret_nn_pq) const {
+ int find_nn_bbf(const __desctype * d,
+ int k, int emax,
+ bbf_nn_pqueue & ret_nn_pq) const {
assert(k > 0);
ret_nn_pq.clear();
int i;
for (i = bbf.node;
- i != -1 && nodes[i].dim >= 0;
- i = bbf_branch(i, d, tmp_pq));
+ i != -1 && nodes[i].dim >= 0;
+ i = bbf_branch(i, d, tmp_pq));
if (i != -1) {
- // add points in leaf/bin to ret_nn_pq
- do {
- bbf_new_nn(ret_nn_pq, k, d, nodes[i].value);
- } while (-1 != (i = nodes[i].right));
+ // add points in leaf/bin to ret_nn_pq
+ do {
+ bbf_new_nn(ret_nn_pq, k, d, nodes[i].value);
+ } while (-1 != (i = nodes[i].right));
- --emax;
+ --emax;
}
}
// orthogonal range search
private:
void find_ortho_range(int i, scalar_type * bounds_min,
- scalar_type * bounds_max,
- std::vector < __valuetype > &inbounds) const {
+ scalar_type * bounds_max,
+ std::vector < __valuetype > &inbounds) const {
if (i == -1)
return;
const node & n = nodes[i];
if (n.dim >= 0) { // node
if (bounds_min[n.dim] <= n.boundary)
- find_ortho_range(n.left, bounds_min, bounds_max, inbounds);
+ find_ortho_range(n.left, bounds_min, bounds_max, inbounds);
if (bounds_max[n.dim] > n.boundary)
- find_ortho_range(n.right, bounds_min, bounds_max, inbounds);
+ find_ortho_range(n.right, bounds_min, bounds_max, inbounds);
} else { // leaf
do {
- inbounds.push_back(nodes[i].value);
+ inbounds.push_back(nodes[i].value);
} while (-1 != (i = nodes[i].right));
}
}
public:
// return all points that lie within the given bounds; inbounds is cleared
int find_ortho_range(scalar_type * bounds_min,
- scalar_type * bounds_max,
- std::vector < __valuetype > &inbounds) const {
+ scalar_type * bounds_max,
+ std::vector < __valuetype > &inbounds) const {
inbounds.clear();
find_ortho_range(root_node, bounds_min, bounds_max, inbounds);
return (int)inbounds.size();
typedef struct DefBlobFVN
{
- CvBlob blob;
- CvBlob BlobSeq[BLOB_NUM];
- int state;
- int LastFrame;
- int FrameNum;
+ CvBlob blob;
+ CvBlob BlobSeq[BLOB_NUM];
+ int state;
+ int LastFrame;
+ int FrameNum;
} DefBlobFVN;
class CvBlobTrackFVGenN: public CvBlobTrackFVGen
{
private:
- CvBlobSeq m_BlobList;
- CvMemStorage* m_pMem;
- CvSeq* m_pFVSeq;
- float m_FVMax[MAX_FV_SIZE];
- float m_FVMin[MAX_FV_SIZE];
- float m_FVVar[MAX_FV_SIZE];
- int m_Dim;
- CvBlob m_BlobSeq[BLOB_NUM];
- int m_Frame;
- int m_State;
- int m_LastFrame;
- int m_ClearFlag;
- void Clear()
- {
- if(m_pMem)
- {
- cvClearMemStorage(m_pMem);
- m_pFVSeq = cvCreateSeq(0,sizeof(CvSeq),sizeof(float)*(m_Dim+1), m_pMem);
- m_ClearFlag = 1;
- }
- }
+ CvBlobSeq m_BlobList;
+ CvMemStorage* m_pMem;
+ CvSeq* m_pFVSeq;
+ float m_FVMax[MAX_FV_SIZE];
+ float m_FVMin[MAX_FV_SIZE];
+ float m_FVVar[MAX_FV_SIZE];
+ int m_Dim;
+ CvBlob m_BlobSeq[BLOB_NUM];
+ int m_Frame;
+ int m_State;
+ int m_LastFrame;
+ int m_ClearFlag;
+ void Clear()
+ {
+ if(m_pMem)
+ {
+ cvClearMemStorage(m_pMem);
+ m_pFVSeq = cvCreateSeq(0,sizeof(CvSeq),sizeof(float)*(m_Dim+1), m_pMem);
+ m_ClearFlag = 1;
+ }
+ }
public:
- CvBlobTrackFVGenN(int dim = 2 ):m_BlobList(sizeof(DefBlobFVN))
- {
- int i;
- assert(dim <= MAX_FV_SIZE);
- m_Dim = dim;
- for(i=0; i<m_Dim; ++i)
- {
- m_FVVar[i] = 0.01f;
- m_FVMax[i] = 1;
- m_FVMin[i] = 0;
- }
- m_Frame = 0;
- m_State = 0;
- m_pMem = cvCreateMemStorage();
- m_pFVSeq = NULL;
- Clear();
-
- switch(dim) {
- case 2: SetModuleName("P"); break;
- case 4: SetModuleName("PV"); break;
- case 5: SetModuleName("PVS"); break;
- }
- };
-
- ~CvBlobTrackFVGenN()
- {
- if(m_pMem)cvReleaseMemStorage(&m_pMem);
- };
-
- void AddBlob(CvBlob* pBlob)
- {
- float FV[MAX_FV_SIZE+1];
- int i;
- DefBlobFVN* pFVBlob = (DefBlobFVN*)m_BlobList.GetBlobByID(CV_BLOB_ID(pBlob));
-
- if(!m_ClearFlag) Clear();
-
- if(pFVBlob==NULL)
- {
- DefBlobFVN BlobNew;
- BlobNew.blob = pBlob[0];
- BlobNew.LastFrame = m_Frame;
- BlobNew.state = 0;;
- BlobNew.FrameNum = 0;
- m_BlobList.AddBlob((CvBlob*)&BlobNew);
- pFVBlob = (DefBlobFVN*)m_BlobList.GetBlobByID(CV_BLOB_ID(pBlob));
- } /* Add new record if necessary. */
-
- pFVBlob->blob = pBlob[0];
-
- /* Shift: */
- for(i=(BLOB_NUM-1); i>0; --i)
- {
- pFVBlob->BlobSeq[i] = pFVBlob->BlobSeq[i-1];
- }
-
- pFVBlob->BlobSeq[0] = pBlob[0];
-
- if(m_Dim>0)
- { /* Calculate FV position: */
- FV[0] = CV_BLOB_X(pBlob);
- FV[1] = CV_BLOB_Y(pBlob);
- }
-
- if(m_Dim<=2)
- { /* Add new FV if position is enough: */
- *(int*)(FV+m_Dim) = CV_BLOB_ID(pBlob);
- cvSeqPush( m_pFVSeq, FV );
- }
- else if(pFVBlob->FrameNum > BLOB_NUM)
- { /* Calculate velocity for more complex FV: */
- float AverVx = 0;
- float AverVy = 0;
- { /* Average velocity: */
- CvBlob* pBlobSeq = pFVBlob->BlobSeq;
- int i;
- for(i=1;i<BLOB_NUM;++i)
- {
- AverVx += CV_BLOB_X(pBlobSeq+i-1)-CV_BLOB_X(pBlobSeq+i);
- AverVy += CV_BLOB_Y(pBlobSeq+i-1)-CV_BLOB_Y(pBlobSeq+i);
- }
- AverVx /= BLOB_NUM-1;
- AverVy /= BLOB_NUM-1;
-
- FV[2] = AverVx;
- FV[3] = AverVy;
- }
-
- if(m_Dim>4)
- { /* State duration: */
- float T = (CV_BLOB_WX(pBlob)+CV_BLOB_WY(pBlob))*0.01f;
-
- if( fabs(AverVx) < T && fabs(AverVy) < T)
- pFVBlob->state++;
- else
- pFVBlob->state=0;
- FV[4] = (float)pFVBlob->state;
- } /* State duration. */
-
- /* Add new FV: */
- *(int*)(FV+m_Dim) = CV_BLOB_ID(pBlob);
- cvSeqPush( m_pFVSeq, FV );
-
- } /* If velocity is calculated. */
-
- pFVBlob->FrameNum++;
- pFVBlob->LastFrame = m_Frame;
- }; /* AddBlob */
-
- void Process(IplImage* pImg, IplImage* /*pFG*/)
- {
- int i;
- if(!m_ClearFlag) Clear();
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
- { /* Delete unused blob: */
- DefBlobFVN* pFVBlob = (DefBlobFVN*)m_BlobList.GetBlob(i-1);
- if(pFVBlob->LastFrame < m_Frame)
- {
- m_BlobList.DelBlob(i-1);
- }
- } /* Check next blob in list. */
-
- m_FVMin[0] = 0;
- m_FVMin[1] = 0;
- m_FVMax[0] = (float)(pImg->width-1);
- m_FVMax[1] = (float)(pImg->height-1);
- m_FVVar[0] = m_FVMax[0]*0.01f;
- m_FVVar[1] = m_FVMax[1]*0.01f;
- m_FVVar[2] = (float)(pImg->width-1)/1440.0f;
- m_FVMax[2] = (float)(pImg->width-1)*0.02f;
- m_FVMin[2] = -m_FVMax[2];
- m_FVVar[3] = (float)(pImg->width-1)/1440.0f;
- m_FVMax[3] = (float)(pImg->height-1)*0.02f;
- m_FVMin[3] = -m_FVMax[3];
- m_FVMax[4] = 25*32.0f; /* max state is 32 sec */
- m_FVMin[4] = 0;
- m_FVVar[4] = 10;
-
- m_Frame++;
- m_ClearFlag = 0;
- };
- virtual void Release(){delete this;};
- virtual int GetFVSize(){return m_Dim;};
- virtual int GetFVNum()
- {
- return m_pFVSeq->total;
- };
-
- virtual float* GetFV(int index, int* pFVID)
- {
- float* pFV = (float*)cvGetSeqElem( m_pFVSeq, index );
- if(pFVID)pFVID[0] = *(int*)(pFV+m_Dim);
- return pFV;
- };
- virtual float* GetFVMin(){return m_FVMin;}; /* returned pointer to array of minimal values of FV, if return 0 then FVrange is not exist */
- virtual float* GetFVMax(){return m_FVMax;}; /* returned pointer to array of maximal values of FV, if return 0 then FVrange is not exist */
- virtual float* GetFVVar(){return m_FVVar;}; /* returned pointer to array of maximal values of FV, if return 0 then FVrange is not exist */
+ CvBlobTrackFVGenN(int dim = 2 ):m_BlobList(sizeof(DefBlobFVN))
+ {
+ int i;
+ assert(dim <= MAX_FV_SIZE);
+ m_Dim = dim;
+ for(i=0; i<m_Dim; ++i)
+ {
+ m_FVVar[i] = 0.01f;
+ m_FVMax[i] = 1;
+ m_FVMin[i] = 0;
+ }
+ m_Frame = 0;
+ m_State = 0;
+ m_pMem = cvCreateMemStorage();
+ m_pFVSeq = NULL;
+ Clear();
+
+ switch(dim) {
+ case 2: SetModuleName("P"); break;
+ case 4: SetModuleName("PV"); break;
+ case 5: SetModuleName("PVS"); break;
+ }
+ };
+
+ ~CvBlobTrackFVGenN()
+ {
+ if(m_pMem)cvReleaseMemStorage(&m_pMem);
+ };
+
+ void AddBlob(CvBlob* pBlob)
+ {
+ float FV[MAX_FV_SIZE+1];
+ DefBlobFVN* pFVBlob = (DefBlobFVN*)m_BlobList.GetBlobByID(CV_BLOB_ID(pBlob));
+
+ if(!m_ClearFlag) Clear();
+
+ if(pFVBlob==NULL)
+ {
+ DefBlobFVN BlobNew;
+ BlobNew.blob = pBlob[0];
+ BlobNew.LastFrame = m_Frame;
+            BlobNew.state = 0;
+ BlobNew.FrameNum = 0;
+ m_BlobList.AddBlob((CvBlob*)&BlobNew);
+ pFVBlob = (DefBlobFVN*)m_BlobList.GetBlobByID(CV_BLOB_ID(pBlob));
+ } /* Add new record if necessary. */
+
+ pFVBlob->blob = pBlob[0];
+
+ /* Shift: */
+ for(int i=(BLOB_NUM-1); i>0; --i)
+ {
+ pFVBlob->BlobSeq[i] = pFVBlob->BlobSeq[i-1];
+ }
+
+ pFVBlob->BlobSeq[0] = pBlob[0];
+
+ if(m_Dim>0)
+ { /* Calculate FV position: */
+ FV[0] = CV_BLOB_X(pBlob);
+ FV[1] = CV_BLOB_Y(pBlob);
+ }
+
+ if(m_Dim<=2)
+ { /* Add new FV if position is enough: */
+ *(int*)(FV+m_Dim) = CV_BLOB_ID(pBlob);
+ cvSeqPush( m_pFVSeq, FV );
+ }
+ else if(pFVBlob->FrameNum > BLOB_NUM)
+ { /* Calculate velocity for more complex FV: */
+ float AverVx = 0;
+ float AverVy = 0;
+ { /* Average velocity: */
+ CvBlob* pBlobSeq = pFVBlob->BlobSeq;
+ for(int i=1;i<BLOB_NUM;++i)
+ {
+ AverVx += CV_BLOB_X(pBlobSeq+i-1)-CV_BLOB_X(pBlobSeq+i);
+ AverVy += CV_BLOB_Y(pBlobSeq+i-1)-CV_BLOB_Y(pBlobSeq+i);
+ }
+ AverVx /= BLOB_NUM-1;
+ AverVy /= BLOB_NUM-1;
+
+ FV[2] = AverVx;
+ FV[3] = AverVy;
+ }
+
+ if(m_Dim>4)
+ { /* State duration: */
+ float T = (CV_BLOB_WX(pBlob)+CV_BLOB_WY(pBlob))*0.01f;
+
+ if( fabs(AverVx) < T && fabs(AverVy) < T)
+ pFVBlob->state++;
+ else
+ pFVBlob->state=0;
+ FV[4] = (float)pFVBlob->state;
+ } /* State duration. */
+
+ /* Add new FV: */
+ *(int*)(FV+m_Dim) = CV_BLOB_ID(pBlob);
+ cvSeqPush( m_pFVSeq, FV );
+
+ } /* If velocity is calculated. */
+
+ pFVBlob->FrameNum++;
+ pFVBlob->LastFrame = m_Frame;
+ }; /* AddBlob */
+
+ void Process(IplImage* pImg, IplImage* /*pFG*/)
+ {
+ int i;
+ if(!m_ClearFlag) Clear();
+ for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ { /* Delete unused blob: */
+ DefBlobFVN* pFVBlob = (DefBlobFVN*)m_BlobList.GetBlob(i-1);
+ if(pFVBlob->LastFrame < m_Frame)
+ {
+ m_BlobList.DelBlob(i-1);
+ }
+ } /* Check next blob in list. */
+
+ m_FVMin[0] = 0;
+ m_FVMin[1] = 0;
+ m_FVMax[0] = (float)(pImg->width-1);
+ m_FVMax[1] = (float)(pImg->height-1);
+ m_FVVar[0] = m_FVMax[0]*0.01f;
+ m_FVVar[1] = m_FVMax[1]*0.01f;
+ m_FVVar[2] = (float)(pImg->width-1)/1440.0f;
+ m_FVMax[2] = (float)(pImg->width-1)*0.02f;
+ m_FVMin[2] = -m_FVMax[2];
+ m_FVVar[3] = (float)(pImg->width-1)/1440.0f;
+ m_FVMax[3] = (float)(pImg->height-1)*0.02f;
+ m_FVMin[3] = -m_FVMax[3];
+ m_FVMax[4] = 25*32.0f; /* max state is 32 sec */
+ m_FVMin[4] = 0;
+ m_FVVar[4] = 10;
+
+ m_Frame++;
+ m_ClearFlag = 0;
+ };
+ virtual void Release(){delete this;};
+ virtual int GetFVSize(){return m_Dim;};
+ virtual int GetFVNum()
+ {
+ return m_pFVSeq->total;
+ };
+
+ virtual float* GetFV(int index, int* pFVID)
+ {
+ float* pFV = (float*)cvGetSeqElem( m_pFVSeq, index );
+ if(pFVID)pFVID[0] = *(int*)(pFV+m_Dim);
+ return pFV;
+ };
+    virtual float* GetFVMin(){return m_FVMin;}; /* Returns a pointer to the array of minimal FV values; 0 if no FV range exists. */
+    virtual float* GetFVMax(){return m_FVMax;}; /* Returns a pointer to the array of maximal FV values; 0 if no FV range exists. */
+    virtual float* GetFVVar(){return m_FVVar;}; /* Returns a pointer to the array of FV variances; 0 if no FV range exists. */
};/* CvBlobTrackFVGenN */
-CvBlobTrackFVGen* cvCreateFVGenP(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(2);}
-CvBlobTrackFVGen* cvCreateFVGenPV(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(4);}
-CvBlobTrackFVGen* cvCreateFVGenPVS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(5);}
+inline CvBlobTrackFVGen* cvCreateFVGenP(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(2);}
+inline CvBlobTrackFVGen* cvCreateFVGenPV(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(4);}
+inline CvBlobTrackFVGen* cvCreateFVGenPVS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(5);}
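// A small sketch of the sliding-window average velocity computed in AddBlob():
// summing successive position differences over BLOB_NUM samples telescopes to
// (newest - oldest)/(BLOB_NUM - 1). The positions below are arbitrary values.
#include <cstdio>

int main()
{
    const int BLOB_NUM = 5;
    float xs[BLOB_NUM] = { 120, 118, 115, 113, 110 };  // newest first, as in BlobSeq[]

    float AverVx = 0;
    for( int i = 1; i < BLOB_NUM; ++i )
        AverVx += xs[i-1] - xs[i];
    AverVx /= BLOB_NUM - 1;

    printf("average vx = %g, telescoped = %g\n",
           AverVx, (xs[0] - xs[BLOB_NUM-1])/(BLOB_NUM - 1));
    return 0;
}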
#undef MAX_FV_SIZE
#define MAX_FV_SIZE 4
class CvBlobTrackFVGenSS: public CvBlobTrackFVGen
{
private:
- CvBlobSeq m_BlobList;
- CvMemStorage* m_pMem;
- CvSeq* m_pFVSeq;
- float m_FVMax[MAX_FV_SIZE];
- float m_FVMin[MAX_FV_SIZE];
- float m_FVVar[MAX_FV_SIZE];
- int m_Dim;
- CvBlob m_BlobSeq[BLOB_NUM];
- int m_Frame;
- int m_State;
- int m_LastFrame;
- int m_ClearFlag;
- void Clear()
- {
- cvClearMemStorage(m_pMem);
- m_pFVSeq = cvCreateSeq(0,sizeof(CvSeq),sizeof(float)*(m_Dim+1), m_pMem);
- m_ClearFlag = 1;
- }
+ CvBlobSeq m_BlobList;
+ CvMemStorage* m_pMem;
+ CvSeq* m_pFVSeq;
+ float m_FVMax[MAX_FV_SIZE];
+ float m_FVMin[MAX_FV_SIZE];
+ float m_FVVar[MAX_FV_SIZE];
+ int m_Dim;
+ CvBlob m_BlobSeq[BLOB_NUM];
+ int m_Frame;
+ int m_State;
+ int m_LastFrame;
+ int m_ClearFlag;
+ void Clear()
+ {
+ cvClearMemStorage(m_pMem);
+ m_pFVSeq = cvCreateSeq(0,sizeof(CvSeq),sizeof(float)*(m_Dim+1), m_pMem);
+ m_ClearFlag = 1;
+ }
public:
- CvBlobTrackFVGenSS(int dim = 2 ):m_BlobList(sizeof(DefBlobFVN))
- {
- int i;
- assert(dim <= MAX_FV_SIZE);
- m_Dim = dim;
- for(i=0;i<m_Dim;++i)
- {
- m_FVVar[i] = 0.01f;
- m_FVMax[i] = 1;
- m_FVMin[i] = 0;
- }
- m_Frame = 0;
- m_State = 0;
- m_pMem = cvCreateMemStorage();
- m_pFVSeq = NULL;
-
- SetModuleName("SS");
- };
- ~CvBlobTrackFVGenSS()
- {
- if(m_pMem)cvReleaseMemStorage(&m_pMem);
- };
-
- void AddBlob(CvBlob* pBlob)
- {
- //float FV[MAX_FV_SIZE+1];
- int i;
- DefBlobFVN* pFVBlob = (DefBlobFVN*)m_BlobList.GetBlobByID(CV_BLOB_ID(pBlob));
-
- if(!m_ClearFlag) Clear();
-
- if(pFVBlob==NULL)
- {
- DefBlobFVN BlobNew;
- BlobNew.blob = pBlob[0];
- BlobNew.LastFrame = m_Frame;
- BlobNew.state = 0;;
- BlobNew.FrameNum = 0;
- m_BlobList.AddBlob((CvBlob*)&BlobNew);
- pFVBlob = (DefBlobFVN*)m_BlobList.GetBlobByID(CV_BLOB_ID(pBlob));
- } /* Add new record if necessary. */
-
- /* Shift: */
- for(i=(BLOB_NUM-1); i>0; --i)
- {
- pFVBlob->BlobSeq[i] = pFVBlob->BlobSeq[i-1];
- }
-
- pFVBlob->BlobSeq[0] = pBlob[0];
-
- if(pFVBlob->FrameNum > BLOB_NUM)
- { /* Average velocity: */
- CvBlob* pBlobSeq = pFVBlob->BlobSeq;
- float T = (CV_BLOB_WX(pBlob)+CV_BLOB_WY(pBlob))*0.01f;
- float AverVx = 0;
- float AverVy = 0;
- int i;
- for(i=1; i<BLOB_NUM; ++i)
- {
- AverVx += CV_BLOB_X(pBlobSeq+i-1)-CV_BLOB_X(pBlobSeq+i);
- AverVy += CV_BLOB_Y(pBlobSeq+i-1)-CV_BLOB_Y(pBlobSeq+i);
- }
- AverVx /= BLOB_NUM-1;
- AverVy /= BLOB_NUM-1;
-
- if( fabs(AverVx) < T && fabs(AverVy) < T)
- pFVBlob->state++;
- else
- pFVBlob->state=0;
- }
-
- if(pFVBlob->state == 5)
- { /* Object is stopped: */
- float FV[MAX_FV_SIZE];
- FV[0] = pFVBlob->blob.x;
- FV[1] = pFVBlob->blob.y;
- FV[2] = pFVBlob->BlobSeq[0].x;
- FV[3] = pFVBlob->BlobSeq[0].y;
- *(int*)(FV+m_Dim) = CV_BLOB_ID(pBlob);
- cvSeqPush( m_pFVSeq, FV );
- } /* Object is stopped. */
-
- pFVBlob->FrameNum++;
- pFVBlob->LastFrame = m_Frame;
- }; /* AddBlob */
- void Process(IplImage* pImg, IplImage* /*pFG*/)
- {
- int i;
-
- if(!m_ClearFlag) Clear();
-
- for(i=m_BlobList.GetBlobNum();i>0;--i)
- { /* Delete unused blob: */
- DefBlobFVN* pFVBlob = (DefBlobFVN*)m_BlobList.GetBlob(i-1);
- if(pFVBlob->LastFrame < m_Frame)
- {
- float FV[MAX_FV_SIZE+1];
- FV[0] = pFVBlob->blob.x;
- FV[1] = pFVBlob->blob.y;
- FV[2] = pFVBlob->BlobSeq[0].x;
- FV[3] = pFVBlob->BlobSeq[0].y;
- *(int*)(FV+m_Dim) = CV_BLOB_ID(pFVBlob);
- cvSeqPush( m_pFVSeq, FV );
- m_BlobList.DelBlob(i-1);
- }
- } /* Check next blob in list. */
-
- /* Set max min range: */
- m_FVMin[0] = 0;
- m_FVMin[1] = 0;
- m_FVMin[2] = 0;
- m_FVMin[3] = 0;
- m_FVMax[0] = (float)(pImg->width-1);
- m_FVMax[1] = (float)(pImg->height-1);
- m_FVMax[2] = (float)(pImg->width-1);
- m_FVMax[3] = (float)(pImg->height-1);
- m_FVVar[0] = m_FVMax[0]*0.01f;
- m_FVVar[1] = m_FVMax[1]*0.01f;
- m_FVVar[2] = m_FVMax[2]*0.01f;
- m_FVVar[3] = m_FVMax[3]*0.01f;
-
- m_Frame++;
- m_ClearFlag = 0;
- };
- virtual void Release(){delete this;};
- virtual int GetFVSize(){return m_Dim;};
- virtual int GetFVNum()
- {
- return m_pFVSeq->total;
- };
-
- virtual float* GetFV(int index, int* pFVID)
- {
- float* pFV = (float*)cvGetSeqElem( m_pFVSeq, index );
- if(pFVID)pFVID[0] = *(int*)(pFV+m_Dim);
- return pFV;
- };
-
- virtual float* GetFVMin(){return m_FVMin;}; /* returned pointer to array of minimal values of FV, if return 0 then FVrange is not exist */
- virtual float* GetFVMax(){return m_FVMax;}; /* returned pointer to array of maximal values of FV, if return 0 then FVrange is not exist */
- virtual float* GetFVVar(){return m_FVVar;}; /* returned pointer to array of maximal values of FV, if return 0 then FVrange is not exist */
+ CvBlobTrackFVGenSS(int dim = 2 ):m_BlobList(sizeof(DefBlobFVN))
+ {
+ int i;
+ assert(dim <= MAX_FV_SIZE);
+ m_Dim = dim;
+ for(i=0;i<m_Dim;++i)
+ {
+ m_FVVar[i] = 0.01f;
+ m_FVMax[i] = 1;
+ m_FVMin[i] = 0;
+ }
+ m_Frame = 0;
+ m_State = 0;
+ m_pMem = cvCreateMemStorage();
+ m_pFVSeq = NULL;
+
+ SetModuleName("SS");
+ };
+ ~CvBlobTrackFVGenSS()
+ {
+ if(m_pMem)cvReleaseMemStorage(&m_pMem);
+ };
+
+ void AddBlob(CvBlob* pBlob)
+ {
+ //float FV[MAX_FV_SIZE+1];
+ DefBlobFVN* pFVBlob = (DefBlobFVN*)m_BlobList.GetBlobByID(CV_BLOB_ID(pBlob));
+
+ if(!m_ClearFlag) Clear();
+
+ if(pFVBlob==NULL)
+ {
+ DefBlobFVN BlobNew;
+ BlobNew.blob = pBlob[0];
+ BlobNew.LastFrame = m_Frame;
+            BlobNew.state = 0;
+ BlobNew.FrameNum = 0;
+ m_BlobList.AddBlob((CvBlob*)&BlobNew);
+ pFVBlob = (DefBlobFVN*)m_BlobList.GetBlobByID(CV_BLOB_ID(pBlob));
+ } /* Add new record if necessary. */
+
+ /* Shift: */
+ for(int i=(BLOB_NUM-1); i>0; --i)
+ {
+ pFVBlob->BlobSeq[i] = pFVBlob->BlobSeq[i-1];
+ }
+
+ pFVBlob->BlobSeq[0] = pBlob[0];
+
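+        /* Average the per-frame displacement over the stored BLOB_NUM positions;
+           if both components stay below T (1% of the blob size), the object is
+           treated as motionless and its "stopped" counter is incremented. */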
+ if(pFVBlob->FrameNum > BLOB_NUM)
+ { /* Average velocity: */
+ CvBlob* pBlobSeq = pFVBlob->BlobSeq;
+ float T = (CV_BLOB_WX(pBlob)+CV_BLOB_WY(pBlob))*0.01f;
+ float AverVx = 0;
+ float AverVy = 0;
+ for(int i=1; i<BLOB_NUM; ++i)
+ {
+ AverVx += CV_BLOB_X(pBlobSeq+i-1)-CV_BLOB_X(pBlobSeq+i);
+ AverVy += CV_BLOB_Y(pBlobSeq+i-1)-CV_BLOB_Y(pBlobSeq+i);
+ }
+ AverVx /= BLOB_NUM-1;
+ AverVy /= BLOB_NUM-1;
+
+ if( fabs(AverVx) < T && fabs(AverVy) < T)
+ pFVBlob->state++;
+ else
+ pFVBlob->state=0;
+ }
+
+ if(pFVBlob->state == 5)
+ { /* Object is stopped: */
+ float FV[MAX_FV_SIZE];
+ FV[0] = pFVBlob->blob.x;
+ FV[1] = pFVBlob->blob.y;
+ FV[2] = pFVBlob->BlobSeq[0].x;
+ FV[3] = pFVBlob->BlobSeq[0].y;
+ *(int*)(FV+m_Dim) = CV_BLOB_ID(pBlob);
+ cvSeqPush( m_pFVSeq, FV );
+ } /* Object is stopped. */
+
+ pFVBlob->FrameNum++;
+ pFVBlob->LastFrame = m_Frame;
+ }; /* AddBlob */
+ void Process(IplImage* pImg, IplImage* /*pFG*/)
+ {
+ int i;
+
+ if(!m_ClearFlag) Clear();
+
+ for(i=m_BlobList.GetBlobNum();i>0;--i)
+ { /* Delete unused blob: */
+ DefBlobFVN* pFVBlob = (DefBlobFVN*)m_BlobList.GetBlob(i-1);
+ if(pFVBlob->LastFrame < m_Frame)
+ {
+ float FV[MAX_FV_SIZE+1];
+ FV[0] = pFVBlob->blob.x;
+ FV[1] = pFVBlob->blob.y;
+ FV[2] = pFVBlob->BlobSeq[0].x;
+ FV[3] = pFVBlob->BlobSeq[0].y;
+ *(int*)(FV+m_Dim) = CV_BLOB_ID(pFVBlob);
+ cvSeqPush( m_pFVSeq, FV );
+ m_BlobList.DelBlob(i-1);
+ }
+ } /* Check next blob in list. */
+
+ /* Set max min range: */
+ m_FVMin[0] = 0;
+ m_FVMin[1] = 0;
+ m_FVMin[2] = 0;
+ m_FVMin[3] = 0;
+ m_FVMax[0] = (float)(pImg->width-1);
+ m_FVMax[1] = (float)(pImg->height-1);
+ m_FVMax[2] = (float)(pImg->width-1);
+ m_FVMax[3] = (float)(pImg->height-1);
+ m_FVVar[0] = m_FVMax[0]*0.01f;
+ m_FVVar[1] = m_FVMax[1]*0.01f;
+ m_FVVar[2] = m_FVMax[2]*0.01f;
+ m_FVVar[3] = m_FVMax[3]*0.01f;
+
+ m_Frame++;
+ m_ClearFlag = 0;
+ };
+ virtual void Release(){delete this;};
+ virtual int GetFVSize(){return m_Dim;};
+ virtual int GetFVNum()
+ {
+ return m_pFVSeq->total;
+ };
+
+ virtual float* GetFV(int index, int* pFVID)
+ {
+ float* pFV = (float*)cvGetSeqElem( m_pFVSeq, index );
+ if(pFVID)pFVID[0] = *(int*)(pFV+m_Dim);
+ return pFV;
+ };
+
+    virtual float* GetFVMin(){return m_FVMin;}; /* Returns pointer to the array of minimal FV values; NULL means no FV range is defined. */
+    virtual float* GetFVMax(){return m_FVMax;}; /* Returns pointer to the array of maximal FV values; NULL means no FV range is defined. */
+    virtual float* GetFVVar(){return m_FVVar;}; /* Returns pointer to the array of FV scale (variance) values; NULL means no FV range is defined. */
};/* CvBlobTrackFVGenSS */
-CvBlobTrackFVGen* cvCreateFVGenSS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenSS;}
+inline CvBlobTrackFVGen* cvCreateFVGenSS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenSS;}
/*======================= TRAJECTORY ANALYZER MODULES =====================*/
/* Trajectory Analyser module */
class DefMat
{
private:
- CvSparseMatIterator m_SparseIterator;
- CvSparseNode* m_pSparseNode;
- int* m_IDXs;
- int m_Dim;
+ CvSparseMatIterator m_SparseIterator;
+ CvSparseNode* m_pSparseNode;
+ int* m_IDXs;
+ int m_Dim;
public:
- CvSparseMat* m_pSparse;
- CvMatND* m_pND;
- int m_Volume;
- int m_Max;
- DefMat(int dim = 0, int* sizes = NULL, int type = SPARSE)
- {
- /* Create sparse or ND matrix but not both: */
- m_pSparseNode = NULL;
- m_pSparse = NULL;
- m_pND = NULL;
- m_Volume = 0;
- m_Max = 0;
- m_IDXs = NULL;
- m_Dim = 0;
- if(dim>0 && sizes != 0)
- Realloc(dim, sizes, type);
- }
- ~DefMat()
- {
- if(m_pSparse)cvReleaseSparseMat(&m_pSparse);
- if(m_pND)cvReleaseMatND(&m_pND);
- if(m_IDXs) cvFree(&m_IDXs);
- }
-
- void Realloc(int dim, int* sizes, int type = SPARSE)
- {
- if(m_pSparse)cvReleaseSparseMat(&m_pSparse);
- if(m_pND)cvReleaseMatND(&m_pND);
-
- if(type == BYSIZE )
- {
- int size = 0;
- int i;
- for(size=1,i=0;i<dim;++i)
- {
- size *= sizes[i];
- }
- size *= sizeof(int);
- if(size > (2<<20))
- { /* if size > 1M */
- type = SPARSE;
- }
- else
- {
- type = ND;
- }
- } /* Define matrix type. */
-
- if(type == SPARSE)
- {
- m_pSparse = cvCreateSparseMat( dim, sizes, CV_32SC1 );
- m_Dim = dim;
- }
- if(type == ND )
- {
- m_pND = cvCreateMatND( dim, sizes, CV_32SC1 );
- cvZero(m_pND);
- m_IDXs = (int*)cvAlloc(sizeof(int)*dim);
- m_Dim = dim;
- }
- m_Volume = 0;
- m_Max = 0;
- }
- void Save(const char* File)
- {
- if(m_pSparse)cvSave(File, m_pSparse );
- if(m_pND)cvSave(File, m_pND );
- }
- void Save(CvFileStorage* fs, const char* name)
- {
- if(m_pSparse)
- {
- cvWrite(fs, name, m_pSparse );
- }
- else if(m_pND)
- {
- cvWrite(fs, name, m_pND );
- }
- }
- void Load(const char* File)
- {
- CvFileStorage* fs = cvOpenFileStorage( File, NULL, CV_STORAGE_READ );
- if(fs)
- {
- void* ptr;
- if(m_pSparse) cvReleaseSparseMat(&m_pSparse);
- if(m_pND) cvReleaseMatND(&m_pND);
- m_Volume = 0;
- m_Max = 0;
- ptr = cvLoad(File);
- if(ptr && CV_IS_MATND_HDR(ptr)) m_pND = (CvMatND*)ptr;
- if(ptr && CV_IS_SPARSE_MAT_HDR(ptr)) m_pSparse = (CvSparseMat*)ptr;
- cvReleaseFileStorage(&fs);
- }
- AfterLoad();
- } /* Load. */
-
- void Load(CvFileStorage* fs, CvFileNode* node, const char* name)
- {
- CvFileNode* n = cvGetFileNodeByName(fs,node,name);
- void* ptr = n?cvRead(fs,n):NULL;
- if(ptr)
- {
- if(m_pSparse) cvReleaseSparseMat(&m_pSparse);
- if(m_pND) cvReleaseMatND(&m_pND);
- m_Volume = 0;
- m_Max = 0;
- if(CV_IS_MATND_HDR(ptr)) m_pND = (CvMatND*)ptr;
- if(CV_IS_SPARSE_MAT_HDR(ptr)) m_pSparse = (CvSparseMat*)ptr;
- }
- else
- {
- printf("WARNING!!! Can't load %s matrix\n",name);
- }
- AfterLoad();
- } /* Load. */
-
- void AfterLoad()
- {
- m_Volume = 0;
- m_Max = 0;
- if(m_pSparse)
- { /* Calculate Volume of loaded hist: */
- CvSparseMatIterator mat_iterator;
- CvSparseNode* node = cvInitSparseMatIterator( m_pSparse, &mat_iterator );
-
- for( ; node != 0; node = cvGetNextSparseNode( &mat_iterator ))
- {
- int val = *(int*)CV_NODE_VAL( m_pSparse, node ); /* get value of the element
+ CvSparseMat* m_pSparse;
+ CvMatND* m_pND;
+ int m_Volume;
+ int m_Max;
+ DefMat(int dim = 0, int* sizes = NULL, int type = SPARSE)
+ {
+ /* Create sparse or ND matrix but not both: */
+ m_pSparseNode = NULL;
+ m_pSparse = NULL;
+ m_pND = NULL;
+ m_Volume = 0;
+ m_Max = 0;
+ m_IDXs = NULL;
+ m_Dim = 0;
+ if(dim>0 && sizes != 0)
+ Realloc(dim, sizes, type);
+ }
+ ~DefMat()
+ {
+ if(m_pSparse)cvReleaseSparseMat(&m_pSparse);
+ if(m_pND)cvReleaseMatND(&m_pND);
+ if(m_IDXs) cvFree(&m_IDXs);
+ }
+
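+    /* (Re)allocate histogram storage; BYSIZE selects dense ND storage for small
+       histograms and falls back to a sparse matrix for large ones. */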
+ void Realloc(int dim, int* sizes, int type = SPARSE)
+ {
+ if(m_pSparse)cvReleaseSparseMat(&m_pSparse);
+ if(m_pND)cvReleaseMatND(&m_pND);
+
+ if(type == BYSIZE )
+ {
+ int size = 0;
+ int i;
+ for(size=1,i=0;i<dim;++i)
+ {
+ size *= sizes[i];
+ }
+ size *= sizeof(int);
+ if(size > (2<<20))
+            { /* Dense storage would need more than 2 MB - use a sparse matrix: */
+ type = SPARSE;
+ }
+ else
+ {
+ type = ND;
+ }
+ } /* Define matrix type. */
+
+ if(type == SPARSE)
+ {
+ m_pSparse = cvCreateSparseMat( dim, sizes, CV_32SC1 );
+ m_Dim = dim;
+ }
+ if(type == ND )
+ {
+ m_pND = cvCreateMatND( dim, sizes, CV_32SC1 );
+ cvZero(m_pND);
+ m_IDXs = (int*)cvAlloc(sizeof(int)*dim);
+ m_Dim = dim;
+ }
+ m_Volume = 0;
+ m_Max = 0;
+ }
+ void Save(const char* File)
+ {
+ if(m_pSparse)cvSave(File, m_pSparse );
+ if(m_pND)cvSave(File, m_pND );
+ }
+ void Save(CvFileStorage* fs, const char* name)
+ {
+ if(m_pSparse)
+ {
+ cvWrite(fs, name, m_pSparse );
+ }
+ else if(m_pND)
+ {
+ cvWrite(fs, name, m_pND );
+ }
+ }
+ void Load(const char* File)
+ {
+ CvFileStorage* fs = cvOpenFileStorage( File, NULL, CV_STORAGE_READ );
+ if(fs)
+ {
+ void* ptr;
+ if(m_pSparse) cvReleaseSparseMat(&m_pSparse);
+ if(m_pND) cvReleaseMatND(&m_pND);
+ m_Volume = 0;
+ m_Max = 0;
+ ptr = cvLoad(File);
+ if(ptr && CV_IS_MATND_HDR(ptr)) m_pND = (CvMatND*)ptr;
+ if(ptr && CV_IS_SPARSE_MAT_HDR(ptr)) m_pSparse = (CvSparseMat*)ptr;
+ cvReleaseFileStorage(&fs);
+ }
+ AfterLoad();
+ } /* Load. */
+
+ void Load(CvFileStorage* fs, CvFileNode* node, const char* name)
+ {
+ CvFileNode* n = cvGetFileNodeByName(fs,node,name);
+ void* ptr = n?cvRead(fs,n):NULL;
+ if(ptr)
+ {
+ if(m_pSparse) cvReleaseSparseMat(&m_pSparse);
+ if(m_pND) cvReleaseMatND(&m_pND);
+ m_Volume = 0;
+ m_Max = 0;
+ if(CV_IS_MATND_HDR(ptr)) m_pND = (CvMatND*)ptr;
+ if(CV_IS_SPARSE_MAT_HDR(ptr)) m_pSparse = (CvSparseMat*)ptr;
+ }
+ else
+ {
+ printf("WARNING!!! Can't load %s matrix\n",name);
+ }
+ AfterLoad();
+ } /* Load. */
+
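+    /* Recompute m_Volume (sum of all counts) and m_Max after a histogram is loaded. */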
+ void AfterLoad()
+ {
+ m_Volume = 0;
+ m_Max = 0;
+ if(m_pSparse)
+ { /* Calculate Volume of loaded hist: */
+ CvSparseMatIterator mat_iterator;
+ CvSparseNode* node = cvInitSparseMatIterator( m_pSparse, &mat_iterator );
+
+ for( ; node != 0; node = cvGetNextSparseNode( &mat_iterator ))
+ {
+ int val = *(int*)CV_NODE_VAL( m_pSparse, node ); /* get value of the element
(assume that the type is CV_32SC1) */
- m_Volume += val;
- if(m_Max < val)m_Max = val;
- }
- } /* Calculate Volume of loaded hist. */
-
- if(m_pND)
- { /* Calculate Volume of loaded hist: */
- CvMat mat;
- double max_val;
- double vol;
- cvGetMat( m_pND, &mat, NULL, 1 );
-
- vol = cvSum(&mat).val[0];
- m_Volume = cvRound(vol);
- cvMinMaxLoc( &mat, NULL, &max_val);
- m_Max = cvRound(max_val);
- /* MUST BE WRITTEN LATER */
- } /* Calculate Volume of loaded hist. */
- } /* AfterLoad. */
-
- int* GetPtr(int* indx)
- {
- if(m_pSparse) return (int*)cvPtrND( m_pSparse, indx, NULL, 1, NULL);
- if(m_pND) return (int*)cvPtrND( m_pND, indx, NULL, 1, NULL);
- return NULL;
- } /* GetPtr. */
-
- int GetVal(int* indx)
- {
- int* p = GetPtr(indx);
- if(p)return p[0];
- return -1;
- } /* GetVal. */
-
- int Add(int* indx, int val)
- {
- int NewVal;
- int* pVal = GetPtr(indx);
- if(pVal == NULL) return -1;
- pVal[0] += val;
- NewVal = pVal[0];
- m_Volume += val;
- if(m_Max < NewVal)m_Max = NewVal;
- return NewVal;
- } /* Add. */
-
- void Add(DefMat* pMatAdd)
- {
- int* pIDXS = NULL;
- int Val = 0;
- for(Val = pMatAdd->GetNext(&pIDXS, 1 );pIDXS;Val=pMatAdd->GetNext(&pIDXS, 0 ))
- {
- Add(pIDXS,Val);
- }
- } /* Add. */
-
- int SetMax(int* indx, int val)
- {
- int NewVal;
- int* pVal = GetPtr(indx);
- if(pVal == NULL) return -1;
- if(val > pVal[0])
- {
- m_Volume += val-pVal[0];
- pVal[0] = val;
- }
- NewVal = pVal[0];
- if(m_Max < NewVal)m_Max = NewVal;
- return NewVal;
- } /* Add. */
-
- int GetNext(int** pIDXS, int init = 0)
- {
- int Val = 0;
- pIDXS[0] = NULL;
- if(m_pSparse)
- {
- m_pSparseNode = (init || m_pSparseNode==NULL)?
- cvInitSparseMatIterator( m_pSparse, &m_SparseIterator ):
- cvGetNextSparseNode( &m_SparseIterator );
-
- if(m_pSparseNode)
- {
- int* pVal = (int*)CV_NODE_VAL( m_pSparse, m_pSparseNode );
- if(pVal)Val = pVal[0];
- pIDXS[0] = CV_NODE_IDX( m_pSparse, m_pSparseNode );
- }
- }/* Sparse matrix. */
-
- if(m_pND)
- {
- int i;
- if(init)
- {
- for(i=0;i<m_Dim;++i)
- {
- m_IDXs[i] = cvGetDimSize( m_pND, i )-1;
- }
- pIDXS[0] = m_IDXs;
- Val = GetVal(m_IDXs);
- }
- else
- {
- for(i=0;i<m_Dim;++i)
- {
- if((m_IDXs[i]--)>0)
- break;
- m_IDXs[i] = cvGetDimSize( m_pND, i )-1;
- }
- if(i==m_Dim)
- {
- pIDXS[0] = NULL;
- }
- else
- {
- pIDXS[0] = m_IDXs;
- Val = GetVal(m_IDXs);
- }
-
- } /* Get next ND. */
-
- } /* Sparse matrix. */
-
- return Val;
-
- }; /* GetNext. */
+ m_Volume += val;
+ if(m_Max < val)m_Max = val;
+ }
+ } /* Calculate Volume of loaded hist. */
+
+ if(m_pND)
+ { /* Calculate Volume of loaded hist: */
+ CvMat mat;
+ double max_val;
+ double vol;
+ cvGetMat( m_pND, &mat, NULL, 1 );
+
+ vol = cvSum(&mat).val[0];
+ m_Volume = cvRound(vol);
+ cvMinMaxLoc( &mat, NULL, &max_val);
+ m_Max = cvRound(max_val);
+ /* MUST BE WRITTEN LATER */
+ } /* Calculate Volume of loaded hist. */
+ } /* AfterLoad. */
+
+ int* GetPtr(int* indx)
+ {
+ if(m_pSparse) return (int*)cvPtrND( m_pSparse, indx, NULL, 1, NULL);
+ if(m_pND) return (int*)cvPtrND( m_pND, indx, NULL, 1, NULL);
+ return NULL;
+ } /* GetPtr. */
+
+ int GetVal(int* indx)
+ {
+ int* p = GetPtr(indx);
+ if(p)return p[0];
+ return -1;
+ } /* GetVal. */
+
+ int Add(int* indx, int val)
+ {
+ int NewVal;
+ int* pVal = GetPtr(indx);
+ if(pVal == NULL) return -1;
+ pVal[0] += val;
+ NewVal = pVal[0];
+ m_Volume += val;
+ if(m_Max < NewVal)m_Max = NewVal;
+ return NewVal;
+ } /* Add. */
+
+ void Add(DefMat* pMatAdd)
+ {
+ int* pIDXS = NULL;
+ int Val = 0;
+ for(Val = pMatAdd->GetNext(&pIDXS, 1 );pIDXS;Val=pMatAdd->GetNext(&pIDXS, 0 ))
+ {
+ Add(pIDXS,Val);
+ }
+ } /* Add. */
+
+ int SetMax(int* indx, int val)
+ {
+ int NewVal;
+ int* pVal = GetPtr(indx);
+ if(pVal == NULL) return -1;
+ if(val > pVal[0])
+ {
+ m_Volume += val-pVal[0];
+ pVal[0] = val;
+ }
+ NewVal = pVal[0];
+ if(m_Max < NewVal)m_Max = NewVal;
+ return NewVal;
+ } /* Add. */
+
+ int GetNext(int** pIDXS, int init = 0)
+ {
+ int Val = 0;
+ pIDXS[0] = NULL;
+ if(m_pSparse)
+ {
+ m_pSparseNode = (init || m_pSparseNode==NULL)?
+ cvInitSparseMatIterator( m_pSparse, &m_SparseIterator ):
+ cvGetNextSparseNode( &m_SparseIterator );
+
+ if(m_pSparseNode)
+ {
+ int* pVal = (int*)CV_NODE_VAL( m_pSparse, m_pSparseNode );
+ if(pVal)Val = pVal[0];
+ pIDXS[0] = CV_NODE_IDX( m_pSparse, m_pSparseNode );
+ }
+ }/* Sparse matrix. */
+
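+        /* For a dense ND matrix the index vector is decremented like an odometer:
+           each dimension counts down from size-1 and carries into the next one. */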
+ if(m_pND)
+ {
+ int i;
+ if(init)
+ {
+ for(i=0;i<m_Dim;++i)
+ {
+ m_IDXs[i] = cvGetDimSize( m_pND, i )-1;
+ }
+ pIDXS[0] = m_IDXs;
+ Val = GetVal(m_IDXs);
+ }
+ else
+ {
+ for(i=0;i<m_Dim;++i)
+ {
+ if((m_IDXs[i]--)>0)
+ break;
+ m_IDXs[i] = cvGetDimSize( m_pND, i )-1;
+ }
+ if(i==m_Dim)
+ {
+ pIDXS[0] = NULL;
+ }
+ else
+ {
+ pIDXS[0] = m_IDXs;
+ Val = GetVal(m_IDXs);
+ }
+
+ } /* Get next ND. */
+
+ } /* Sparse matrix. */
+
+ return Val;
+
+ }; /* GetNext. */
};
#define FV_NUM 10
#define FV_SIZE 10
typedef struct DefTrackFG
{
- CvBlob blob;
- // CvBlobTrackFVGen* pFVGen;
- int LastFrame;
- float state;
- DefMat* pHist;
+ CvBlob blob;
+ // CvBlobTrackFVGen* pFVGen;
+ int LastFrame;
+ float state;
+ DefMat* pHist;
} DefTrackFG;
class CvBlobTrackAnalysisHist : public CvBlobTrackAnalysis
{
- /*---------------- Internal functions: --------------------*/
+ /*---------------- Internal functions: --------------------*/
private:
- int m_BinNumParam;
- int m_SmoothRadius;
- const char* m_SmoothKernel;
- float m_AbnormalThreshold;
- int m_TrackNum;
- int m_Frame;
- int m_BinNum;
- char m_DataFileName[1024];
- int m_Dim;
- int* m_Sizes;
- DefMat m_HistMat;
- int m_HistVolumeSaved;
- int* m_pFVi;
- int* m_pFViVar;
- int* m_pFViVarRes;
- CvBlobSeq m_TrackFGList;
- //CvBlobTrackFVGen* (*m_CreateFVGen)();
- CvBlobTrackFVGen* m_pFVGen;
- void SaveHist()
- {
- if(m_DataFileName[0])
- {
- m_HistMat.Save(m_DataFileName);
- m_HistVolumeSaved = m_HistMat.m_Volume;
- }
- };
- void LoadHist()
- {
- if(m_DataFileName[0])m_HistMat.Load(m_DataFileName);
- m_HistVolumeSaved = m_HistMat.m_Volume;
- }
- void AllocData()
- { /* AllocData: */
- m_pFVi = (int*)cvAlloc(sizeof(int)*m_Dim);
- m_pFViVar = (int*)cvAlloc(sizeof(int)*m_Dim);
- m_pFViVarRes = (int*)cvAlloc(sizeof(int)*m_Dim);
- m_Sizes = (int*)cvAlloc(sizeof(int)*m_Dim);
-
- { /* Create init sparce matrix: */
- int i;
- for(i=0;i<m_Dim;++i)m_Sizes[i] = m_BinNum;
- m_HistMat.Realloc(m_Dim,m_Sizes,SPARSE);
- m_HistVolumeSaved = 0;
- } /* Create init sparce matrix. */
- } /* AllocData. */
-
- void FreeData()
- { /* FreeData. */
- int i;
- for(i=m_TrackFGList.GetBlobNum();i>0;--i)
- {
- //DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlob(i-1);
- // pF->pFVGen->Release();
- m_TrackFGList.DelBlob(i-1);
- }
- cvFree(&m_pFVi);
- cvFree(&m_pFViVar);
- cvFree(&m_pFViVarRes);
- cvFree(&m_Sizes);
- } /* FreeData. */
-
- virtual void ParamUpdate()
- {
- if(m_BinNum != m_BinNumParam)
- {
- FreeData();
- m_BinNum = m_BinNumParam;
- AllocData();
- }
- }
+ int m_BinNumParam;
+ int m_SmoothRadius;
+ const char* m_SmoothKernel;
+ float m_AbnormalThreshold;
+ int m_TrackNum;
+ int m_Frame;
+ int m_BinNum;
+ char m_DataFileName[1024];
+ int m_Dim;
+ int* m_Sizes;
+ DefMat m_HistMat;
+ int m_HistVolumeSaved;
+ int* m_pFVi;
+ int* m_pFViVar;
+ int* m_pFViVarRes;
+ CvBlobSeq m_TrackFGList;
+ //CvBlobTrackFVGen* (*m_CreateFVGen)();
+ CvBlobTrackFVGen* m_pFVGen;
+ void SaveHist()
+ {
+ if(m_DataFileName[0])
+ {
+ m_HistMat.Save(m_DataFileName);
+ m_HistVolumeSaved = m_HistMat.m_Volume;
+ }
+ };
+ void LoadHist()
+ {
+ if(m_DataFileName[0])m_HistMat.Load(m_DataFileName);
+ m_HistVolumeSaved = m_HistMat.m_Volume;
+ }
+ void AllocData()
+ { /* AllocData: */
+ m_pFVi = (int*)cvAlloc(sizeof(int)*m_Dim);
+ m_pFViVar = (int*)cvAlloc(sizeof(int)*m_Dim);
+ m_pFViVarRes = (int*)cvAlloc(sizeof(int)*m_Dim);
+ m_Sizes = (int*)cvAlloc(sizeof(int)*m_Dim);
+
+        { /* Create initial sparse matrix: */
+ int i;
+ for(i=0;i<m_Dim;++i)m_Sizes[i] = m_BinNum;
+ m_HistMat.Realloc(m_Dim,m_Sizes,SPARSE);
+ m_HistVolumeSaved = 0;
+        } /* Create initial sparse matrix. */
+ } /* AllocData. */
+
+ void FreeData()
+ { /* FreeData. */
+ int i;
+ for(i=m_TrackFGList.GetBlobNum();i>0;--i)
+ {
+ //DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlob(i-1);
+ // pF->pFVGen->Release();
+ m_TrackFGList.DelBlob(i-1);
+ }
+ cvFree(&m_pFVi);
+ cvFree(&m_pFViVar);
+ cvFree(&m_pFViVarRes);
+ cvFree(&m_Sizes);
+ } /* FreeData. */
+
+ virtual void ParamUpdate()
+ {
+ if(m_BinNum != m_BinNumParam)
+ {
+ FreeData();
+ m_BinNum = m_BinNumParam;
+ AllocData();
+ }
+ }
public:
- CvBlobTrackAnalysisHist(CvBlobTrackFVGen* (*createFVGen)()):m_TrackFGList(sizeof(DefTrackFG))
- {
- m_pFVGen = createFVGen();
- m_Dim = m_pFVGen->GetFVSize();
- m_Frame = 0;
- m_pFVi = 0;
- m_TrackNum = 0;
- m_BinNum = 32;
- m_DataFileName[0] = 0;
-
- m_AbnormalThreshold = 0.02f;
- AddParam("AbnormalThreshold",&m_AbnormalThreshold);
- CommentParam("AbnormalThreshold","If trajectory histogram value is lesst then <AbnormalThreshold*DataBaseTrackNum> then trajectory is abnormal");
-
- m_SmoothRadius = 1;
- AddParam("SmoothRadius",&m_SmoothRadius);
- CommentParam("AbnormalThreshold","Radius (in bins) for histogram smoothing");
-
- m_SmoothKernel = "L";
- AddParam("SmoothKernel",&m_SmoothKernel);
- CommentParam("SmoothKernel","L - Linear, G - Gaussian");
-
-
- m_BinNumParam = m_BinNum;
- AddParam("BinNum",&m_BinNumParam);
- CommentParam("BinNum","Number of bin for each dimention of feature vector");
-
- AllocData();
- SetModuleName("Hist");
-
- } /* Constructor. */
-
- ~CvBlobTrackAnalysisHist()
- {
- SaveHist();
- FreeData();
- m_pFVGen->Release();
- } /* Destructor. */
-
- /*----------------- Interface: --------------------*/
- virtual void AddBlob(CvBlob* pBlob)
- {
- DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(CV_BLOB_ID(pBlob));
- if(pF == NULL)
- { /* create new filter */
- DefTrackFG F;
- F.state = 0;
- F.blob = pBlob[0];
- F.LastFrame = m_Frame;
- // F.pFVGen = m_CreateFVGen();
- F.pHist = new DefMat(m_Dim,m_Sizes,SPARSE);
- m_TrackFGList.AddBlob((CvBlob*)&F);
- pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(CV_BLOB_ID(pBlob));
- }
-
- assert(pF);
- pF->blob = pBlob[0];
- pF->LastFrame = m_Frame;
- m_pFVGen->AddBlob(pBlob);
- };
- virtual void Process(IplImage* pImg, IplImage* pFG)
- {
- int i;
- m_pFVGen->Process(pImg, pFG);
- int SK = m_SmoothKernel[0];
-
- for(i=0; i<m_pFVGen->GetFVNum(); ++i)
- {
- int BlobID = 0;
- float* pFV = m_pFVGen->GetFV(i,&BlobID);
- float* pFVMax = m_pFVGen->GetFVMax();
- float* pFVMin = m_pFVGen->GetFVMin();
- DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(BlobID);
- int HistVal = 1;
-
- if(pFV==NULL) break;
-
- pF->LastFrame = m_Frame;
-
- { /* Binarize FV: */
- int j;
- for(j=0; j<m_Dim; ++j)
- {
- int index;
- float f0 = pFVMin?pFVMin[j]:0;
- float f1 = pFVMax?pFVMax[j]:1;
- assert(f1>f0);
- index = cvRound((m_BinNum-1)*(pFV[j]-f0)/(f1-f0));
- if(index<0)index=0;
- if(index>=m_BinNum)index=m_BinNum-1;
- m_pFVi[j] = index;
- }
- }
-
- HistVal = m_HistMat.GetVal(m_pFVi);/* get bin value*/
- pF->state = 0;
- { /* Calculate state: */
- float T = m_HistMat.m_Max*m_AbnormalThreshold; /* calc threshold */
-
- if(m_TrackNum>0) T = 256.0f * m_TrackNum*m_AbnormalThreshold;
- if(T>0)
- {
- pF->state = (T - HistVal)/(T*0.2f) + 0.5f;
- }
- if(pF->state<0)pF->state=0;
- if(pF->state>1)pF->state=1;
- }
-
- { /* If it is a new FV then add it to trajectory histogram: */
- int i,flag = 1;
- int r = m_SmoothRadius;
-
- // printf("BLob %3d NEW FV [", CV_BLOB_ID(pF));
- // for(i=0;i<m_Dim;++i) printf("%d,", m_pFVi[i]);
- // printf("]");
-
- for(i=0; i<m_Dim; ++i)
- {
- m_pFViVar[i]=-r;
- }
-
- while(flag)
- {
- float dist = 0;
- int HistAdd = 0;
- int i;
- int good = 1;
- for(i=0; i<m_Dim; ++i)
- {
- m_pFViVarRes[i] = m_pFVi[i]+m_pFViVar[i];
- if(m_pFViVarRes[i]<0) good= 0;
- if(m_pFViVarRes[i]>=m_BinNum) good= 0;
- dist += m_pFViVar[i]*m_pFViVar[i];
- }/* Calculate next dimension. */
-
- if(SK=='G' || SK=='g')
- {
- double dist2 = dist/(r*r);
- HistAdd = cvRound(256*exp(-dist2)); /* Hist Add for (dist=1) = 25.6*/
- }
- else if(SK=='L' || SK=='l')
- {
- dist = (float)(sqrt(dist)/(r+1));
- HistAdd = cvRound(256*(1-dist));
- }
- else
- {
- HistAdd = 255; /* Flat smoothing. */
- }
-
- if(good && HistAdd>0)
- { /* Update histogram: */
- assert(pF->pHist);
- pF->pHist->SetMax(m_pFViVarRes, HistAdd);
- } /* Update histogram. */
-
- for(i=0; i<m_Dim; ++i)
- { /* Next config: */
- if((m_pFViVar[i]++) < r)
- break;
- m_pFViVar[i] = -r;
- } /* Increase next dimension variable. */
- if(i==m_Dim)break;
- } /* Next variation. */
- } /* If new FV. */
- } /* Next FV. */
-
- { /* Check all blobs on list: */
- int i;
- for(i=m_TrackFGList.GetBlobNum(); i>0; --i)
- { /* Add histogram and delete blob from list: */
- DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlob(i-1);
- if(pF->LastFrame+3 < m_Frame && pF->pHist)
- {
- m_HistMat.Add(pF->pHist);
- delete pF->pHist;
- m_TrackNum++;
- m_TrackFGList.DelBlob(i-1);
- }
- }/* next blob */
- }
-
- m_Frame++;
-
- if(m_Wnd)
- { /* Debug output: */
- int* idxs = NULL;
- int Val = 0;
- IplImage* pI = cvCloneImage(pImg);
-
- cvZero(pI);
-
- for(Val = m_HistMat.GetNext(&idxs,1); idxs; Val=m_HistMat.GetNext(&idxs,0))
- { /* Draw all elements: */
- float vf;
- int x,y;
-
- if(!idxs) break;
- if(Val == 0) continue;
-
- vf = (float)Val/(m_HistMat.m_Max?m_HistMat.m_Max:1);
- x = cvRound((float)(pI->width-1)*(float)idxs[0] / (float)m_BinNum);
- y = cvRound((float)(pI->height-1)*(float)idxs[1] / (float)m_BinNum);
-
- cvCircle(pI, cvPoint(x,y), cvRound(vf*pI->height/(m_BinNum*2)),CV_RGB(255,0,0),CV_FILLED);
- if(m_Dim > 3)
- {
- int dx = -2*(idxs[2]-m_BinNum/2);
- int dy = -2*(idxs[3]-m_BinNum/2);
- cvLine(pI,cvPoint(x,y),cvPoint(x+dx,y+dy),CV_RGB(0,cvRound(vf*255),1));
- }
- if( m_Dim==4 &&
- m_pFVGen->GetFVMax()[0]==m_pFVGen->GetFVMax()[2] &&
- m_pFVGen->GetFVMax()[1]==m_pFVGen->GetFVMax()[3])
- {
- int x = cvRound((float)(pI->width-1)*(float)idxs[2] / (float)m_BinNum);
- int y = cvRound((float)(pI->height-1)*(float)idxs[3] / (float)m_BinNum);
- cvCircle(pI, cvPoint(x,y), cvRound(vf*pI->height/(m_BinNum*2)),CV_RGB(0,0,255),CV_FILLED);
- }
- } /* Draw all elements. */
-
- for(i=m_TrackFGList.GetBlobNum();i>0;--i)
- {
- DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlob(i-1);
- DefMat* pHist = pF?pF->pHist:NULL;
-
- if(pHist==NULL) continue;
-
- for(Val = pHist->GetNext(&idxs,1);idxs;Val=pHist->GetNext(&idxs,0))
- { /* Draw all elements: */
- float vf;
- int x,y;
-
- if(!idxs) break;
- if(Val == 0) continue;
-
- vf = (float)Val/(pHist->m_Max?pHist->m_Max:1);
- x = cvRound((float)(pI->width-1)*(float)idxs[0] / (float)m_BinNum);
- y = cvRound((float)(pI->height-1)*(float)idxs[1] / (float)m_BinNum);
-
- cvCircle(pI, cvPoint(x,y), cvRound(2*vf),CV_RGB(0,0,cvRound(255*vf)),CV_FILLED);
- if(m_Dim > 3)
- {
- int dx = -2*(idxs[2]-m_BinNum/2);
- int dy = -2*(idxs[3]-m_BinNum/2);
- cvLine(pI,cvPoint(x,y),cvPoint(x+dx,y+dy),CV_RGB(0,0,255));
- }
- if( m_Dim==4 &&
- m_pFVGen->GetFVMax()[0]==m_pFVGen->GetFVMax()[2] &&
- m_pFVGen->GetFVMax()[1]==m_pFVGen->GetFVMax()[3])
- { /* if SS feature vector */
- int x = cvRound((float)(pI->width-1)*(float)idxs[2] / (float)m_BinNum);
- int y = cvRound((float)(pI->height-1)*(float)idxs[3] / (float)m_BinNum);
- cvCircle(pI, cvPoint(x,y), cvRound(vf*pI->height/(m_BinNum*2)),CV_RGB(0,0,255),CV_FILLED);
- }
- } /* Draw all elements. */
- } /* Next track. */
-
- //cvNamedWindow("Hist",0);
- //cvShowImage("Hist", pI);
- cvReleaseImage(&pI);
- }
- };
-
- float GetState(int BlobID)
- {
- DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(BlobID);
- return pF?pF->state:0.0f;
- };
-
- /* Return 0 if trajectory is normal;
+ CvBlobTrackAnalysisHist(CvBlobTrackFVGen* (*createFVGen)()):m_TrackFGList(sizeof(DefTrackFG))
+ {
+ m_pFVGen = createFVGen();
+ m_Dim = m_pFVGen->GetFVSize();
+ m_Frame = 0;
+ m_pFVi = 0;
+ m_TrackNum = 0;
+ m_BinNum = 32;
+ m_DataFileName[0] = 0;
+
+ m_AbnormalThreshold = 0.02f;
+ AddParam("AbnormalThreshold",&m_AbnormalThreshold);
+        CommentParam("AbnormalThreshold","If trajectory histogram value is less than <AbnormalThreshold*DataBaseTrackNum> then the trajectory is abnormal");
+
+ m_SmoothRadius = 1;
+ AddParam("SmoothRadius",&m_SmoothRadius);
+        CommentParam("SmoothRadius","Radius (in bins) for histogram smoothing");
+
+ m_SmoothKernel = "L";
+ AddParam("SmoothKernel",&m_SmoothKernel);
+ CommentParam("SmoothKernel","L - Linear, G - Gaussian");
+
+
+ m_BinNumParam = m_BinNum;
+ AddParam("BinNum",&m_BinNumParam);
+        CommentParam("BinNum","Number of bins for each dimension of the feature vector");
+
+ AllocData();
+ SetModuleName("Hist");
+
+ } /* Constructor. */
+
+ ~CvBlobTrackAnalysisHist()
+ {
+ SaveHist();
+ FreeData();
+ m_pFVGen->Release();
+ } /* Destructor. */
+
+ /*----------------- Interface: --------------------*/
+ virtual void AddBlob(CvBlob* pBlob)
+ {
+ DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(CV_BLOB_ID(pBlob));
+ if(pF == NULL)
+ { /* create new filter */
+ DefTrackFG F;
+ F.state = 0;
+ F.blob = pBlob[0];
+ F.LastFrame = m_Frame;
+ // F.pFVGen = m_CreateFVGen();
+ F.pHist = new DefMat(m_Dim,m_Sizes,SPARSE);
+ m_TrackFGList.AddBlob((CvBlob*)&F);
+ pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(CV_BLOB_ID(pBlob));
+ }
+
+ assert(pF);
+ pF->blob = pBlob[0];
+ pF->LastFrame = m_Frame;
+ m_pFVGen->AddBlob(pBlob);
+ };
+ virtual void Process(IplImage* pImg, IplImage* pFG)
+ {
+ m_pFVGen->Process(pImg, pFG);
+ int SK = m_SmoothKernel[0];
+
+ for(int i=0; i<m_pFVGen->GetFVNum(); ++i)
+ {
+ int BlobID = 0;
+ float* pFV = m_pFVGen->GetFV(i,&BlobID);
+ float* pFVMax = m_pFVGen->GetFVMax();
+ float* pFVMin = m_pFVGen->GetFVMin();
+ DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(BlobID);
+ int HistVal = 1;
+
+ if(pFV==NULL) break;
+
+ pF->LastFrame = m_Frame;
+
+ { /* Binarize FV: */
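+                /* Quantize each FV component into one of m_BinNum bins using the
+                   [FVMin, FVMax] range reported by the feature-vector generator. */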
+ int j;
+ for(j=0; j<m_Dim; ++j)
+ {
+ int index;
+ float f0 = pFVMin?pFVMin[j]:0;
+ float f1 = pFVMax?pFVMax[j]:1;
+ assert(f1>f0);
+ index = cvRound((m_BinNum-1)*(pFV[j]-f0)/(f1-f0));
+ if(index<0)index=0;
+ if(index>=m_BinNum)index=m_BinNum-1;
+ m_pFVi[j] = index;
+ }
+ }
+
+ HistVal = m_HistMat.GetVal(m_pFVi);/* get bin value*/
+ pF->state = 0;
+ { /* Calculate state: */
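+                /* state is mapped into [0,1]: ~0 for histogram bins visited often by
+                   past trajectories (normal), ~1 for rarely visited bins (abnormal). */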
+ float T = m_HistMat.m_Max*m_AbnormalThreshold; /* calc threshold */
+
+ if(m_TrackNum>0) T = 256.0f * m_TrackNum*m_AbnormalThreshold;
+ if(T>0)
+ {
+ pF->state = (T - HistVal)/(T*0.2f) + 0.5f;
+ }
+ if(pF->state<0)pF->state=0;
+ if(pF->state>1)pF->state=1;
+ }
+
+ { /* If it is a new FV then add it to trajectory histogram: */
+ int flag = 1;
+ int r = m_SmoothRadius;
+
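+                /* Splat the binned FV into the per-track histogram over the whole
+                   (2r+1)^Dim neighbourhood; SetMax() raises each bin to at least
+                   HistAdd, which falls off with distance per the selected kernel. */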
+ // printf("BLob %3d NEW FV [", CV_BLOB_ID(pF));
+ // for(i=0;i<m_Dim;++i) printf("%d,", m_pFVi[i]);
+ // printf("]");
+
+ for(int k=0; k<m_Dim; ++k)
+ {
+ m_pFViVar[k]=-r;
+ }
+
+ while(flag)
+ {
+ float dist = 0;
+ int HistAdd = 0;
+ int good = 1;
+ for(int k=0; k<m_Dim; ++k)
+ {
+ m_pFViVarRes[k] = m_pFVi[k]+m_pFViVar[k];
+ if(m_pFViVarRes[k]<0) good= 0;
+ if(m_pFViVarRes[k]>=m_BinNum) good= 0;
+ dist += m_pFViVar[k]*m_pFViVar[k];
+ }/* Calculate next dimension. */
+
+ if(SK=='G' || SK=='g')
+ {
+ double dist2 = dist/(r*r);
+                        HistAdd = cvRound(256*exp(-dist2)); /* Gaussian fall-off in squared distance, normalized by r*r */
+ }
+ else if(SK=='L' || SK=='l')
+ {
+ dist = (float)(sqrt(dist)/(r+1));
+ HistAdd = cvRound(256*(1-dist));
+ }
+ else
+ {
+ HistAdd = 255; /* Flat smoothing. */
+ }
+
+ if(good && HistAdd>0)
+ { /* Update histogram: */
+ assert(pF->pHist);
+ pF->pHist->SetMax(m_pFViVarRes, HistAdd);
+ } /* Update histogram. */
+
+ int idx = 0;
+ for( ; idx<m_Dim; ++idx)
+ { /* Next config: */
+ if((m_pFViVar[idx]++) < r)
+ break;
+ m_pFViVar[idx] = -r;
+ } /* Increase next dimension variable. */
+ if(idx==m_Dim)break;
+ } /* Next variation. */
+ } /* If new FV. */
+ } /* Next FV. */
+
+ { /* Check all blobs on list: */
+ int i;
+ for(i=m_TrackFGList.GetBlobNum(); i>0; --i)
+ { /* Add histogram and delete blob from list: */
+ DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlob(i-1);
+ if(pF->LastFrame+3 < m_Frame && pF->pHist)
+ {
+ m_HistMat.Add(pF->pHist);
+ delete pF->pHist;
+ m_TrackNum++;
+ m_TrackFGList.DelBlob(i-1);
+ }
+ }/* next blob */
+ }
+
+ m_Frame++;
+
+ if(m_Wnd)
+ { /* Debug output: */
+ int* idxs = NULL;
+ int Val = 0;
+ IplImage* pI = cvCloneImage(pImg);
+
+ cvZero(pI);
+
+ for(Val = m_HistMat.GetNext(&idxs,1); idxs; Val=m_HistMat.GetNext(&idxs,0))
+ { /* Draw all elements: */
+ if(!idxs) break;
+ if(Val == 0) continue;
+
+ float vf = (float)Val/(m_HistMat.m_Max?m_HistMat.m_Max:1);
+ int x = cvRound((float)(pI->width-1)*(float)idxs[0] / (float)m_BinNum);
+ int y = cvRound((float)(pI->height-1)*(float)idxs[1] / (float)m_BinNum);
+
+ cvCircle(pI, cvPoint(x,y), cvRound(vf*pI->height/(m_BinNum*2)),CV_RGB(255,0,0),CV_FILLED);
+ if(m_Dim > 3)
+ {
+ int dx = -2*(idxs[2]-m_BinNum/2);
+ int dy = -2*(idxs[3]-m_BinNum/2);
+ cvLine(pI,cvPoint(x,y),cvPoint(x+dx,y+dy),CV_RGB(0,cvRound(vf*255),1));
+ }
+ if( m_Dim==4 &&
+ m_pFVGen->GetFVMax()[0]==m_pFVGen->GetFVMax()[2] &&
+ m_pFVGen->GetFVMax()[1]==m_pFVGen->GetFVMax()[3])
+ {
+ int x1 = cvRound((float)(pI->width-1)*(float)idxs[2] / (float)m_BinNum);
+ int y1 = cvRound((float)(pI->height-1)*(float)idxs[3] / (float)m_BinNum);
+ cvCircle(pI, cvPoint(x1,y1), cvRound(vf*pI->height/(m_BinNum*2)),CV_RGB(0,0,255),CV_FILLED);
+ }
+ } /* Draw all elements. */
+
+ for(int i=m_TrackFGList.GetBlobNum();i>0;--i)
+ {
+ DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlob(i-1);
+ DefMat* pHist = pF?pF->pHist:NULL;
+
+ if(pHist==NULL) continue;
+
+ for(Val = pHist->GetNext(&idxs,1);idxs;Val=pHist->GetNext(&idxs,0))
+ { /* Draw all elements: */
+ float vf;
+ int x,y;
+
+ if(!idxs) break;
+ if(Val == 0) continue;
+
+ vf = (float)Val/(pHist->m_Max?pHist->m_Max:1);
+ x = cvRound((float)(pI->width-1)*(float)idxs[0] / (float)m_BinNum);
+ y = cvRound((float)(pI->height-1)*(float)idxs[1] / (float)m_BinNum);
+
+ cvCircle(pI, cvPoint(x,y), cvRound(2*vf),CV_RGB(0,0,cvRound(255*vf)),CV_FILLED);
+ if(m_Dim > 3)
+ {
+ int dx = -2*(idxs[2]-m_BinNum/2);
+ int dy = -2*(idxs[3]-m_BinNum/2);
+ cvLine(pI,cvPoint(x,y),cvPoint(x+dx,y+dy),CV_RGB(0,0,255));
+ }
+ if( m_Dim==4 &&
+ m_pFVGen->GetFVMax()[0]==m_pFVGen->GetFVMax()[2] &&
+ m_pFVGen->GetFVMax()[1]==m_pFVGen->GetFVMax()[3])
+ { /* if SS feature vector */
+ int x1 = cvRound((float)(pI->width-1)*(float)idxs[2] / (float)m_BinNum);
+ int y1 = cvRound((float)(pI->height-1)*(float)idxs[3] / (float)m_BinNum);
+ cvCircle(pI, cvPoint(x1,y1), cvRound(vf*pI->height/(m_BinNum*2)),CV_RGB(0,0,255),CV_FILLED);
+ }
+ } /* Draw all elements. */
+ } /* Next track. */
+
+ //cvNamedWindow("Hist",0);
+ //cvShowImage("Hist", pI);
+ cvReleaseImage(&pI);
+ }
+ };
+
+ float GetState(int BlobID)
+ {
+ DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(BlobID);
+ return pF?pF->state:0.0f;
+ };
+
+ /* Return 0 if trajectory is normal;
return >0 if trajectory abnormal. */
- virtual const char* GetStateDesc(int BlobID)
- {
- if(GetState(BlobID)>0.5) return "abnormal";
- return NULL;
- }
-
- virtual void SetFileName(char* DataBaseName)
- {
- if(m_HistMat.m_Volume!=m_HistVolumeSaved)SaveHist();
- m_DataFileName[0] = m_DataFileName[1000] = 0;
-
- if(DataBaseName)
- {
- strncpy(m_DataFileName,DataBaseName,1000);
- strcat(m_DataFileName, ".yml");
- }
- LoadHist();
- };
-
- virtual void SaveState(CvFileStorage* fs)
- {
- int b, bN = m_TrackFGList.GetBlobNum();
- cvWriteInt(fs,"BlobNum",bN);
- cvStartWriteStruct(fs,"BlobList",CV_NODE_SEQ);
-
- for(b=0; b<bN; ++b)
- {
- DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlob(b);
- cvStartWriteStruct(fs,NULL,CV_NODE_MAP);
- cvWriteStruct(fs,"Blob", &(pF->blob), "ffffi");
- cvWriteInt(fs,"LastFrame",pF->LastFrame);
- cvWriteReal(fs,"State",pF->state);
- pF->pHist->Save(fs, "Hist");
- cvEndWriteStruct(fs);
- }
- cvEndWriteStruct(fs);
- m_HistMat.Save(fs, "Hist");
- };
-
- virtual void LoadState(CvFileStorage* fs, CvFileNode* node)
- {
- CvFileNode* pBLN = cvGetFileNodeByName(fs,node,"BlobList");
-
- if(pBLN && CV_NODE_IS_SEQ(pBLN->tag))
- {
- int b, bN = pBLN->data.seq->total;
- for(b=0; b<bN; ++b)
- {
- DefTrackFG* pF = NULL;
- CvBlob Blob;
- CvFileNode* pBN = (CvFileNode*)cvGetSeqElem(pBLN->data.seq,b);
-
- assert(pBN);
- cvReadStructByName(fs, pBN, "Blob", &Blob, "ffffi");
- AddBlob(&Blob);
- pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(Blob.ID);
- if(pF==NULL) continue;
- assert(pF);
- pF->state = (float)cvReadIntByName(fs,pBN,"State",cvRound(pF->state));
- assert(pF->pHist);
- pF->pHist->Load(fs,pBN,"Hist");
- }
- }
-
- m_HistMat.Load(fs, node, "Hist");
- }; /* LoadState */
-
-
- virtual void Release(){ delete this; };
+ virtual const char* GetStateDesc(int BlobID)
+ {
+ if(GetState(BlobID)>0.5) return "abnormal";
+ return NULL;
+ }
+
+ virtual void SetFileName(char* DataBaseName)
+ {
+ if(m_HistMat.m_Volume!=m_HistVolumeSaved)SaveHist();
+ m_DataFileName[0] = m_DataFileName[1000] = 0;
+
+ if(DataBaseName)
+ {
+ strncpy(m_DataFileName,DataBaseName,1000);
+ strcat(m_DataFileName, ".yml");
+ }
+ LoadHist();
+ };
+
+ virtual void SaveState(CvFileStorage* fs)
+ {
+ int b, bN = m_TrackFGList.GetBlobNum();
+ cvWriteInt(fs,"BlobNum",bN);
+ cvStartWriteStruct(fs,"BlobList",CV_NODE_SEQ);
+
+ for(b=0; b<bN; ++b)
+ {
+ DefTrackFG* pF = (DefTrackFG*)m_TrackFGList.GetBlob(b);
+ cvStartWriteStruct(fs,NULL,CV_NODE_MAP);
+ cvWriteStruct(fs,"Blob", &(pF->blob), "ffffi");
+ cvWriteInt(fs,"LastFrame",pF->LastFrame);
+ cvWriteReal(fs,"State",pF->state);
+ pF->pHist->Save(fs, "Hist");
+ cvEndWriteStruct(fs);
+ }
+ cvEndWriteStruct(fs);
+ m_HistMat.Save(fs, "Hist");
+ };
+
+ virtual void LoadState(CvFileStorage* fs, CvFileNode* node)
+ {
+ CvFileNode* pBLN = cvGetFileNodeByName(fs,node,"BlobList");
+
+ if(pBLN && CV_NODE_IS_SEQ(pBLN->tag))
+ {
+ int b, bN = pBLN->data.seq->total;
+ for(b=0; b<bN; ++b)
+ {
+ DefTrackFG* pF = NULL;
+ CvBlob Blob;
+ CvFileNode* pBN = (CvFileNode*)cvGetSeqElem(pBLN->data.seq,b);
+
+ assert(pBN);
+ cvReadStructByName(fs, pBN, "Blob", &Blob, "ffffi");
+ AddBlob(&Blob);
+ pF = (DefTrackFG*)m_TrackFGList.GetBlobByID(Blob.ID);
+ if(pF==NULL) continue;
+ assert(pF);
+ pF->state = (float)cvReadIntByName(fs,pBN,"State",cvRound(pF->state));
+ assert(pF->pHist);
+ pF->pHist->Load(fs,pBN,"Hist");
+ }
+ }
+
+ m_HistMat.Load(fs, node, "Hist");
+ }; /* LoadState */
+
+
+ virtual void Release(){ delete this; };
};
typedef struct DefTrackSVM
{
- CvBlob blob;
- // CvBlobTrackFVGen* pFVGen;
- int LastFrame;
- float state;
- CvBlob BlobLast;
- CvSeq* pFVSeq;
- CvMemStorage* pMem;
+ CvBlob blob;
+ // CvBlobTrackFVGen* pFVGen;
+ int LastFrame;
+ float state;
+ CvBlob BlobLast;
+ CvSeq* pFVSeq;
+ CvMemStorage* pMem;
} DefTrackSVM;
class CvBlobTrackAnalysisSVM : public CvBlobTrackAnalysis
{
- /*---------------- Internal functions: --------------------*/
+ /*---------------- Internal functions: --------------------*/
private:
- CvMemStorage* m_pMem;
- int m_TrackNum;
- int m_Frame;
- char m_DataFileName[1024];
- int m_Dim;
- float* m_pFV;
- //CvStatModel* m_pStatModel;
- void* m_pStatModel;
- CvBlobSeq m_Tracks;
- CvMat* m_pTrainData;
- int m_LastTrainDataSize;
- // CvBlobTrackFVGen* (*m_CreateFVGen)();
- CvBlobTrackFVGen* m_pFVGen;
- float m_NU;
- float m_RBFWidth;
- IplImage* m_pStatImg; /* for debug purpose */
- CvSize m_ImgSize;
- void RetrainStatModel()
- {
- ///////// !!!!! TODO !!!!! Repair /////////////
+ CvMemStorage* m_pMem;
+ int m_TrackNum;
+ int m_Frame;
+ char m_DataFileName[1024];
+ int m_Dim;
+ float* m_pFV;
+ //CvStatModel* m_pStatModel;
+ void* m_pStatModel;
+ CvBlobSeq m_Tracks;
+ CvMat* m_pTrainData;
+ int m_LastTrainDataSize;
+ // CvBlobTrackFVGen* (*m_CreateFVGen)();
+ CvBlobTrackFVGen* m_pFVGen;
+ float m_NU;
+ float m_RBFWidth;
+ IplImage* m_pStatImg; /* for debug purpose */
+ CvSize m_ImgSize;
+ void RetrainStatModel()
+ {
+ ///////// !!!!! TODO !!!!! Repair /////////////
#if 0
- float nu = 0;
- CvSVMModelParams SVMParams = {0};
- CvStatModel* pM = NULL;
-
-
- memset(&SVMParams,0,sizeof(SVMParams));
- SVMParams.svm_type = CV_SVM_ONE_CLASS;
- SVMParams.kernel_type = CV_SVM_RBF;
- SVMParams.gamma = 2.0/(m_RBFWidth*m_RBFWidth);
- SVMParams.nu = m_NU;
- SVMParams.degree = 3;
- SVMParams.criteria = cvTermCriteria(CV_TERMCRIT_EPS, 100, 1e-3 );
- SVMParams.C = 1;
- SVMParams.p = 0.1;
-
-
- if(m_pTrainData == NULL) return;
- {
- int64 TickCount = cvGetTickCount();
- printf("Frame: %d\n Retrain SVM\nData Size = %d\n",m_Frame, m_pTrainData->rows);
- pM = cvTrainSVM( m_pTrainData,CV_ROW_SAMPLE, NULL, (CvStatModelParams*)&SVMParams, NULL, NULL);
- TickCount = cvGetTickCount() - TickCount ;
- printf("SV Count = %d\n",((CvSVMModel*)pM)->sv_total);
- printf("Processing Time = %.1f(ms)\n",TickCount/(1000*cvGetTickFrequency()));
-
- }
- if(pM==NULL) return;
- if(m_pStatModel) cvReleaseStatModel(&m_pStatModel);
- m_pStatModel = pM;
-
- if(m_pTrainData && m_Wnd)
- {
- float MaxVal = 0;
- IplImage* pW = cvCreateImage(m_ImgSize,IPL_DEPTH_32F,1);
- IplImage* pI = cvCreateImage(m_ImgSize,IPL_DEPTH_8U,1);
- float* pFVVar = m_pFVGen->GetFVVar();
- int i;
- cvZero(pW);
-
- for(i=0; i<m_pTrainData->rows; ++i)
- { /* Draw all elements: */
- float* pFV = (float*)(m_pTrainData->data.ptr + m_pTrainData->step*i);
- int x = cvRound(pFV[0]*pFVVar[0]);
- int y = cvRound(pFV[1]*pFVVar[1]);
- float r;
-
- if(x<0)x=0;
- if(x>=pW->width)x=pW->width-1;
- if(y<0)y=0;
- if(y>=pW->height)y=pW->height-1;
-
- r = ((float*)(pW->imageData + y*pW->widthStep))[x];
- r++;
- ((float*)(pW->imageData + y*pW->widthStep))[x] = r;
-
- if(r>MaxVal)MaxVal=r;
- } /* Next point. */
-
- if(MaxVal>0)cvConvertScale(pW,pI,255/MaxVal,0);
- cvNamedWindow("SVMData",0);
- cvShowImage("SVMData",pI);
- cvSaveImage("SVMData.bmp",pI);
- cvReleaseImage(&pW);
- cvReleaseImage(&pI);
- } /* Prepare for debug. */
-
- if(m_pStatModel && m_Wnd && m_Dim == 2)
- {
- float* pFVVar = m_pFVGen->GetFVVar();
- int x,y;
- if(m_pStatImg==NULL)
- {
- m_pStatImg = cvCreateImage(m_ImgSize,IPL_DEPTH_8U,1);
- }
- cvZero(m_pStatImg);
-
- for(y=0; y<m_pStatImg->height; y+=1) for(x=0; x<m_pStatImg->width; x+=1)
- { /* Draw all elements: */
- float res;
- uchar* pData = (uchar*)m_pStatImg->imageData + x + y*m_pStatImg->widthStep;
- CvMat FVmat;
- float xy[2] = {x/pFVVar[0],y/pFVVar[1]};
- cvInitMatHeader( &FVmat, 1, 2, CV_32F, xy );
- res = cvStatModelPredict( m_pStatModel, &FVmat, NULL );
- pData[0]=((res>0.5)?255:0);
- } /* Next point. */
-
- cvNamedWindow("SVMMask",0);
- cvShowImage("SVMMask",m_pStatImg);
- cvSaveImage("SVMMask.bmp",m_pStatImg);
- } /* Prepare for debug. */
+ float nu = 0;
+ CvSVMModelParams SVMParams = {0};
+ CvStatModel* pM = NULL;
+
+
+ memset(&SVMParams,0,sizeof(SVMParams));
+ SVMParams.svm_type = CV_SVM_ONE_CLASS;
+ SVMParams.kernel_type = CV_SVM_RBF;
+ SVMParams.gamma = 2.0/(m_RBFWidth*m_RBFWidth);
+ SVMParams.nu = m_NU;
+ SVMParams.degree = 3;
+ SVMParams.criteria = cvTermCriteria(CV_TERMCRIT_EPS, 100, 1e-3 );
+ SVMParams.C = 1;
+ SVMParams.p = 0.1;
+
+
+ if(m_pTrainData == NULL) return;
+ {
+ int64 TickCount = cvGetTickCount();
+ printf("Frame: %d\n Retrain SVM\nData Size = %d\n",m_Frame, m_pTrainData->rows);
+ pM = cvTrainSVM( m_pTrainData,CV_ROW_SAMPLE, NULL, (CvStatModelParams*)&SVMParams, NULL, NULL);
+ TickCount = cvGetTickCount() - TickCount ;
+ printf("SV Count = %d\n",((CvSVMModel*)pM)->sv_total);
+ printf("Processing Time = %.1f(ms)\n",TickCount/(1000*cvGetTickFrequency()));
+
+ }
+ if(pM==NULL) return;
+ if(m_pStatModel) cvReleaseStatModel(&m_pStatModel);
+ m_pStatModel = pM;
+
+ if(m_pTrainData && m_Wnd)
+ {
+ float MaxVal = 0;
+ IplImage* pW = cvCreateImage(m_ImgSize,IPL_DEPTH_32F,1);
+ IplImage* pI = cvCreateImage(m_ImgSize,IPL_DEPTH_8U,1);
+ float* pFVVar = m_pFVGen->GetFVVar();
+ int i;
+ cvZero(pW);
+
+ for(i=0; i<m_pTrainData->rows; ++i)
+ { /* Draw all elements: */
+ float* pFV = (float*)(m_pTrainData->data.ptr + m_pTrainData->step*i);
+ int x = cvRound(pFV[0]*pFVVar[0]);
+ int y = cvRound(pFV[1]*pFVVar[1]);
+ float r;
+
+ if(x<0)x=0;
+ if(x>=pW->width)x=pW->width-1;
+ if(y<0)y=0;
+ if(y>=pW->height)y=pW->height-1;
+
+ r = ((float*)(pW->imageData + y*pW->widthStep))[x];
+ r++;
+ ((float*)(pW->imageData + y*pW->widthStep))[x] = r;
+
+ if(r>MaxVal)MaxVal=r;
+ } /* Next point. */
+
+ if(MaxVal>0)cvConvertScale(pW,pI,255/MaxVal,0);
+ cvNamedWindow("SVMData",0);
+ cvShowImage("SVMData",pI);
+ cvSaveImage("SVMData.bmp",pI);
+ cvReleaseImage(&pW);
+ cvReleaseImage(&pI);
+ } /* Prepare for debug. */
+
+ if(m_pStatModel && m_Wnd && m_Dim == 2)
+ {
+ float* pFVVar = m_pFVGen->GetFVVar();
+ int x,y;
+ if(m_pStatImg==NULL)
+ {
+ m_pStatImg = cvCreateImage(m_ImgSize,IPL_DEPTH_8U,1);
+ }
+ cvZero(m_pStatImg);
+
+ for(y=0; y<m_pStatImg->height; y+=1) for(x=0; x<m_pStatImg->width; x+=1)
+ { /* Draw all elements: */
+ float res;
+ uchar* pData = (uchar*)m_pStatImg->imageData + x + y*m_pStatImg->widthStep;
+ CvMat FVmat;
+ float xy[2] = {x/pFVVar[0],y/pFVVar[1]};
+ cvInitMatHeader( &FVmat, 1, 2, CV_32F, xy );
+ res = cvStatModelPredict( m_pStatModel, &FVmat, NULL );
+ pData[0]=((res>0.5)?255:0);
+ } /* Next point. */
+
+ cvNamedWindow("SVMMask",0);
+ cvShowImage("SVMMask",m_pStatImg);
+ cvSaveImage("SVMMask.bmp",m_pStatImg);
+ } /* Prepare for debug. */
#endif
- };
- void SaveStatModel()
- {
- if(m_DataFileName[0])
- {
- if(m_pTrainData)cvSave(m_DataFileName, m_pTrainData);
- }
- };
- void LoadStatModel()
- {
- if(m_DataFileName[0])
- {
- CvMat* pTrainData = (CvMat*)cvLoad(m_DataFileName);
- if(CV_IS_MAT(pTrainData) && pTrainData->width == m_Dim)
- {
- if(m_pTrainData) cvReleaseMat(&m_pTrainData);
- m_pTrainData = pTrainData;
- RetrainStatModel();
- }
- }
- }
+ };
+ void SaveStatModel()
+ {
+ if(m_DataFileName[0])
+ {
+ if(m_pTrainData)cvSave(m_DataFileName, m_pTrainData);
+ }
+ };
+ void LoadStatModel()
+ {
+ if(m_DataFileName[0])
+ {
+ CvMat* pTrainData = (CvMat*)cvLoad(m_DataFileName);
+ if(CV_IS_MAT(pTrainData) && pTrainData->width == m_Dim)
+ {
+ if(m_pTrainData) cvReleaseMat(&m_pTrainData);
+ m_pTrainData = pTrainData;
+ RetrainStatModel();
+ }
+ }
+ }
public:
- CvBlobTrackAnalysisSVM(CvBlobTrackFVGen* (*createFVGen)()):m_Tracks(sizeof(DefTrackSVM))
- {
- m_pFVGen = createFVGen();
- m_Dim = m_pFVGen->GetFVSize();
- m_pFV = (float*)cvAlloc(sizeof(float)*m_Dim);
- m_Frame = 0;
- m_TrackNum = 0;
- m_pTrainData = NULL;
- m_pStatModel = NULL;
- m_DataFileName[0] = 0;
- m_pStatImg = NULL;
- m_LastTrainDataSize = 0;
-
- m_NU = 0.2f;
- AddParam("Nu",&m_NU);
- CommentParam("Nu","Parameters that tunes SVM border elastic");
-
- m_RBFWidth = 1;
- AddParam("RBFWidth",&m_RBFWidth);
- CommentParam("RBFWidth","Parameters that tunes RBF kernel function width.");
-
- SetModuleName("SVM");
-
- } /* Constructor. */
-
- ~CvBlobTrackAnalysisSVM()
- {
- int i;
- SaveStatModel();
- for(i=m_Tracks.GetBlobNum();i>0;--i)
- {
- DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlob(i-1);
- if(pF->pMem) cvReleaseMemStorage(&pF->pMem);
- //pF->pFVGen->Release();
- }
- if(m_pStatImg)cvReleaseImage(&m_pStatImg);
- cvFree(&m_pFV);
- } /* Destructor. */
-
- /*----------------- Interface: --------------------*/
- virtual void AddBlob(CvBlob* pBlob)
- {
- DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlobByID(CV_BLOB_ID(pBlob));
-
- m_pFVGen->AddBlob(pBlob);
-
- if(pF == NULL)
- { /* Create new record: */
- DefTrackSVM F;
- F.state = 0;
- F.blob = pBlob[0];
- F.LastFrame = m_Frame;
- //F.pFVGen = m_CreateFVGen();
- F.pMem = cvCreateMemStorage();
- F.pFVSeq = cvCreateSeq(0,sizeof(CvSeq),sizeof(float)*m_Dim,F.pMem);
-
- F.BlobLast.x = -1;
- F.BlobLast.y = -1;
- F.BlobLast.w = -1;
- F.BlobLast.h = -1;
- m_Tracks.AddBlob((CvBlob*)&F);
- pF = (DefTrackSVM*)m_Tracks.GetBlobByID(CV_BLOB_ID(pBlob));
- }
-
- assert(pF);
- pF->blob = pBlob[0];
- pF->LastFrame = m_Frame;
- };
-
- virtual void Process(IplImage* pImg, IplImage* pFG)
- {
- int i;
- float* pFVVar = m_pFVGen->GetFVVar();
-
- m_pFVGen->Process(pImg, pFG);
- m_ImgSize = cvSize(pImg->width,pImg->height);
-
- for(i=m_pFVGen->GetFVNum(); i>0; --i)
- {
- int BlobID = 0;
- float* pFV = m_pFVGen->GetFV(i,&BlobID);
- DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlobByID(BlobID);
-
- if(pF && pFV)
- { /* Process: */
- float dx,dy;
- CvMat FVmat;
-
- pF->state = 0;
-
- if(m_pStatModel)
- {
- int j;
- for(j=0; j<m_Dim; ++j)
- {
- m_pFV[j] = pFV[j]/pFVVar[j];
- }
-
- cvInitMatHeader( &FVmat, 1, m_Dim, CV_32F, m_pFV );
- //pF->state = cvStatModelPredict( m_pStatModel, &FVmat, NULL )<0.5;
- pF->state = 1.f;
- }
-
- dx = (pF->blob.x - pF->BlobLast.x);
- dy = (pF->blob.y - pF->BlobLast.y);
-
- if(pF->BlobLast.x<0 || (dx*dx+dy*dy) >= 2*2)
- { /* Add feature vector to train data base: */
- pF->BlobLast = pF->blob;
- cvSeqPush(pF->pFVSeq,pFV);
- }
- } /* Process one blob. */
- } /* Next FV. */
-
- for(i=m_Tracks.GetBlobNum(); i>0; --i)
- { /* Check each blob record: */
- DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlob(i-1);
-
- if(pF->LastFrame+3 < m_Frame )
- { /* Retrain stat model and delete blob filter: */
- int mult = 1+m_Dim;
- int old_height = m_pTrainData?m_pTrainData->height:0;
- int height = old_height + pF->pFVSeq->total*mult;
- CvMat* pTrainData = cvCreateMat(height, m_Dim, CV_32F);
- int j;
- if(m_pTrainData && pTrainData)
- { /* Create new train data matrix: */
- int h = pTrainData->height;
- pTrainData->height = MIN(pTrainData->height, m_pTrainData->height);
- cvCopy(m_pTrainData,pTrainData);
- pTrainData->height = h;
- }
-
- for(j=0; j<pF->pFVSeq->total; ++j)
- { /* Copy new data to train data: */
- float* pFVVar = m_pFVGen->GetFVVar();
- float* pFV = (float*)cvGetSeqElem(pF->pFVSeq,j);
- int k;
-
- for(k=0; k<mult; ++k)
- {
- int t;
- float* pTD = (float*)CV_MAT_ELEM_PTR( pTrainData[0], old_height+j*mult+k, 0);
- memcpy(pTD,pFV,sizeof(float)*m_Dim);
-
- if(pFVVar)for(t=0;t<m_Dim;++t)
- { /* Scale FV: */
- pTD[t] /= pFVVar[t];
- }
-
- if(k>0)
- { /* Variate: */
- for(t=0; t<m_Dim; ++t)
- {
- pTD[t] += m_RBFWidth*0.5f*(1-2.0f*rand()/(float)RAND_MAX);
- }
- }
- }
- } /* Next new datum. */
-
- if(m_pTrainData) cvReleaseMat(&m_pTrainData);
- m_pTrainData = pTrainData;
-
- /* delete track record */
- cvReleaseMemStorage(&pF->pMem);
- m_TrackNum++;
- m_Tracks.DelBlob(i-1);
-
- } /* End delete. */
- } /* Next track. */
-
- /* Retrain data each 1 minute if new data exist: */
- if(m_Frame%(25*60) == 0 && m_pTrainData && m_pTrainData->rows > m_LastTrainDataSize)
- {
- RetrainStatModel();
- }
-
- m_Frame++;
-
- if(m_Wnd && m_Dim==2)
- { /* Debug output: */
- int x,y;
- IplImage* pI = cvCloneImage(pImg);
-
- if(m_pStatModel && m_pStatImg)
-
- for(y=0; y<pI->height; y+=2)
- {
- uchar* pStatData = (uchar*)m_pStatImg->imageData + y*m_pStatImg->widthStep;
- uchar* pData = (uchar*)pI->imageData + y*pI->widthStep;
-
- for(x=0;x<pI->width;x+=2)
- { /* Draw all elements: */
- int d = pStatData[x];
- d = (d<<8) | (d^0xff);
- *(ushort*)(pData + x*3) = (ushort)d;
- }
- } /* Next line. */
-
- //cvNamedWindow("SVMMap",0);
- //cvShowImage("SVMMap", pI);
- cvReleaseImage(&pI);
- } /* Debug output. */
- };
- float GetState(int BlobID)
- {
- DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlobByID(BlobID);
- return pF?pF->state:0.0f;
- };
-
- /* Return 0 if trajectory is normal;
+ CvBlobTrackAnalysisSVM(CvBlobTrackFVGen* (*createFVGen)()):m_Tracks(sizeof(DefTrackSVM))
+ {
+ m_pFVGen = createFVGen();
+ m_Dim = m_pFVGen->GetFVSize();
+ m_pFV = (float*)cvAlloc(sizeof(float)*m_Dim);
+ m_Frame = 0;
+ m_TrackNum = 0;
+ m_pTrainData = NULL;
+ m_pStatModel = NULL;
+ m_DataFileName[0] = 0;
+ m_pStatImg = NULL;
+ m_LastTrainDataSize = 0;
+
+ m_NU = 0.2f;
+ AddParam("Nu",&m_NU);
+        CommentParam("Nu","Parameter that tunes the SVM border elasticity");
+
+ m_RBFWidth = 1;
+ AddParam("RBFWidth",&m_RBFWidth);
+        CommentParam("RBFWidth","Parameter that tunes the RBF kernel function width.");
+
+ SetModuleName("SVM");
+
+ } /* Constructor. */
+
+ ~CvBlobTrackAnalysisSVM()
+ {
+ int i;
+ SaveStatModel();
+ for(i=m_Tracks.GetBlobNum();i>0;--i)
+ {
+ DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlob(i-1);
+ if(pF->pMem) cvReleaseMemStorage(&pF->pMem);
+ //pF->pFVGen->Release();
+ }
+ if(m_pStatImg)cvReleaseImage(&m_pStatImg);
+ cvFree(&m_pFV);
+ } /* Destructor. */
+
+ /*----------------- Interface: --------------------*/
+ virtual void AddBlob(CvBlob* pBlob)
+ {
+ DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlobByID(CV_BLOB_ID(pBlob));
+
+ m_pFVGen->AddBlob(pBlob);
+
+ if(pF == NULL)
+ { /* Create new record: */
+ DefTrackSVM F;
+ F.state = 0;
+ F.blob = pBlob[0];
+ F.LastFrame = m_Frame;
+ //F.pFVGen = m_CreateFVGen();
+ F.pMem = cvCreateMemStorage();
+ F.pFVSeq = cvCreateSeq(0,sizeof(CvSeq),sizeof(float)*m_Dim,F.pMem);
+
+ F.BlobLast.x = -1;
+ F.BlobLast.y = -1;
+ F.BlobLast.w = -1;
+ F.BlobLast.h = -1;
+ m_Tracks.AddBlob((CvBlob*)&F);
+ pF = (DefTrackSVM*)m_Tracks.GetBlobByID(CV_BLOB_ID(pBlob));
+ }
+
+ assert(pF);
+ pF->blob = pBlob[0];
+ pF->LastFrame = m_Frame;
+ };
+
+ virtual void Process(IplImage* pImg, IplImage* pFG)
+ {
+ int i;
+ float* pFVVar = m_pFVGen->GetFVVar();
+
+ m_pFVGen->Process(pImg, pFG);
+ m_ImgSize = cvSize(pImg->width,pImg->height);
+
+ for(i=m_pFVGen->GetFVNum(); i>0; --i)
+ {
+ int BlobID = 0;
+ float* pFV = m_pFVGen->GetFV(i,&BlobID);
+ DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlobByID(BlobID);
+
+ if(pF && pFV)
+ { /* Process: */
+ float dx,dy;
+ CvMat FVmat;
+
+ pF->state = 0;
+
+ if(m_pStatModel)
+ {
+ int j;
+ for(j=0; j<m_Dim; ++j)
+ {
+ m_pFV[j] = pFV[j]/pFVVar[j];
+ }
+
+ cvInitMatHeader( &FVmat, 1, m_Dim, CV_32F, m_pFV );
+ //pF->state = cvStatModelPredict( m_pStatModel, &FVmat, NULL )<0.5;
+ pF->state = 1.f;
+ }
+
+ dx = (pF->blob.x - pF->BlobLast.x);
+ dy = (pF->blob.y - pF->BlobLast.y);
+
+ if(pF->BlobLast.x<0 || (dx*dx+dy*dy) >= 2*2)
+ { /* Add feature vector to train data base: */
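+                    /* Store the FV only when the blob has moved at least 2 pixels
+                       since the last stored sample (or when none is stored yet). */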
+ pF->BlobLast = pF->blob;
+ cvSeqPush(pF->pFVSeq,pFV);
+ }
+ } /* Process one blob. */
+ } /* Next FV. */
+
+ for(i=m_Tracks.GetBlobNum(); i>0; --i)
+ { /* Check each blob record: */
+ DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlob(i-1);
+
+ if(pF->LastFrame+3 < m_Frame )
+ { /* Retrain stat model and delete blob filter: */
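+                /* Append each stored FV to the training matrix (1+m_Dim) times:
+                   once scaled by 1/FVVar and m_Dim more times with uniform jitter
+                   of +-RBFWidth/2, to augment the one-class SVM training set. */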
+ int mult = 1+m_Dim;
+ int old_height = m_pTrainData?m_pTrainData->height:0;
+ int height = old_height + pF->pFVSeq->total*mult;
+ CvMat* pTrainData = cvCreateMat(height, m_Dim, CV_32F);
+ int j;
+ if(m_pTrainData && pTrainData)
+ { /* Create new train data matrix: */
+ int h = pTrainData->height;
+ pTrainData->height = MIN(pTrainData->height, m_pTrainData->height);
+ cvCopy(m_pTrainData,pTrainData);
+ pTrainData->height = h;
+ }
+
+ for(j=0; j<pF->pFVSeq->total; ++j)
+ { /* Copy new data to train data: */
+ float* pFVvar = m_pFVGen->GetFVVar();
+ float* pFV = (float*)cvGetSeqElem(pF->pFVSeq,j);
+ int k;
+
+ for(k=0; k<mult; ++k)
+ {
+ int t;
+ float* pTD = (float*)CV_MAT_ELEM_PTR( pTrainData[0], old_height+j*mult+k, 0);
+ memcpy(pTD,pFV,sizeof(float)*m_Dim);
+
+ if(pFVvar)for(t=0;t<m_Dim;++t)
+ { /* Scale FV: */
+ pTD[t] /= pFVvar[t];
+ }
+
+ if(k>0)
+ { /* Variate: */
+ for(t=0; t<m_Dim; ++t)
+ {
+ pTD[t] += m_RBFWidth*0.5f*(1-2.0f*rand()/(float)RAND_MAX);
+ }
+ }
+ }
+ } /* Next new datum. */
+
+ if(m_pTrainData) cvReleaseMat(&m_pTrainData);
+ m_pTrainData = pTrainData;
+
+ /* delete track record */
+ cvReleaseMemStorage(&pF->pMem);
+ m_TrackNum++;
+ m_Tracks.DelBlob(i-1);
+
+ } /* End delete. */
+ } /* Next track. */
+
+ /* Retrain data each 1 minute if new data exist: */
+ if(m_Frame%(25*60) == 0 && m_pTrainData && m_pTrainData->rows > m_LastTrainDataSize)
+ {
+ RetrainStatModel();
+ }
+
+ m_Frame++;
+
+ if(m_Wnd && m_Dim==2)
+ { /* Debug output: */
+ int x,y;
+ IplImage* pI = cvCloneImage(pImg);
+
+ if(m_pStatModel && m_pStatImg)
+
+ for(y=0; y<pI->height; y+=2)
+ {
+ uchar* pStatData = (uchar*)m_pStatImg->imageData + y*m_pStatImg->widthStep;
+ uchar* pData = (uchar*)pI->imageData + y*pI->widthStep;
+
+ for(x=0;x<pI->width;x+=2)
+ { /* Draw all elements: */
+ int d = pStatData[x];
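+                        /* Pack the model response and its complement into two adjacent
+                           bytes of the BGR pixel (low byte = d^0xff, high byte = d,
+                           assuming a little-endian ushort store). */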
+ d = (d<<8) | (d^0xff);
+ *(ushort*)(pData + x*3) = (ushort)d;
+ }
+ } /* Next line. */
+
+ //cvNamedWindow("SVMMap",0);
+ //cvShowImage("SVMMap", pI);
+ cvReleaseImage(&pI);
+ } /* Debug output. */
+ };
+ float GetState(int BlobID)
+ {
+ DefTrackSVM* pF = (DefTrackSVM*)m_Tracks.GetBlobByID(BlobID);
+ return pF?pF->state:0.0f;
+ };
+
+ /* Return 0 if trajectory is normal;
           return >0 if the trajectory is abnormal. */
- virtual const char* GetStateDesc(int BlobID)
- {
- if(GetState(BlobID)>0.5) return "abnormal";
- return NULL;
- }
-
- virtual void SetFileName(char* DataBaseName)
- {
- if(m_pTrainData)SaveStatModel();
- m_DataFileName[0] = m_DataFileName[1000] = 0;
- if(DataBaseName)
- {
- strncpy(m_DataFileName,DataBaseName,1000);
- strcat(m_DataFileName, ".yml");
- }
- LoadStatModel();
- };
-
-
- virtual void Release(){ delete this; };
+ virtual const char* GetStateDesc(int BlobID)
+ {
+ if(GetState(BlobID)>0.5) return "abnormal";
+ return NULL;
+ }
+
+ virtual void SetFileName(char* DataBaseName)
+ {
+ if(m_pTrainData)SaveStatModel();
+ m_DataFileName[0] = m_DataFileName[1000] = 0;
+ if(DataBaseName)
+ {
+ strncpy(m_DataFileName,DataBaseName,1000);
+ strcat(m_DataFileName, ".yml");
+ }
+ LoadStatModel();
+ };
+
+
+ virtual void Release(){ delete this; };
}; /* CvBlobTrackAnalysisSVM. */
-
+#if 0
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMP()
{return (CvBlobTrackAnalysis*) new CvBlobTrackAnalysisSVM(cvCreateFVGenP);}
CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMSS()
{return (CvBlobTrackAnalysis*) new CvBlobTrackAnalysisSVM(cvCreateFVGenSS);}
+#endif
virtual void Process(IplImage* pImg, IplImage* /*pFG*/)
{
- int i;
double MinTv = pImg->width/1440.0; /* minimal threshold for speed difference */
double MinTv2 = MinTv*MinTv;
- for(i=m_Tracks.GetBlobNum(); i>0; --i)
+ for(int i=m_Tracks.GetBlobNum(); i>0; --i)
{
DefTrackForDist* pF = (DefTrackForDist*)m_Tracks.GetBlob(i-1);
pF->state = 0;
if(m_Wnd)
{ /* Debug output: */
- int i;
if(m_pDebugImg==NULL)
m_pDebugImg = cvCloneImage(pImg);
else
cvCopy(pImg, m_pDebugImg);
- for(i=m_TrackDataBase.GetBlobNum(); i>0; --i)
+ for(int i=m_TrackDataBase.GetBlobNum(); i>0; --i)
{ /* Draw all elements in track data base: */
int j;
DefTrackForDist* pF = (DefTrackForDist*)m_TrackDataBase.GetBlob(i-1);
pF->close = 0;
} /* Draw all elements in track data base. */
- for(i=m_Tracks.GetBlobNum(); i>0; --i)
+ for(int i=m_Tracks.GetBlobNum(); i>0; --i)
{ /* Draw all elements for all trajectories: */
DefTrackForDist* pF = (DefTrackForDist*)m_Tracks.GetBlob(i-1);
int j;
void CvBlobTrackerAuto1::Process(IplImage* pImg, IplImage* pMask)
{
int CurBlobNum = 0;
- int i;
IplImage* pFG = pMask;
/* Bump frame counter: */
TIME_BEGIN()
if(m_pBT)
{
- int i;
m_pBT->Process(pImg, pFG);
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobList.GetBlobNum(); i>0; --i)
{ /* Update data of tracked blob list: */
CvBlob* pB = m_BlobList.GetBlob(i-1);
int BlobID = CV_BLOB_ID(pB);
- int i = m_pBT->GetBlobIndexByID(BlobID);
- m_pBT->ProcessBlob(i, pB, pImg, pFG);
+ int idx = m_pBT->GetBlobIndexByID(BlobID);
+ m_pBT->ProcessBlob(idx, pB, pImg, pFG);
pB->ID = BlobID;
}
CurBlobNum = m_pBT->GetBlobNum();
/* This part should be removed: */
if(m_BTReal && m_pBT)
{ /* Update blob list (detect new blob for real blob tracker): */
- int i;
-
- for(i=m_pBT->GetBlobNum(); i>0; --i)
+ for(int i=m_pBT->GetBlobNum(); i>0; --i)
{ /* Update data of tracked blob list: */
CvBlob* pB = m_pBT->GetBlob(i-1);
if(pB && m_BlobList.GetBlobByID(CV_BLOB_ID(pB)) == NULL )
} /* Next blob. */
/* Delete blobs: */
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobList.GetBlobNum(); i>0; --i)
{ /* Update tracked-blob list: */
CvBlob* pB = m_BlobList.GetBlob(i-1);
if(pB && m_pBT->GetBlobByID(CV_BLOB_ID(pB)) == NULL )
TIME_BEGIN()
if(m_pBTPostProc)
{ /* Post-processing module: */
- int i;
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobList.GetBlobNum(); i>0; --i)
{ /* Update tracked-blob list: */
CvBlob* pB = m_BlobList.GetBlob(i-1);
m_pBTPostProc->AddBlob(pB);
}
m_pBTPostProc->Process();
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobList.GetBlobNum(); i>0; --i)
{ /* Update tracked-blob list: */
CvBlob* pB = m_BlobList.GetBlob(i-1);
int BlobID = CV_BLOB_ID(pB);
if(m_pBD->DetectNewBlob(pImg, pFG, &NewBlobList, &m_BlobList))
{ /* Add new blob to tracker and blob list: */
int i;
- IplImage* pMask = pFG;
+ IplImage* pmask = pFG;
/*if(0)if(NewBlobList.GetBlobNum()>0 && pFG )
{// erode FG mask (only for FG_0 and MS1||MS2)
- pMask = cvCloneImage(pFG);
- cvErode(pFG,pMask,NULL,2);
+ pmask = cvCloneImage(pFG);
+ cvErode(pFG,pmask,NULL,2);
}*/
for(i=0; i<NewBlobList.GetBlobNum(); ++i)
if(pBN && pBN->w >= CV_BLOB_MINW && pBN->h >= CV_BLOB_MINH)
{
- CvBlob* pB = m_pBT->AddBlob(pBN, pImg, pMask );
+ CvBlob* pB = m_pBT->AddBlob(pBN, pImg, pmask );
if(pB)
{
NewB.blob = pB[0];
}
} /* Add next blob from list of detected blob. */
- if(pMask != pFG) cvReleaseImage(&pMask);
+ if(pmask != pFG) cvReleaseImage(&pmask);
} /* Create and add new blobs and trackers. */
TIME_BEGIN()
if(m_pBTGen)
{ /* Run track generator: */
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobList.GetBlobNum(); i>0; --i)
{ /* Update data of tracked blob list: */
CvBlob* pB = m_BlobList.GetBlob(i-1);
m_pBTGen->AddBlob(pB);
{ /* Find a neighbour on current frame
* for each blob from previous frame:
*/
- CvBlob* pB = m_BlobList.GetBlob(i-1);
- DefBlobTracker* pBT = (DefBlobTracker*)pB;
+ CvBlob* pBl = m_BlobList.GetBlob(i-1);
+ DefBlobTracker* pBT = (DefBlobTracker*)pBl;
//int BlobID = CV_BLOB_ID(pB);
//CvBlob* pBBest = NULL;
//double DistBest = -1;
{
CvSeq* cnts;
CvSeq* cnt;
- int i;
//CvMat* pMC = NULL;
if(m_BlobList.GetBlobNum() <= 0 ) return;
cvReleaseImage(&pBin);
}
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobList.GetBlobNum(); i>0; --i)
{ /* Predict new blob position. */
CvBlob* pB = NULL;
DefBlobTrackerCR* pBT = (DefBlobTrackerCR*)m_BlobList.GetBlob(i-1);
if(m_BlobList.GetBlobNum()>0 && m_BlobListNew.GetBlobNum()>0)
{ /* Resolve new blob to old: */
- int i,j;
int NOld = m_BlobList.GetBlobNum();
int NNew = m_BlobListNew.GetBlobNum();
- for(i=0; i<NOld; i++)
+ for(int i=0; i<NOld; i++)
{ /* Set 0 collision and clear all hyp: */
DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i);
pF->Collision = 0;
} /* Set 0 collision. */
/* Create correspondence records: */
- for(j=0; j<NNew; ++j)
+ for(int j=0; j<NNew; ++j)
{
CvBlob* pB1 = m_BlobListNew.GetBlob(j);
DefBlobTrackerCR* pFLast = NULL;
- for(i=0; i<NOld; i++)
+ for(int i=0; i<NOld; i++)
{ /* Check intersection: */
int Intersection = 0;
DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i);
} /* Check next new blob. */
} /* Resolve new blob to old. */
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobList.GetBlobNum(); i>0; --i)
{ /* Track each blob. */
CvBlob* pB = m_BlobList.GetBlob(i-1);
DefBlobTrackerCR* pBT = (DefBlobTrackerCR*)pB;
int BlobID = CV_BLOB_ID(pB);
//CvBlob* pBBest = NULL;
//double DistBest = -1;
- int j;
if(pBT->pResolver)
{
CvBlob* pBBest = NULL;
double DistBest = -1;
double CMax = 0;
- for(j=pBT->pBlobHyp->GetBlobNum();j>0;--j)
+ for(int j=pBT->pBlobHyp->GetBlobNum();j>0;--j)
{ /* Find best CC: */
CvBlob* pBNew = pBT->pBlobHyp->GetBlob(j-1);
if(pBT->pResolver)
if(m_Wnd)
{
IplImage* pI = cvCloneImage(pImg);
- int i;
- for(i=m_BlobListNew.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobListNew.GetBlobNum(); i>0; --i)
{ /* Draw each new CC: */
CvBlob* pB = m_BlobListNew.GetBlob(i-1);
CvPoint p = cvPointFrom32f(CV_BLOB_CENTER(pB));
CV_RGB(255,255,0), 1 );
}
- for(i=m_BlobList.GetBlobNum(); i>0; --i)
+ for(int i=m_BlobList.GetBlobNum(); i>0; --i)
{ /* Draw each new CC: */
DefBlobTrackerCR* pF = (DefBlobTrackerCR*)m_BlobList.GetBlob(i-1);
CvBlob* pB = &(pF->BlobPredict);
}
}; /* class CvBlobTrackerOneKalman */
+#if 0
static CvBlobTrackerOne* cvCreateModuleBlobTrackerOneKalman()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneKalman;
}
+
CvBlobTracker* cvCreateBlobTrackerKalman()
{
return cvCreateBlobTrackerList(cvCreateModuleBlobTrackerOneKalman);
}
+#endif
}; /* CollectHist */
#endif
-CvBlobTrackerOne* cvCreateBlobTrackerOneMSFG()
+static CvBlobTrackerOne* cvCreateBlobTrackerOneMSFG()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSFG;
}
};
};
-CvBlobTrackerOne* cvCreateBlobTrackerOneMS()
+static CvBlobTrackerOne* cvCreateBlobTrackerOneMS()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMS;
}
}; /* CvBlobTrackerOneMSPF */
+CvBlobTrackerOne* cvCreateBlobTrackerOneMSPF();
CvBlobTrackerOne* cvCreateBlobTrackerOneMSPF()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSPF;
#define DefHistTypeMat CV_32F
#define HIST_INDEX(_pData) (((_pData)[0]>>m_ByteShift) + (((_pData)[1]>>(m_ByteShift))<<m_BinBit)+((pImgData[2]>>m_ByteShift)<<(m_BinBit*2)))
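/* Illustrative only (not part of the patch): a standalone equivalent of the HIST_INDEX
   packing, assuming 8-bit channels with m_BinBit = 5 and m_ByteShift = 3, i.e. each
   channel is quantized to 5 bits and the three quantized values form one bin index. */
static inline int hist_index_example(const unsigned char* p)
{
    const int BinBit = 5, ByteShift = 3;            /* assumed values */
    return  (p[0] >> ByteShift)                     /* channel 0 -> low bits    */
         + ((p[1] >> ByteShift) << BinBit)          /* channel 1 -> middle bits */
         + ((p[2] >> ByteShift) << (BinBit * 2));   /* channel 2 -> high bits   */
}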
-void calcKernelEpanechnikov(CvMat* pK)
+static void calcKernelEpanechnikov(CvMat* pK)
{ /* Allocate kernel for histogram creation: */
int x,y;
int w = pK->width;
{ /* Mean shift in scale space: */
float news = 0;
- float sum = 0;
+ float sum1 = 0;
float scale;
Center = cvPoint(cvRound(m_Blob.x),cvRound(m_Blob.y));
{
            double W = cvDotProduct(m_Weights, m_KernelMeanShiftG[si]);
int s = si-SCALE_RANGE;
- sum += (float)fabs(W);
+ sum1 += (float)fabs(W);
news += (float)(s*W);
}
- if(sum>0)
+ if(sum1>0)
{
- news /= sum;
+ news /= sum1;
}
scale = (float)pow((double)SCALE_BASE,(double)news);
virtual void Release(){delete this;};
}; /*CvBlobTrackerOneMSFGS*/
-CvBlobTrackerOne* cvCreateBlobTrackerOneMSFGS()
+static CvBlobTrackerOne* cvCreateBlobTrackerOneMSFGS()
{
return (CvBlobTrackerOne*) new CvBlobTrackerOneMSFGS;
}
delete this;
}
-CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcKalmanOne()
+static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcKalmanOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcKalman;
}
{
float WSum = 0;
int i;
- int index = m_Frame % TIME_WND;
+ int idx = m_Frame % TIME_WND;
int size = MIN((m_Frame+1), TIME_WND);
- m_pBlobs[index] = pBlob[0];
+ m_pBlobs[idx] = pBlob[0];
m_Blob.x = m_Blob.y = m_Blob.w = m_Blob.h = 0;
for(i=0; i<size; ++i)
}
}; /* class CvBlobTrackPostProcTimeAver */
-CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverRectOne()
+static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverRectOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcTimeAver(0);
}
-CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverExpOne()
+static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverExpOne()
{
return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcTimeAver(1);
}
#undef quad
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4701 )
-#endif
-
CvCalibFilter::CvCalibFilter()
{
/* etalon data */
bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
- int pointCount, CvPoint2D32f* points )
+ int pointCount, CvPoint2D32f* _points )
{
int i, arrSize;
Stop();
- if (latestPoints != NULL)
- {
- for( i = 0; i < MAX_CAMERAS; i++ )
- cvFree( latestPoints + i );
- }
+ if (latestPoints != NULL)
+ {
+ for( i = 0; i < MAX_CAMERAS; i++ )
+ cvFree( latestPoints + i );
+ }
if( type == CV_CALIB_ETALON_USER || type != etalonType )
{
- if (etalonParams != NULL)
- {
- cvFree( &etalonParams );
- }
+ if (etalonParams != NULL)
+ {
+ cvFree( &etalonParams );
+ }
}
etalonType = type;
case CV_CALIB_ETALON_USER:
etalonParamCount = 0;
- if( !points || pointCount < 4 )
+ if( !_points || pointCount < 4 )
{
assert(0);
return false;
if( etalonPointCount != pointCount )
{
- if (etalonPoints != NULL)
- {
- cvFree( &etalonPoints );
- }
+ if (etalonPoints != NULL)
+ {
+ cvFree( &etalonPoints );
+ }
etalonPointCount = pointCount;
etalonPoints = (CvPoint2D32f*)cvAlloc( arrSize );
}
break;
case CV_CALIB_ETALON_USER:
- if (params != NULL)
- {
- memcpy( etalonParams, params, arrSize );
- }
- if (points != NULL)
- {
- memcpy( etalonPoints, points, arrSize );
- }
- break;
+ if (params != NULL)
+ {
+ memcpy( etalonParams, params, arrSize );
+ }
+ if (_points != NULL)
+ {
+ memcpy( etalonPoints, _points, arrSize );
+ }
+ break;
default:
assert(0);
CvCalibEtalonType
CvCalibFilter::GetEtalon( int* paramCount, const double** params,
- int* pointCount, const CvPoint2D32f** points ) const
+ int* pointCount, const CvPoint2D32f** _points ) const
{
if( paramCount )
*paramCount = etalonParamCount;
if( pointCount )
*pointCount = etalonPointCount;
- if( points )
- *points = etalonPoints;
+ if( _points )
+ *_points = etalonPoints;
return etalonType;
}
void CvCalibFilter::SetCameraCount( int count )
{
Stop();
-
+
if( count != cameraCount )
{
for( int i = 0; i < cameraCount; i++ )
}
}
-
+
bool CvCalibFilter::SetFrames( int frames )
{
if( frames < 5 )
assert(0);
return false;
}
-
+
framesTotal = frames;
return true;
}
cameraParams[i].imgSize[0] = (float)imgSize.width;
cameraParams[i].imgSize[1] = (float)imgSize.height;
-
+
// cameraParams[i].focalLength[0] = cameraParams[i].matrix[0];
// cameraParams[i].focalLength[1] = cameraParams[i].matrix[4];
memcpy( cameraParams[i].transVect, transVect, 3 * sizeof(transVect[0]));
mat.data.ptr = (uchar*)(cameraParams + i);
-
+
/* check resultant camera parameters: if there are some INF's or NAN's,
stop and reset results */
if( !cvCheckArr( &mat, CV_CHECK_RANGE | CV_CHECK_QUIET, -10000, 10000 ))
{
stereo.fundMatr[i] = stereo.fundMatr[i];
}
-
+
}
}
int* count, bool* found )
{
int n;
-
+
if( (unsigned)idx >= (unsigned)cameraCount ||
!pts || !count || !found )
{
assert(0);
return false;
}
-
+
n = latestCounts[idx];
-
+
*found = n > 0;
*count = abs(n);
*pts = latestPoints[idx];
assert(0);
return 0;
}
-
+
return isCalibrated ? cameraParams + idx : 0;
}
assert(0);
return 0;
}
-
+
return &stereo;
}
{
CvMat mat;
int arrSize;
-
+
Stop();
-
+
if( !params )
{
assert(0);
if( isCalibrated )
{
int i, j;
-
+
FILE* f = fopen( filename, "w" );
if( !f ) return false;
return false;
SetCameraCount( d );
-
+
for( i = 0; i < cameraCount; i++ )
{
for( j = 0; j < (int)(sizeof(cameraParams[i])/sizeof(float)); j++ )
CV_Assert(values_read == 1);
}
}
-
-
-
-
+
+
+
+
fclose(f);
stereo.warpSize = cvSize( cvRound(cameraParams[0].imgSize[0]), cvRound(cameraParams[0].imgSize[1]));
isCalibrated = true;
-
+
return true;
}
return true;
}
-
+
}
void RandomizedTree::train(std::vector<BaseKeypoint> const& base_set,
- RNG &rng, int depth, int views, size_t reduced_num_dim,
+ RNG &rng, int _depth, int views, size_t reduced_num_dim,
int num_quant_bits)
{
PatchGenerator make_patch;
- train(base_set, rng, make_patch, depth, views, reduced_num_dim, num_quant_bits);
+ train(base_set, rng, make_patch, _depth, views, reduced_num_dim, num_quant_bits);
}
void RandomizedTree::train(std::vector<BaseKeypoint> const& base_set,
RNG &rng, PatchGenerator &make_patch,
- int depth, int views, size_t reduced_num_dim,
+ int _depth, int views, size_t reduced_num_dim,
int num_quant_bits)
{
- init((int)base_set.size(), depth, rng);
+ init((int)base_set.size(), _depth, rng);
Mat patch;
classes_ = -1;
}
-void RandomizedTree::init(int num_classes, int depth, RNG &rng)
+void RandomizedTree::init(int num_classes, int _depth, RNG &rng)
{
- depth_ = depth;
- num_leaves_ = 1 << depth; // 2**d
+ depth_ = _depth;
+ num_leaves_ = 1 << _depth; // 2**d
int num_nodes = num_leaves_ - 1; // 2**d - 1
// Initialize probabilities and counts to 0
for (int i=0; i<num_leaves_; i++) {
float *post = posteriors_[i];
char buf[20];
- for (int i=0; i<classes_; i++) {
+ for (int j=0; j<classes_; j++) {
sprintf(buf, "%.10e", *post++);
- file << buf << ((i<classes_-1) ? " " : "");
+ file << buf << ((j<classes_-1) ? " " : "");
}
file << std::endl;
}
std::ofstream file(url.c_str(), (append?std::ios::app:std::ios::out));
for (int i=0; i<num_leaves_; i++) {
uchar *post = posteriors2_[i];
- for (int i=0; i<classes_; i++)
- file << int(*post++) << (i<classes_-1?" ":"");
+ for (int j=0; j<classes_; j++)
+ file << int(*post++) << (j<classes_-1?" ":"");
file << std::endl;
}
file.close();
//#include <limits.h>
//#include "cv.h"
//#include "highgui.h"
-
+#if 0
#include <stdio.h>
/* Valery Mosyagin */
/* ===== Function for find corresponding between images ===== */
/* Create feature points on the image and return their number. The points array is filled with the found points */
-int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
+static int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
{
int foundFeaturePoints = 0;
IplImage *grayImage = 0;
/* For the given points1 (with pntStatus1) on image1, finds corresponding points2 on image2 and sets pntStatus2 for them */
/* Returns number of corresponding points */
-int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
+static int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
IplImage *image2,/* Image 2 */
- CvMat *points1,
+ CvMat *points1,
CvMat *pntStatus1,
CvMat *points2,
CvMat *pntStatus2,
/* Test input data for errors */
/* Test for null pointers */
- if( image1 == 0 || image2 == 0 ||
+ if( image1 == 0 || image2 == 0 ||
points1 == 0 || points2 == 0 ||
pntStatus1 == 0 || pntStatus2 == 0)
{
}
/* Test for matrices */
- if( !CV_IS_MAT(points1) || !CV_IS_MAT(points2) ||
+ if( !CV_IS_MAT(points1) || !CV_IS_MAT(points2) ||
!CV_IS_MAT(pntStatus1) || !CV_IS_MAT(pntStatus2) )
{
CV_ERROR( CV_StsUnsupportedFormat, "Input parameters (points and status) must be a matrices" );
pyrImage1, pyrImage2,
cornerPoints1, cornerPoints2,
numVisPoints, cvSize(10,10), 3,
- status, errors,
+ status, errors,
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03),
0/*CV_LKFLOW_PYR_A_READY*/ );
-
+
memset(stat2,0,sizeof(uchar)*numPoints);
int currVis = 0;
CvMat fundMatr;
double fundMatr_dat[9];
fundMatr = cvMat(3,3,CV_64F,fundMatr_dat);
-
+
CV_CALL( pStatus = cvCreateMat(1,totalCorns,CV_32F) );
int num = cvFindFundamentalMat(tmpPoints1,tmpPoints2,&fundMatr,CV_FM_RANSAC,threshold,0.99,pStatus);
return resNumCorrPoints;
}
+
/*-------------------------------------------------------------------------------------*/
-int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
+static int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
{
/* Add to existing points and status arrays new points or just grow */
CvMat *newOldPoint = 0;
CV_FUNCNAME( "icvGrowPointsAndStatus" );
__BEGIN__;
-
+
/* Test for errors */
if( oldPoints == 0 || oldStatus == 0 )
{
return newTotalNumber;
}
+
/*-------------------------------------------------------------------------------------*/
-int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
+static int icvRemoveDoublePoins( CvMat *oldPoints,/* Points on prev image */
CvMat *newPoints,/* New points */
CvMat *oldStatus,/* Status for old points */
CvMat *newStatus,
CvSeq* seq = 0;
int originalPoints = 0;
-
+
CV_FUNCNAME( "icvRemoveDoublePoins" );
__BEGIN__;
{
CV_ERROR( CV_StsOutOfRange, "Statuses must have 1 row" );
}
-
+
    /* we have points on the image and want to add new points */
    /* use subdivision to find the nearest points */
/* Point is double. Turn it off */
/* Set status */
//newStatus->data.ptr[i] = 0;
-
+
/* No this is a double point */
//originalPoints--;
flag = 0;
__END__;
cvReleaseMemStorage( &storage );
-
+
return originalPoints;
void icvComputeProjectMatrix(CvMat* objPoints,CvMat* projPoints,CvMat* projMatr);
/*-------------------------------------------------------------------------------------*/
-void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr)
+static void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr)
{
/* Compute number of good points */
int num = cvCountNonZero(status);
-
+
/* Create arrays */
CvMat *objPoints = 0;
objPoints = cvCreateMat(4,num,CV_64F);
currVis++;
}
-
+
fprintf(file,"\n");
}
/*-------------------------------------------------------------------------------------*/
-/* For given N images
+/* For given N images
we have corresponding points on N images
computed projection matrices
reconstructed 4D points
- we must to compute
-
+   we must compute
-*/
-void icvAddNewImageToPrevious____(
+*/
+static void icvAddNewImageToPrevious____(
IplImage *newImage,//Image to add
IplImage *oldImage,//Previous image
CvMat *oldPoints,// previous 2D points on prev image (some points may be not visible)
int corrNum;
corrNum = icvFindCorrForGivenPoints( oldImage,/* Image 1 */
newImage,/* Image 2 */
- oldPoints,
+ oldPoints,
oldPntStatus,
points2,
status,
// icvComputeProjectMatrix(objPoints4D,points2,&projMatr);
icvComputeProjectMatrixStatus(objPoints4D,points2,status,&projMatr);
cvCopy(&projMatr,newProjMatr);
-
+
/* Create new points and find correspondence */
icvCreateFeaturePoints(newImage, newFPoints2D2,newFPointsStatus);
-
+
/* Good if we test new points before find corr points */
/* Find correspondence for new found points */
//CreateGood
/*-------------------------------------------------------------------------------------*/
-int icvDeleteSparsInPoints( int numImages,
+static int icvDeleteSparsInPoints( int numImages,
CvMat **points,
CvMat **status,
CvMat *wasStatus)/* status of previous configuration */
int numCoord;
numCoord = points[0]->rows;// !!! may be number of coordinates is not correct !!!
-
+
int i;
int currExistPoint;
currExistPoint = 0;
return comNumber;
}
-#if 0
+
/*-------------------------------------------------------------------------------------*/
void icvGrowPointsArray(CvMat **points)
{
cvConvert(pntStatus,status);
int corrNum = FindCorrForGivenPoints(oldImage,newImage,oldPoints,newPoints,status);
-
+
/* Status has new status of points */
CvMat projMatr;
Stan Birchfield and Carlo Tomasi
International Journal of Computer Vision,
35(3): 269-293, December 1999.
-
+
This implementation uses a different cost function that results in
O(pixPerRow*maxDisparity) complexity of dynamic programming stage versus
O(pixPerRow*log(pixPerRow)*maxDisparity) in the above paper.
typedef struct _CvDPCell
{
uchar step; //local-optimal step
- int sum; //current sum
+ int sum; //current sum
}_CvDPCell;
typedef struct _CvRightImData
#define CV_IMAX3(a,b,c) ((temp3 = (a) >= (b) ? (a) : (b)),(temp3 >= (c) ? temp3 : (c)))
#define CV_IMIN3(a,b,c) ((temp3 = (a) <= (b) ? (a) : (b)),(temp3 <= (c) ? temp3 : (c)))
-void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
+static void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
uchar* disparities,
CvSize size, int widthStep,
- int maxDisparity,
- float _param1, float _param2,
+ int maxDisparity,
+ float _param1, float _param2,
float _param3, float _param4,
float _param5 )
{
int x, y, i, j, temp3;
int d, s;
- int dispH = maxDisparity + 3;
+ int dispH = maxDisparity + 3;
uchar *dispdata;
int imgW = size.width;
int imgH = size.height;
int param5 = cvRound(_param5);
#define CELL(d,x) cells[(d)+(x)*dispH]
-
+
uchar* dsi = (uchar*)cvAlloc(sizeof(uchar)*imgW*dispH);
uchar* edges = (uchar*)cvAlloc(sizeof(uchar)*imgW*imgH);
_CvDPCell* cells = (_CvDPCell*)cvAlloc(sizeof(_CvDPCell)*imgW*MAX(dispH,(imgH+1)/2));
_CvRightImData* rData = (_CvRightImData*)cvAlloc(sizeof(_CvRightImData)*imgW);
int* reliabilities = (int*)cells;
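    /* Note: reliabilities reuses the cells buffer; it is only needed in the later
       propagation passes, after the per-row DP table has been consumed. */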
-
- for( y = 0; y < imgH; y++ )
- {
+
+ for( y = 0; y < imgH; y++ )
+ {
uchar* srcdata1 = src1 + widthStep * y;
- uchar* srcdata2 = src2 + widthStep * y;
+ uchar* srcdata2 = src2 + widthStep * y;
//init rData
prevval = prev = srcdata2[0];
for( j = 1; j < imgW; j++ )
- {
+ {
curr = srcdata2[j];
val = (uchar)((curr + prev)>>1);
rData[j-1].max_val = (uchar)CV_IMAX3( val, prevval, prev );
        // fill dissimilarity space image
for( i = 1; i <= maxDisparity + 1; i++ )
- {
+ {
dsi += imgW;
rData--;
for( j = i - 1; j < imgW - 1; j++ )
- {
- int t;
+ {
+ int t;
if( (t = srcdata1[j] - rData[j+1].max_val) >= 0 )
{
dsi[j] = (uchar)t;
for( j = 3; j < imgW-4; j++ )
{
edges[y*imgW+j] = 0;
-
- if( ( CV_IMAX3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) -
+
+ if( ( CV_IMAX3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) -
CV_IMIN3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+j] |= 1;
}
- if( ( CV_IMAX3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) -
+ if( ( CV_IMAX3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) -
CV_IMIN3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+j] |= 2;
- }
- }
+ }
+ }
//find correspondence using dynamical programming
//init DP table
- for( x = 0; x < imgW; x++ )
+ for( x = 0; x < imgW; x++ )
{
CELL(0,x).sum = CELL(dispH-1,x).sum = ICV_MAX_DP_SUM_VAL;
CELL(0,x).step = CELL(dispH-1,x).step = ICV_DP_STEP_LEFT;
}
- for( d = 2; d < dispH; d++ )
+ for( d = 2; d < dispH; d++ )
{
CELL(d,d-2).sum = ICV_MAX_DP_SUM_VAL;
CELL(d,d-2).step = ICV_DP_STEP_UP;
- }
+ }
CELL(1,0).sum = 0;
CELL(1,0).step = ICV_DP_STEP_LEFT;
for( x = 1; x < imgW; x++ )
- {
- int d = MIN( x + 1, maxDisparity + 1);
+ {
+ int dp = MIN( x + 1, maxDisparity + 1);
uchar* _edges = edges + y*imgW + x;
int e0 = _edges[0] & 1;
_CvDPCell* _cell = cells + x*dispH;
do
{
- int s = dsi[d*imgW+x];
+ int _s = dsi[dp*imgW+x];
int sum[3];
//check left step
- sum[0] = _cell[d-dispH].sum - param2;
+ sum[0] = _cell[dp-dispH].sum - param2;
//check up step
- if( _cell[d+1].step != ICV_DP_STEP_DIAG && e0 )
+ if( _cell[dp+1].step != ICV_DP_STEP_DIAG && e0 )
{
- sum[1] = _cell[d+1].sum + param1;
+ sum[1] = _cell[dp+1].sum + param1;
- if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
+ if( _cell[dp-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-dp] & 2) )
{
int t;
-
- sum[2] = _cell[d-1-dispH].sum + param1;
+
+ sum[2] = _cell[dp-1-dispH].sum + param1;
t = sum[1] < sum[0];
//choose local-optimal pass
if( sum[t] <= sum[2] )
{
- _cell[d].step = (uchar)t;
- _cell[d].sum = sum[t] + s;
+ _cell[dp].step = (uchar)t;
+ _cell[dp].sum = sum[t] + _s;
}
else
- {
- _cell[d].step = ICV_DP_STEP_DIAG;
- _cell[d].sum = sum[2] + s;
+ {
+ _cell[dp].step = ICV_DP_STEP_DIAG;
+ _cell[dp].sum = sum[2] + _s;
}
}
else
{
if( sum[0] <= sum[1] )
{
- _cell[d].step = ICV_DP_STEP_LEFT;
- _cell[d].sum = sum[0] + s;
+ _cell[dp].step = ICV_DP_STEP_LEFT;
+ _cell[dp].sum = sum[0] + _s;
}
else
{
- _cell[d].step = ICV_DP_STEP_UP;
- _cell[d].sum = sum[1] + s;
+ _cell[dp].step = ICV_DP_STEP_UP;
+ _cell[dp].sum = sum[1] + _s;
}
}
}
- else if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
+ else if( _cell[dp-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-dp] & 2) )
{
- sum[2] = _cell[d-1-dispH].sum + param1;
+ sum[2] = _cell[dp-1-dispH].sum + param1;
if( sum[0] <= sum[2] )
{
- _cell[d].step = ICV_DP_STEP_LEFT;
- _cell[d].sum = sum[0] + s;
+ _cell[dp].step = ICV_DP_STEP_LEFT;
+ _cell[dp].sum = sum[0] + _s;
}
else
{
- _cell[d].step = ICV_DP_STEP_DIAG;
- _cell[d].sum = sum[2] + s;
+ _cell[dp].step = ICV_DP_STEP_DIAG;
+ _cell[dp].sum = sum[2] + _s;
}
}
else
{
- _cell[d].step = ICV_DP_STEP_LEFT;
- _cell[d].sum = sum[0] + s;
+ _cell[dp].step = ICV_DP_STEP_LEFT;
+ _cell[dp].sum = sum[0] + _s;
}
}
- while( --d );
+ while( --dp );
}// for x
//extract optimal way and fill disparity image
min_val = CELL(i,imgW-1).sum;
}
}
-
+
//track optimal pass
for( x = imgW - 1; x > 0; x-- )
- {
+ {
dispdata[x] = (uchar)(d - 1);
while( CELL(d,x).step == ICV_DP_STEP_UP ) d++;
if ( CELL(d,x).step == ICV_DP_STEP_DIAG )
{
s = x;
- while( CELL(d,x).step == ICV_DP_STEP_DIAG )
+ while( CELL(d,x).step == ICV_DP_STEP_DIAG )
{
- d--;
- x--;
+ d--;
+ x--;
}
for( i = x; i < s; i++ )
{
dispdata[i] = (uchar)(d-1);
- }
- }
+ }
+ }
}//for x
}// for y
{
for( y = 1; y < imgH - 1; y++ )
{
- if( ( CV_IMAX3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
- src1[(y+1)*widthStep+x] ) -
- CV_IMIN3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
+ if( ( CV_IMAX3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
+ src1[(y+1)*widthStep+x] ) -
+ CV_IMIN3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
src1[(y+1)*widthStep+x] ) ) >= ICV_BIRCH_DIFF_LUM )
{
edges[y*imgW+x] |= 4;
}
}
- //remove along any particular row, every gradient
+ //remove along any particular row, every gradient
//for which two adjacent columns do not agree.
for( y = 0; y < imgH; y++ )
{
prev = edges[y*imgW];
for( x = 1; x < imgW - 1; x++ )
{
- curr = edges[y*imgW+x];
+ curr = edges[y*imgW+x];
if( (curr & 4) &&
( !( prev & 4 ) ||
!( edges[y*imgW+x+1] & 4 ) ) )
;
s = y - i;
for( ; i < y; i++ )
- {
+ {
reliabilities[i*imgW+x] = s;
- }
+ }
}
- }
-
- //Y - propagate reliable regions
+ }
+
+ //Y - propagate reliable regions
for( x = 0; x < imgW; x++ )
- {
+ {
for( y = 0; y < imgH; y++ )
- {
+ {
d = dest[y*widthStep+x];
if( reliabilities[y*imgW+x] >= param4 && !(edges[y*imgW+x] & 4) &&
d > 0 )//highly || moderately
- {
+ {
disparities[y*widthStep+x] = (uchar)d;
//up propagation
for( i = y - 1; i >= 0; i-- )
{
if( ( edges[i*imgW+x] & 4 ) ||
- ( dest[i*widthStep+x] < d &&
+ ( dest[i*widthStep+x] < d &&
reliabilities[i*imgW+x] >= param3 ) ||
- ( reliabilities[y*imgW+x] < param5 &&
+ ( reliabilities[y*imgW+x] < param5 &&
dest[i*widthStep+x] - 1 == d ) ) break;
- disparities[i*widthStep+x] = (uchar)d;
- }
-
+ disparities[i*widthStep+x] = (uchar)d;
+ }
+
//down propagation
for( i = y + 1; i < imgH; i++ )
{
if( ( edges[i*imgW+x] & 4 ) ||
- ( dest[i*widthStep+x] < d &&
+ ( dest[i*widthStep+x] < d &&
reliabilities[i*imgW+x] >= param3 ) ||
- ( reliabilities[y*imgW+x] < param5 &&
+ ( reliabilities[y*imgW+x] < param5 &&
dest[i*widthStep+x] - 1 == d ) ) break;
disparities[i*widthStep+x] = (uchar)d;
for( ; x < imgW && dest[y*widthStep+x] == dest[y*widthStep+x-1]; x++ );
s = x - i;
for( ; i < x; i++ )
- {
+ {
reliabilities[y*imgW+i] = s;
- }
+ }
}
- }
-
- //X - propagate reliable regions
- for( y = 0; y < imgH; y++ )
- {
+ }
+
+ //X - propagate reliable regions
+ for( y = 0; y < imgH; y++ )
+ {
for( x = 0; x < imgW; x++ )
- {
+ {
d = dest[y*widthStep+x];
if( reliabilities[y*imgW+x] >= param4 && !(edges[y*imgW+x] & 1) &&
d > 0 )//highly || moderately
- {
+ {
disparities[y*widthStep+x] = (uchar)d;
//up propagation
for( i = x - 1; i >= 0; i-- )
{
if( (edges[y*imgW+i] & 1) ||
- ( dest[y*widthStep+i] < d &&
+ ( dest[y*widthStep+i] < d &&
reliabilities[y*imgW+i] >= param3 ) ||
- ( reliabilities[y*imgW+x] < param5 &&
+ ( reliabilities[y*imgW+x] < param5 &&
dest[y*widthStep+i] - 1 == d ) ) break;
disparities[y*widthStep+i] = (uchar)d;
- }
-
+ }
+
//down propagation
for( i = x + 1; i < imgW; i++ )
{
if( (edges[y*imgW+i] & 1) ||
- ( dest[y*widthStep+i] < d &&
+ ( dest[y*widthStep+i] < d &&
reliabilities[y*imgW+i] >= param3 ) ||
- ( reliabilities[y*imgW+x] < param5 &&
+ ( reliabilities[y*imgW+x] < param5 &&
dest[y*widthStep+i] - 1 == d ) ) break;
disparities[y*widthStep+i] = (uchar)d;
}
//release resources
- cvFree( &dsi );
- cvFree( &edges );
- cvFree( &cells );
- cvFree( &rData );
+ cvFree( &dsi );
+ cvFree( &edges );
+ cvFree( &cells );
+ cvFree( &rData );
}
// rightImage - right image of stereo-pair (format 8uC1).
// mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only)
// dispImage - destination disparity image
-// maxDisparity - maximal disparity
+// maxDisparity - maximal disparity
// param1, param2, param3, param4, param5 - parameters of algorithm
// Returns:
// Notes:
// All images must have format 8uC1.
//F*/
CV_IMPL void
-cvFindStereoCorrespondence(
+cvFindStereoCorrespondence(
const CvArr* leftImage, const CvArr* rightImage,
int mode,
CvArr* depthImage,
- int maxDisparity,
- double param1, double param2, double param3,
+ int maxDisparity,
+ double param1, double param2, double param3,
double param4, double param5 )
-{
+{
CV_FUNCNAME( "cvFindStereoCorrespondence" );
__BEGIN__;
- CvMat *src1, *src2;
+ CvMat *src1, *src2;
CvMat *dst;
CvMat src1_stub, src2_stub, dst_stub;
- int coi;
+ int coi;
CV_CALL( src1 = cvGetMat( leftImage, &src1_stub, &coi ));
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
CV_CALL( src2 = cvGetMat( rightImage, &src2_stub, &coi ));
- if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
+ if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
CV_CALL( dst = cvGetMat( depthImage, &dst_stub, &coi ));
if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
- // check args
- if( CV_MAT_TYPE( src1->type ) != CV_8UC1 ||
- CV_MAT_TYPE( src2->type ) != CV_8UC1 ||
+ // check args
+ if( CV_MAT_TYPE( src1->type ) != CV_8UC1 ||
+ CV_MAT_TYPE( src2->type ) != CV_8UC1 ||
CV_MAT_TYPE( dst->type ) != CV_8UC1) CV_ERROR(CV_StsUnsupportedFormat,
- "All images must be single-channel and have 8u" );
+ "All images must be single-channel and have 8u" );
if( !CV_ARE_SIZES_EQ( src1, src2 ) || !CV_ARE_SIZES_EQ( src1, dst ) )
CV_ERROR( CV_StsUnmatchedSizes, "" );
-
+
if( maxDisparity <= 0 || maxDisparity >= src1->width || maxDisparity > 255 )
- CV_ERROR(CV_StsOutOfRange,
+ CV_ERROR(CV_StsOutOfRange,
"parameter /maxDisparity/ is out of range");
-
+
if( mode == CV_DISPARITY_BIRCHFIELD )
{
if( param1 == CV_UNDEF_SC_PARAM ) param1 = CV_IDP_BIRCHFIELD_PARAM1;
if( param4 == CV_UNDEF_SC_PARAM ) param4 = CV_IDP_BIRCHFIELD_PARAM4;
if( param5 == CV_UNDEF_SC_PARAM ) param5 = CV_IDP_BIRCHFIELD_PARAM5;
- CV_CALL( icvFindStereoCorrespondenceByBirchfieldDP( src1->data.ptr,
- src2->data.ptr, dst->data.ptr,
+ CV_CALL( icvFindStereoCorrespondenceByBirchfieldDP( src1->data.ptr,
+ src2->data.ptr, dst->data.ptr,
cvGetMatSize( src1 ), src1->step,
- maxDisparity, (float)param1, (float)param2, (float)param3,
+ maxDisparity, (float)param1, (float)param2, (float)param3,
(float)param4, (float)param5 ) );
}
else
CV_ERROR( CV_StsBadArg, "Unsupported mode of function" );
}
- __END__;
+ __END__;
}
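/* A minimal usage sketch (illustrative only, not part of the patch). The image file
   names and the maxDisparity value are assumptions; passing CV_UNDEF_SC_PARAM lets the
   function fall back to its built-in Birchfield parameter defaults, as handled above. */
#if 0
    IplImage* left  = cvLoadImage("left.png",  CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* right = cvLoadImage("right.png", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* disp  = cvCreateImage(cvGetSize(left), IPL_DEPTH_8U, 1);
    cvFindStereoCorrespondence( left, right, CV_DISPARITY_BIRCHFIELD, disp,
                                64 /* maxDisparity */,
                                CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM,
                                CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM );
    cvReleaseImage(&left); cvReleaseImage(&right); cvReleaseImage(&disp);
#endif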
/* End of file. */
#include "precomp.hpp"
-CvStatus CV_STDCALL
+static CvStatus
icvJacobiEigens_32f(float *A, float *V, float *E, int n, float eps)
{
int i, j, k, ind;
//
// Returns: CV_NO_ERR or error code
//
-// Notes:
+// Notes:
//F*/
static CvStatus CV_STDCALL
icvCalcCovarMatrixEx_8u32fR( int nObjects, void *input, int objStep1,
/* Buffer size determination */
if( ioFlags )
{
- int size = icvDefaultBufferSize();
- ioBufSize = MIN( size, n );
+ ioBufSize = MIN( icvDefaultBufferSize(), n );
}
    /* memory allocation (if necessary) */
for( igr = 0; igr < ngr; igr++ )
{
- int i, io, ie, imin = igr * nio, imax = imin + nio;
+ int io, ie, imin = igr * nio, imax = imin + nio;
if( imax > m1 )
imax = m1;
- for( i = 0; i < eigSize.width * (imax - imin); i++ )
- ((float *) buffer)[i] = 0.f;
+ for(int k = 0; k < eigSize.width * (imax - imin); k++ )
+ ((float *) buffer)[k] = 0.f;
for( io = 0; io < nObjects; io++ )
{
int ioBufSize,
void* userData,
CvTermCriteria* calcLimit,
- IplImage* avg,
+ IplImage* avg,
float* eigVals )
{
float *avg_data;
avg_data, avg_step, obj_size );
__END__;
-
+
return coeff;
}
cvEigenDecomposite( IplImage* obj,
int nEigObjs,
void* eigInput,
- int ioFlags,
- void* userData,
- IplImage* avg,
+ int ioFlags,
+ void* userData,
+ IplImage* avg,
float* coeffs )
{
float *avg_data;
int nEigObjs,
int ioFlags,
void* userData,
- float* coeffs,
+ float* coeffs,
IplImage* avg,
IplImage* proj )
{
Mat samples = cvarrToMat(_samples), labels0, labels;
if( _labels )
labels0 = labels = cvarrToMat(_labels);
-
+
bool isOk = train(samples, Mat(), _params, _labels ? &labels : 0);
CV_Assert( labels0.data == labels.data );
{
CV_Assert(_sample_idx.empty());
Mat prbs, weights, means, logLikelihoods;
- std::vector<Mat> covsHdrs;
- init_params(_params, prbs, weights, means, covsHdrs);
+ std::vector<Mat> covshdrs;
+ init_params(_params, prbs, weights, means, covshdrs);
emObj = EM(_params.nclusters, _params.cov_mat_type, _params.term_crit);
bool isOk = false;
isOk = emObj.train(_samples,
logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
else if( _params.start_step == EM::START_E_STEP )
- isOk = emObj.trainE(_samples, means, covsHdrs, weights,
+ isOk = emObj.trainE(_samples, means, covshdrs, weights,
logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
else if( _params.start_step == EM::START_M_STEP )
isOk = emObj.trainM(_samples, prbs,
logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
else
CV_Error(CV_StsBadArg, "Bad start type of EM algorithm");
-
+
if(isOk)
{
logLikelihood = sum(logLikelihoods).val[0];
return (dx < wt && dy < ht);
}
-void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
+static void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{ /* Create contours: */
IplImage* pIB = NULL;
CvSeq* cnt = NULL;
for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
{
CvRect rect;
- CvSeq* cnt;
+ CvSeq* cont;
int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
if(k!=claster_cur) continue;
- cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
- rect = ((CvContour*)cnt)->rect;
+ cont = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
+ rect = ((CvContour*)cont)->rect;
if(rect_res.height<0)
{
if(Good)
do{ /* For each configuration: */
CvBlob* pBL[EBD_FRAME_NUM];
- int Good = 1;
+ int good = 1;
double Error = 0;
CvBlob* pBNew = m_pBlobLists[EBD_FRAME_NUM-1]->GetBlob(pBLIndex[EBD_FRAME_NUM-1]);
Count++;
/* Check intersection last blob with existed: */
- if(Good && pOldBlobList)
+ if(good && pOldBlobList)
{ /* Check intersection last blob with existed: */
int k;
for(k=pOldBlobList->GetBlobNum(); k>0; --k)
CvBlob* pBOld = pOldBlobList->GetBlob(k-1);
if((fabs(pBOld->x-pBNew->x) < (CV_BLOB_RX(pBOld)+CV_BLOB_RX(pBNew))) &&
(fabs(pBOld->y-pBNew->y) < (CV_BLOB_RY(pBOld)+CV_BLOB_RY(pBNew))))
- Good = 0;
+ good = 0;
}
} /* Check intersection last blob with existed. */
/* Check distance to image border: */
- if(Good)
+ if(good)
{ /* Check distance to image border: */
CvBlob* pB = pBNew;
float dx = MIN(pB->x,S.width-pB->x)/CV_BLOB_RX(pB);
float dy = MIN(pB->y,S.height-pB->y)/CV_BLOB_RY(pB);
- if(dx < 1.1 || dy < 1.1) Good = 0;
+ if(dx < 1.1 || dy < 1.1) good = 0;
} /* Check distance to image border. */
/* Check uniform motion: */
- if(Good)
+ if(good)
{
int N = EBD_FRAME_NUM;
float sum[2] = {0,0};
if( Error > S.width*0.01 ||
fabs(a[0])>S.width*0.1 ||
fabs(a[1])>S.height*0.1)
- Good = 0;
+ good = 0;
} /* Check configuration. */
/* New best trajectory: */
- if(Good && (BestError == -1 || BestError > Error))
+ if(good && (BestError == -1 || BestError > Error))
{
for(i=0; i<EBD_FRAME_NUM; ++i)
{
if(i==EBD_FRAME_NUM)finish=1;
- } while(!finish); /* Check next time configuration of connected components. */
+ } while(!finish); /* Check next time configuration of connected components. */
#if 0
{/**/
};
/* Blob detector constructor: */
-CvBlobDetector* cvCreateBlobDetectorReal(CvTestSeq* pTestSeq){return new CvBlobDetectorReal(pTestSeq);}
-
-
-
-
+//CvBlobDetector* cvCreateBlobDetectorReal(CvTestSeq* pTestSeq){return new CvBlobDetectorReal(pTestSeq);}
double invPartAll;
double alphabetta = alpha*betta;
-
+
partAll = alpha - betta;
if( fabs(partAll) > 0.00001 ) /* alpha must be > betta */
{
partY = coeffs->Ycoef + coeffs->YcoefA *alpha +
coeffs->YcoefB*betta + coeffs->YcoefAB*alphabetta;
-
+
partZ = coeffs->Zcoef + coeffs->ZcoefA *alpha +
coeffs->ZcoefB*betta + coeffs->ZcoefAB*alphabetta;
icvMulMatrix_64d( convRotMatr,
3,3,
transVect2,
- 1,3,
+ 1,3,
tmpVect);
-
+
icvSubVector_64d(transVect1,tmpVect,convTransVect,3);
-
+
return CV_NO_ERR;
}
icvMulMatrix_64d( rotMatr,
3,3,
(double*)&M2,
- 1,3,
+ 1,3,
tmpVect);
icvAddVector_64d(tmpVect,transVect,(double*)M1,3);
-
+
return CV_NO_ERR;
}
/*--------------------------------------------------------------------------------------*/
-int icvComputeCoeffForStereoV3( double quad1[4][2],
+static int icvComputeCoeffForStereoV3( double quad1[4][2],
double quad2[4][2],
int numScanlines,
CvMatr64d camMatr1,
point2.x = (1.0 - alpha) * quad1[1][0] + alpha * quad1[2][0];
point2.y = (1.0 - alpha) * quad1[1][1] + alpha * quad1[2][1];
-
+
point3.x = (1.0 - alpha) * quad2[0][0] + alpha * quad2[3][0];
point3.y = (1.0 - alpha) * quad2[0][1] + alpha * quad2[3][1];
&startCoeffs[currLine],
needSwapCamera);
}
- return CV_NO_ERR;
+ return CV_NO_ERR;
}
/*--------------------------------------------------------------------------------------*/
-int icvComputeCoeffForStereoNew( double quad1[4][2],
+static int icvComputeCoeffForStereoNew( double quad1[4][2],
double quad2[4][2],
int numScanlines,
CvMatr32f camMatr1,
double camMatr1_64d[9];
double camMatr2_64d[9];
-
+
double rotMatr1_64d[9];
double transVect1_64d[3];
-
+
double rotMatr2_64d[9];
double transVect2_64d[3];
{
/* Get direction for all points */
/* Direction for camera 1 */
-
+
CvPoint3D64f direct1;
CvPoint3D64f direct2;
CvPoint3D64f camPoint1;
-
+
CvPoint3D64f directS3;
CvPoint3D64f directS4;
CvPoint3D64f direct3;
CvPoint3D64f direct4;
CvPoint3D64f camPoint2;
-
+
icvGetDirectionForPoint( point1,
camMatr1,
&direct1);
-
+
icvGetDirectionForPoint( point2,
camMatr1,
&direct2);
icvGetDirectionForPoint( point3,
camMatr2,
&directS3);
-
+
icvGetDirectionForPoint( point4,
camMatr2,
&directS4);
    /* Create conversion for camera 2: two directions and the camera point */
-
+
double convRotMatr[9];
double convTransVect[3];
CvPoint3D64f zeroVect;
zeroVect.x = zeroVect.y = zeroVect.z = 0.0;
camPoint1.x = camPoint1.y = camPoint1.z = 0.0;
-
+
icvConvertPointSystem(directS3,&direct3,convRotMatr,convTransVect);
icvConvertPointSystem(directS4,&direct4,convRotMatr,convTransVect);
icvConvertPointSystem(zeroVect,&camPoint2,convRotMatr,convTransVect);
CvPoint3D64f pointB;
-
+
int postype = 0;
-
+
/* Changed order */
/* Compute point B: xB,yB,zB */
icvGetCrossLines(camPoint1,direct2,
double gamma;
-
+
double xA,yA,zA;
double xB,yB,zB;
double xC,yC,zC;
camPoint1,
gamma,
coeffs);
-
+
return CV_NO_ERR;
}
{
/* */
double invMatr[9];
-
+
/* Invert matrix */
icvInvertMatrix_64d(camMatr,3,invMatr);
icvMulMatrix_64d( invMatr,
3,3,
vect,
- 1,3,
+ 1,3,
(double*)direct);
- return CV_NO_ERR;
+ return CV_NO_ERR;
}
/*--------------------------------------------------------------------------------------*/
double alpha,betta;
delta = a11*a22-a12*a21;
-
+
if( fabs(delta) < EPS64D )
{
/*return ERROR;*/
/* Find four lines */
CvPoint2D64d pa,pb,pc,pd;
-
+
pa.x = 0;
pa.y = 0;
pc.x = 0;
pc.y = imageSize.height-1;
-
+
/* We can compute points for angle */
/* Test for place section */
-
+
if( startPoint.x < 0 )
{/* 1,4,7 */
if( startPoint.y < 0)
/*---------------------------------------------------------------------------------------*/
/* Get common area of rectifying */
-void icvGetCommonArea( CvSize imageSize,
+static void icvGetCommonArea( CvSize imageSize,
CvPoint3D64d epipole1,CvPoint3D64d epipole2,
CvMatr64d fundMatr,
CvVect64d coeff11,CvVect64d coeff12,
double transFundMatr[3*3];
/* Compute transpose of fundamental matrix */
icvTransposeMatrix_64d( fundMatr, 3, 3, transFundMatr );
-
+
CvPoint2D64d epipole1_2d;
CvPoint2D64d epipole2_2d;
-
+
if( fabs(epipole1.z) < 1e-8 )
{/* epipole1 in infinity */
*result = 0;
pointW11[2] = 1.0;
icvTransformVector_64d( transFundMatr, /* !!! Modified from not transposed */
- pointW11,
+ pointW11,
corr21,
3,3);
corr21[0],corr21[1],corr21[2],
&start,&end,
&res);
-
+
if( res == 0 )
{/* We have not cross */
/* We must define new angle */
/* corr11 = Fund * p21 */
icvTransformVector_64d( fundMatr, /* !!! Modified */
- pointW21,
+ pointW21,
corr11,
3,3);
coeff11[0] = corr11[0];
coeff11[1] = corr11[1];
coeff11[2] = corr11[2];
-
+
/* Set coefs for line 1 image 2 */
icvGetCoefForPiece( epipole2_2d,point21,
&coeff21[0],&coeff21[1],&coeff21[2],
*result = 0;
return;/* Error */
}
-
+
/* Set coefs for line 1 image 2 */
coeff21[0] = corr21[0];
coeff21[1] = corr21[1];
coeff21[2] = corr21[2];
-
+
}
/* ============= Computation for line 2 ================ */
pointW12[2] = 1.0;
icvTransformVector_64d( transFundMatr,
- pointW12,
+ pointW12,
corr22,
3,3);
corr22[0],corr22[1],corr22[2],
&start,&end,
&res);
-
+
if( res == 0 )
{/* We have not cross */
/* We must define new angle */
/* corr2 = Fund' * p1 */
icvTransformVector_64d( fundMatr,
- pointW22,
+ pointW22,
corr12,
3,3);
-
+
    /* We have a cross, and it is the resulting cross for the lower line. Set result coefs */
/* Set coefs for line 2 image 1 */
coeff12[0] = corr12[0];
coeff12[1] = corr12[1];
coeff12[2] = corr12[2];
-
+
/* Set coefs for line 1 image 2 */
icvGetCoefForPiece( epipole2_2d,point22,
&coeff22[0],&coeff22[1],&coeff22[2],
*result = 0;
return;/* Error */
}
-
+
/* Set coefs for line 1 image 2 */
coeff22[0] = corr22[0];
coeff22[1] = corr22[1];
coeff22[2] = corr22[2];
-
+
}
/* Now we know common area */
{/* Have cross */
double det;
double detxc,detyc;
-
+
det = a * (p_end.x - p_start.x) + b * (p_end.y - p_start.y);
-
+
if( fabs(det) < EPS64D )
{/* lines are parallel and may be equal or line is point */
if( fabs(a*p_start.x + b*p_start.y + c) < EPS64D )
}
else
{
- *result = 2;
+ *result = 2;
}
return;
}
cross->x = delX / del;
cross->y = delY / del;
-
+
*result = 1;
return;
}
CvPoint2D64d frameEnd;
CvPoint2D64d cross[4];
int haveCross[4];
-
+
haveCross[0] = 0;
haveCross[1] = 0;
haveCross[2] = 0;
frameEnd.x = imageSize.width;
frameEnd.y = 0;
- icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[0],&haveCross[0]);
-
+ icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[0],&haveCross[0]);
+
frameBeg.x = imageSize.width;
frameBeg.y = 0;
frameEnd.x = imageSize.width;
frameEnd.y = imageSize.height;
- icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[1],&haveCross[1]);
+ icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[1],&haveCross[1]);
frameBeg.x = imageSize.width;
frameBeg.y = imageSize.height;
frameEnd.x = 0;
frameEnd.y = imageSize.height;
- icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[2],&haveCross[2]);
+ icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[2],&haveCross[2]);
frameBeg.x = 0;
frameBeg.y = imageSize.height;
frameEnd.x = 0;
frameEnd.y = 0;
- icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[3],&haveCross[3]);
+ icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[3],&haveCross[3]);
double maxDist;
int i,j;
maxDist = -1.0;
-
+
double distance;
for( i = 0; i < 3; i++ )
double tmpVect1[3];
double tmpVect2[3];
-
+
icvMulMatrix_64d ( rotMatr,
3,3,
(double*)&point,
projPoint->x = tmpVect1[0] / tmpVect1[2];
projPoint->y = tmpVect1[1] / tmpVect1[2];
-
+
return;
}
/*---------------------------------------------------------------------------------------*/
/* Get quads for transform images */
-void icvGetQuadsTransform(
+void icvGetQuadsTransform(
CvSize imageSize,
CvMatr64d camMatr1,
CvMatr64d rotMatr1,
fundMatr_32f,
camMatr1_32f,
camMatr2_32f);
-
+
CvPoint3D32f epipole1_32f;
CvPoint3D32f epipole2_32f;
-
+
cvComputeEpipolesFromFundMatrix( fundMatr_32f,
&epipole1_32f,
&epipole2_32f);
epipole2->x = epipole2_32f.x;
epipole2->y = epipole2_32f.y;
epipole2->z = epipole2_32f.z;
-
+
/* Convert fundamental matrix */
icvCvt_32f_64d(fundMatr_32f,fundMatr,9);
}
/* -------------Compute for first image-------------- */
CvPoint2D32f pointb1;
CvPoint2D32f pointe1;
-
+
CvPoint2D32f pointb2;
CvPoint2D32f pointe2;
double dxOld,dyOld;
double dxNew,dyNew;
double distOld,distNew;
-
+
dxOld = quad2[1][0] - quad2[0][0];
dyOld = quad2[1][1] - quad2[0][1];
distOld = dxOld*dxOld + dyOld*dyOld;
-
+
dxNew = quad2[1][0] - pointb2.x;
dyNew = quad2[1][1] - pointb2.y;
distNew = dxNew*dxNew + dyNew*dyNew;
newQuad2[0][1] = quad2[0][1];
newQuad2[3][0] = quad2[3][0];
newQuad2[3][1] = quad2[3][1];
-
+
newQuad1[0][0] = pointb1.x;
newQuad1[0][1] = pointb1.y;
newQuad1[3][0] = pointe1.x;
&pointe2);
/* Compute distances */
-
+
dxOld = quad2[0][0] - quad2[1][0];
dyOld = quad2[0][1] - quad2[1][1];
distOld = dxOld*dxOld + dyOld*dyOld;
-
+
dxNew = quad2[0][0] - pointb2.x;
dyNew = quad2[0][1] - pointb2.y;
distNew = dxNew*dxNew + dyNew*dyNew;
newQuad2[1][1] = quad2[1][1];
newQuad2[2][0] = quad2[2][0];
newQuad2[2][1] = quad2[2][1];
-
+
newQuad1[1][0] = pointb1.x;
newQuad1[1][1] = pointb1.y;
newQuad1[2][0] = pointe1.x;
/*---------------------------------------------------------------------------------------*/
-void icvGetQuadsTransformNew( CvSize imageSize,
+static void icvGetQuadsTransformNew( CvSize imageSize,
CvMatr32f camMatr1,
CvMatr32f camMatr2,
CvMatr32f rotMatr1,
/* Convert fundamental matrix */
icvCvt_64d_32f(fundMatr_64d,fundMatr,9);
-
+
return;
}
/*---------------------------------------------------------------------------------------*/
void icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera)
{
- /* For given intrinsic and extrinsic parameters computes rest parameters
+    /* For given intrinsic and extrinsic parameters, computes the remaining parameters
    ** such as the fundamental matrix, warping coeffs, epipoles, ...
*/
icvCvt_32f_64d(stereoCamera->camera[0]->transVect,transVect1,3);
icvCvt_32f_64d(stereoCamera->camera[1]->transVect,transVect2,3);
-
+
icvCreateConvertMatrVect( rotMatr1,
transVect1,
rotMatr2,
transVect2,
convRotMatr,
convTransVect);
-
+
/* copy to stereo camera params */
icvCvt_64d_32f(convRotMatr,stereoCamera->rotMatrix,9);
icvCvt_64d_32f(convTransVect,stereoCamera->transVector,3);
/* Find middle line of sector */
double midLine[3]={0,0,0};
-
+
/* Different way */
CvPoint2D64d pointOnLine1; pointOnLine1.x = pointOnLine1.y = 0;
CvPoint2D64d pointOnLine2; pointOnLine2.x = pointOnLine2.y = 0;
candPoints[numPoints] = cornerPoint;
numPoints++;
}
-
+
cornerPoint.x = imageSize.width;
cornerPoint.y = imageSize.height;
icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
areaLineCoef2[0],areaLineCoef2[1],areaLineCoef2[2],
&tmpPoints[0], &tmpPoints[1],
&res);
-
+
for( i = 0; i < res; i++ )
{
candPoints[numPoints++] = tmpPoints[i];
double maxDist = 0;
double minDist = 10000000;
-
+
for( i = 0; i < numPoints; i++ )
{
icvProjectPointToDirect(candPoints[i], midLine, &projPoint);
}
/* We know maximum and minimum points. Now we can compute cut lines */
-
+
icvGetNormalDirect(midLine,minPoint,cutLine1);
icvGetNormalDirect(midLine,maxPoint,cutLine2);
CvPoint2D64d point1,CvPoint2D64d point2,
CvPoint2D64d* midPoint)
{/* !!! May be need to return error */
-
+
double dist1;
double dist2;
icvGetPieceLength(basePoint,point1,&dist1);
{
normDirect[0] = direct[1];
normDirect[1] = - direct[0];
- normDirect[2] = -(normDirect[0]*point.x + normDirect[1]*point.y);
+ normDirect[2] = -(normDirect[0]*point.x + normDirect[1]*point.y);
return;
}
{
*result = 0;
}
-
+
return;
}
{
double a = lineCoeff[0];
double b = lineCoeff[1];
-
+
double det = 1.0 / ( a*a + b*b );
double delta = a*point.y - b*point.x;
CvSize src_size ;
src_size.width = src->width;
src_size.height = src->height;
-
+
CvSize dst_size = src_size;
if( dst )
return dst;
}
-int
+static int
icvCvt_32f_64d( float *src, double *dst, int size )
{
int t;
/*======================================================================================*/
/* Type conversion double -> float */
-int
+static int
icvCvt_64d_32f( double *src, float *dst, int size )
{
int t;
/*----------------------------------------------------------------------------------*/
-
+#if 0
/* Find line which cross frame by line(a,b,c) */
-void FindLineForEpiline( CvSize imageSize,
+static void FindLineForEpiline( CvSize imageSize,
float a,float b,float c,
CvPoint2D32f *start,CvPoint2D32f *end,
int*)
frameEnd.x = (float)(imageSize.width);
frameEnd.y = 0;
haveCross[0] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[0]);
-
+
frameBeg.x = (float)(imageSize.width);
frameBeg.y = 0;
frameEnd.x = (float)(imageSize.width);
frameEnd.x = 0;
frameEnd.y = (float)(imageSize.height);
haveCross[2] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[2]);
-
+
frameBeg.x = 0;
frameBeg.y = (float)(imageSize.height);
frameEnd.x = 0;
}
return;
-
+
}
/*----------------------------------------------------------------------------------*/
-
-int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,CvPoint2D32f point2)
+static int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,CvPoint2D32f point2)
{
float width = (float)(imageSize.width);
float height = (float)(imageSize.height);
/* Find four lines */
CvPoint2D32f pa,pb,pc,pd;
-
+
pa.x = 0;
pa.y = 0;
float x,y;
x = epipole.x;
y = epipole.y;
-
+
if( x < 0 )
{/* 1,4,7 */
if( y < 0)
return 2;
}
-
+
}
-
+
return 0;
}
/*--------------------------------------------------------------------------------------*/
-void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32f dstQuad[4],double coeffs[3][3])
+static void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32f dstQuad[4],double coeffs[3][3])
{/* Computes perspective coeffs for transformation from src to dst quad */
double Y = dstQuad[i].y;
#endif
double* a = A + i*16;
-
+
a[0] = x;
a[1] = y;
a[2] = 1;
CV_CALL( cvPseudoInverse( &matA, &matInvA ));
CV_CALL( cvMatMulAdd( &matInvA, &matB, 0, &matX ));
}
-
+
coeffs[0][0] = c[0];
coeffs[0][1] = c[1];
coeffs[0][2] = c[2];
return;
}
+#endif
/*--------------------------------------------------------------------------------------*/
size = cvGetMatSize(mapx);
assert( fabs(c[2][2] - 1.) < FLT_EPSILON );
-
+
for( i = 0; i < size.height; i++ )
{
float* mx = (float*)(mapx->data.ptr + mapx->step*i);
double Y = quad[i].y;
#endif
double* a = A + i*16;
-
+
a[0] = x;
a[1] = y;
a[2] = 1;
CV_CALL( cvPseudoInverse( &matA, &matInvA ));
CV_CALL( cvMatMulAdd( &matInvA, &matB, 0, &matX ));
}
-
+
matrix[0][0] = c[0];
matrix[0][1] = c[1];
matrix[0][2] = c[2];
icvMulMatrix_64d( invMatr1,
3,3,
p1,
- 1,3,
+ 1,3,
P1);
double invR[9];
icvMulMatrix_64d( invR,
3,3,
P1,
- 1,3,
+ 1,3,
P2);
/* Now we can project this point to image 2 */
icvMulMatrix_64d( camMatr2,
3,3,
P2,
- 1,3,
+ 1,3,
projP);
point2->x = (float)(projP[0] / projP[2]);
icvMulMatrix_64d( invMatr2,
3,3,
p2,
- 1,3,
+ 1,3,
P2);
/* Change system 1 to system 2 */
icvMulMatrix_64d( rotMatr,
3,3,
P2,
- 1,3,
+ 1,3,
P1);
/* Now we can project this point to image 2 */
icvMulMatrix_64d( camMatr1,
3,3,
P1,
- 1,3,
+ 1,3,
projP);
point1->x = (float)(projP[0] / projP[2]);
/* Select best R and t for given cameras, points, ... */
/* For both cameras */
-int icvSelectBestRt( int numImages,
+static int icvSelectBestRt( int numImages,
int* numPoints,
CvPoint2D32f* imagePoints1,
CvPoint2D32f* imagePoints2,
/* Need to convert input data 32 -> 64 */
CvPoint3D64d* objectPoints_64d;
-
+
double* rotMatrs1_64d;
double* rotMatrs2_64d;
/* allocate memory for 64d data */
int totalNum = 0;
- int i;
- for( i = 0; i < numImages; i++ )
+ for(int i = 0; i < numImages; i++ )
{
totalNum += numPoints[i];
}
objectPoints_64d = (CvPoint3D64d*)calloc(totalNum,sizeof(CvPoint3D64d));
-
+
rotMatrs1_64d = (double*)calloc(numImages,sizeof(double)*9);
rotMatrs2_64d = (double*)calloc(numImages,sizeof(double)*9);
transVects2_64d = (double*)calloc(numImages,sizeof(double)*3);
/* Convert input data to 64d */
-
+
icvCvt_32f_64d((float*)objectPoints, (double*)objectPoints_64d, totalNum*3);
icvCvt_32f_64d(rotMatrs1, rotMatrs1_64d, numImages*9);
int currRt;
for( currRt = 0; currRt < numImages; currRt++ )
{
- int begPoint = 0;
+ int begPoint = 0;
for(currImagePair = 0; currImagePair < numImages; currImagePair++ )
{
/* For current R,t R,t compute relative position of cameras */
double convRotMatr[9];
double convTransVect[3];
-
+
icvCreateConvertMatrVect( rotMatrs1_64d + currRt*9,
transVects1_64d + currRt*3,
rotMatrs2_64d + currRt*9,
points2 = (CvPoint3D64d*)calloc(numberPnt,sizeof(CvPoint3D64d));
/* Transform object points to first camera position */
- int i;
- for( i = 0; i < numberPnt; i++ )
+ for(int i = 0; i < numberPnt; i++ )
{
/* Create second camera point */
CvPoint3D64d tmpPoint;
tmpPoint.x = (double)(objectPoints[i].x);
tmpPoint.y = (double)(objectPoints[i].y);
tmpPoint.z = (double)(objectPoints[i].z);
-
+
icvConvertPointSystem( tmpPoint,
points2+i,
rotMatrs2_64d + currImagePair*9,
dy = tmpPoint2.y - points1[i].y;
dz = tmpPoint2.z - points1[i].z;
err = sqrt(dx*dx + dy*dy + dz*dz);*/
-
-
}
-
+
#if 0
cvProjectPointsSimple( numPoints[currImagePair],
objectPoints_64d + begPoint,
cameraMatrix2_64d,
nodist,
projImagePoints2);
-
+
}
#endif
double err;
for( currPoint = 0; currPoint < numberPnt; currPoint++ )
{
- double len1,len2;
+ double len1,len2;
double dx1,dy1;
dx1 = imagePoints1[begPoint+currPoint].x - projImagePoints1[currPoint].x;
dy1 = imagePoints1[begPoint+currPoint].y - projImagePoints1[currPoint].y;
int direction)
{
double x,y;
- double det;
+ double det;
if( direction == CV_WARP_TO_CAMERA )
{/* convert from camera image to warped image coordinates */
x = warpPoint->x;
y = warpPoint->y;
-
+
det = (coeffs[2][0] * x + coeffs[2][1] * y + coeffs[2][2]);
if( fabs(det) > 1e-8 )
{
return CV_OK;
}
}
-
+
return CV_BADFACTOR_ERR;
}
corns[3].x = 0;
corns[3].y = (float)(stereoparams->camera[0]->imgSize[1]-1);
- int i;
- for( i = 0; i < 4; i++ )
+ for(int i = 0; i < 4; i++ )
{
/* For first camera */
icvConvertWarpCoordinates( stereoparams->coeffs[0],
return CV_NO_ERR;
}
+#if 0
/* Find line from epipole */
-void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D32f *start,CvPoint2D32f *end)
+static void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D32f *start,CvPoint2D32f *end)
{
CvPoint2D32f frameBeg;
CvPoint2D32f frameEnd;
frameEnd.x = (float)(imageSize.width);
frameEnd.y = 0;
haveCross[0] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[0]);
-
+
frameBeg.x = (float)(imageSize.width);
frameBeg.y = 0;
frameEnd.x = (float)(imageSize.width);
frameEnd.x = 0;
frameEnd.y = (float)(imageSize.height);
haveCross[2] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[2]);
-
+
frameBeg.x = 0;
frameBeg.y = (float)(imageSize.height);
frameEnd.x = 0;
int maxN = -1;
int minN = -1;
-
+
for( n = 0; n < 4; n++ )
{
if( haveCross[n] > 0 )
return;
}
-
/* Find the points where the line (a,b,c) crosses the image frame */
-void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *start,CvPoint2D32f *end)
+static void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *start,CvPoint2D32f *end)
{
CvPoint2D32f frameBeg;
CvPoint2D32f frameEnd;
frameEnd.x = (float)(imageSize.width);
frameEnd.y = 0;
haveCross[0] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[0]);
-
+
frameBeg.x = (float)(imageSize.width);
frameBeg.y = 0;
frameEnd.x = (float)(imageSize.width);
frameEnd.x = 0;
frameEnd.y = (float)(imageSize.height);
haveCross[2] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[2]);
-
+
frameBeg.x = 0;
frameBeg.y = (float)(imageSize.height);
frameEnd.x = 0;
}
return;
-
+
}
/* Cross lines */
-int GetCrossLines(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f p2_start,CvPoint2D32f p2_end,CvPoint2D32f *cross)
+static int GetCrossLines(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f p2_start,CvPoint2D32f p2_end,CvPoint2D32f *cross)
{
double ex1,ey1,ex2,ey2;
double px1,py1,px2,py2;
cross->y = (float)(-delY / del);
return 1;
}
-
+#endif
int icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross)
{
cross->x = (float)X;
cross->y = (float)Y;
-
+
return 1;
}
-int cvComputeEpipoles( CvMatr32f camMatr1, CvMatr32f camMatr2,
+#if 0
+static int cvComputeEpipoles( CvMatr32f camMatr1, CvMatr32f camMatr2,
CvMatr32f rotMatr1, CvMatr32f rotMatr2,
CvVect32f transVect1,CvVect32f transVect2,
CvVect32f epipole1,
cvmMul( &ccamMatr1, &crotMatr1, &cmatrP1);
cvmInvert( &cmatrP1,&cinvP1 );
cvmMul( &ccamMatr1, &ctransVect1, &cvectp1 );
-
+
/* Compute second */
cvmMul( &ccamMatr2, &crotMatr2, &cmatrP2 );
cvmInvert( &cmatrP2,&cinvP2 );
return CV_NO_ERR;
}/* cvComputeEpipoles */
-
+#endif
/* Compute epipoles for fundamental matrix */
int cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr,
epipole1->x = matrU->data.fl[6];
epipole1->y = matrU->data.fl[7];
epipole1->z = matrU->data.fl[8];
-
+
/* Get last row from V' and compute epipole2 */
epipole2->x = matrV->data.fl[6];
epipole2->y = matrV->data.fl[7];
cvReleaseMat(&matrW);
cvReleaseMat(&matrU);
- cvReleaseMat(&matrV);
+ cvReleaseMat(&matrV);
return CV_OK;
}
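/* Background for the SVD step above: the epipoles are the null vectors of the
   fundamental matrix, i.e. F * e2 = 0 and F^T * e1 = 0, so with F = U * W * V^T
   they are the singular vectors associated with the (near) zero singular value.
   A minimal plain-C check of that property; the helper name epipoleResidual is
   illustrative and not part of this code: */
static double epipoleResidual( const float F[9], const float e[3] )
{
    double r = 0.;
    int i;
    for( i = 0; i < 3; i++ )
    {
        double s = F[i*3+0]*e[0] + F[i*3+1]*e[1] + F[i*3+2]*e[2];
        r += s*s;
    }
    return r; /* close to 0 when e is the right epipole of F */
}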
CvMat* invCM1T = cvCreateMat(3,3,CV_MAT32F);
cvTranspose(&cameraMatr1C,tmpMatr);
- cvInvert(tmpMatr,invCM1T);
+ cvInvert(tmpMatr,invCM1T);
cvmMul(invCM1T,&essMatrC,tmpMatr);
cvInvert(&cameraMatr2C,invCM2);
cvmMul(tmpMatr,invCM2,&fundMatrC);
cvReleaseMat(&invCM2);
cvReleaseMat(&tmpMatr);
cvReleaseMat(&invCM1T);
-
+
return CV_OK;
}
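/* The cvTranspose/cvInvert/cvmMul sequence above appears to compute
   F = K1^(-T) * E * K2^(-1), i.e. the fundamental matrix from the essential matrix
   and the two camera matrices. A minimal plain-C sketch of that composition; the
   helper name mul33 and the precomputed inverses are illustrative assumptions: */
static void mul33( const double a[9], const double b[9], double c[9] )
{
    int i, j, k;
    for( i = 0; i < 3; i++ )
        for( j = 0; j < 3; j++ )
        {
            double s = 0.;
            for( k = 0; k < 3; k++ )
                s += a[i*3+k] * b[k*3+j];
            c[i*3+j] = s;
        }
}
/* usage sketch: mul33( invK1_T, E, tmp ); mul33( tmp, invK2, F ); */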
transMatr[0] = 0;
transMatr[1] = - transVect[2];
transMatr[2] = transVect[1];
-
+
transMatr[3] = transVect[2];
transMatr[4] = 0;
transMatr[5] = - transVect[0];
-
+
transMatr[6] = - transVect[1];
transMatr[7] = transVect[0];
transMatr[8] = 0;
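/* The matrix filled above is the skew-symmetric ("cross product") matrix [t]x of
   transVect: for any vector v, [t]x * v equals the cross product t x v, which is
   what lets a cross product with the translation be written as a matrix product
   (e.g. when forming an essential matrix). A minimal plain-C sketch of the same
   construction and of the identity it encodes (names are illustrative): */
static void skew3( const double t[3], double m[9] )
{
    m[0] = 0.;    m[1] = -t[2]; m[2] =  t[1];
    m[3] =  t[2]; m[4] = 0.;    m[5] = -t[0];
    m[6] = -t[1]; m[7] =  t[0]; m[8] = 0.;
}
static void cross3( const double a[3], const double b[3], double c[3] )
{
    c[0] = a[1]*b[2] - a[2]*b[1]; /* equals row 0 of skew3(a) times b */
    c[1] = a[2]*b[0] - a[0]*b[2];
    c[2] = a[0]*b[1] - a[1]*b[0];
}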
#include "precomp.hpp"
-#ifdef WIN32 /* make sure it builds under Linux whenever it is included into Makefile.am or not. */
+#if 0
+//#ifdef WIN32 /* make sure it builds under Linux whenever it is included into Makefile.am or not. */
//void icvCutContour( CvSeq* current, IplImage* image );
CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* image );
//create lists of segments of all contours from image
-CvSeq* cvExtractSingleEdges( IplImage* image, //bw image - it's content will be destroyed by cvFindContours
+CvSeq* cvExtractSingleEdges( IplImage* image, //bw image - it's content will be destroyed by cvFindContours
CvMemStorage* storage )
{
CvMemStorage* tmp_storage = cvCreateChildMemStorage( storage );
cvZero( image );
//iterate through contours
- //iterate through tree
+ //iterate through tree
CvSeq* current = contours;
int number = 0;
int level = 1;
CvSeq* output = 0;
- CvSeq* tail_seq = 0;
+ CvSeq* tail_seq = 0;
//actually this loop can iterate through the tree,
//but since we use CV_RETR_LIST it is not useful
while( current )
{
- number++;
-
+ number++;
+
//get vertical list of segments for one contour
CvSeq* new_seq = icvCutContourRaster( current, storage, image );
//add this vertical list to the horizontal list
if( new_seq )
{
- if( tail_seq )
- {
- tail_seq->h_next = new_seq;
+ if( tail_seq )
+ {
+ tail_seq->h_next = new_seq;
new_seq->h_prev = tail_seq;
tail_seq = new_seq;
}
}
//iteration through tree
- if( current->v_next )
- {
+ if( current->v_next )
+ {
//goto child
current = current->v_next;
level++;
}
- else
+ else
{
//go parent
while( !current->h_next )
level--;
if( !level ) break;
}
-
+
if( current ) //go brother
current = current->h_next;
}
//free temporary memstorage with initial contours
cvReleaseMemStorage( &tmp_storage );
- return output;
+ return output;
}
//makes vertical list of segments for 1 contour
CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* image /*tmp image*/)
{
//iplSet(image, 0 ); // this can cause double edges if two contours have common edge
- // for example if object is circle with 1 pixel width
- // to remove such problem - remove this iplSet
+ // for example if object is circle with 1 pixel width
+ // to remove such problem - remove this iplSet
//approx contour by single edges
CvSeqReader reader;
CvSeqWriter writer;
-
+
int writing = 0;
cvStartReadSeq( current, &reader, 0 );
//below line just to avoid warning
cvStartWriteSeq( current->flags, sizeof(CvContour), sizeof(CvPoint), storage, &writer );
-
+
CvSeq* output = 0;
CvSeq* tail = 0;
//mark point
((uchar*)image->imageData)[image->widthStep * cur.y + cur.x]++;
assert( ((uchar*)image->imageData)[image->widthStep * cur.y + cur.x] != 255 );
-
+
}
//second pass - create separate edges
uchar flag = image->imageData[image->widthStep * cur.y + cur.x];
if( flag != 255 && flag < 3) //
{
- if(!writing)
+ if(!writing)
{
cvStartWriteSeq( current->flags, sizeof(CvContour), sizeof(CvPoint), storage, &writer );
- writing = 1 ;
+ writing = 1 ;
}
//mark point
if( flag < 3 ) ((uchar*)image->imageData)[image->widthStep * cur.y + cur.x] = 255;
//add it to another seq
CV_WRITE_SEQ_ELEM( cur, writer );
-
+
}
else
{
//exclude this point from contour
- if( writing )
+ if( writing )
{
CvSeq* newseq = cvEndWriteSeq( &writer );
writing = 0;
{
output = tail = newseq;
}
- }
+ }
}
}
{
output = tail = newseq;
}
- }
+ }
return output;
//approx contour by single edges
CvSeqReader reader;
CvSeqReader rev_reader;
-
+
cvStartReadSeq( current, &reader, 0 );
int64* cur_pt = (int64*)reader.ptr;
int64* prev_pt = (int64*)reader.prev_elem;
-
+
//search for point a in aba position
for( int i = 0; i < current->total; i++ )
{
{
//return to prev pos
CV_PREV_SEQ_ELEM( sizeof(int64), reader );
-
+
//this point is end of edge
//start going both directions and collect edge
int pos = cvGetSeqReaderPos( &reader );
cvSetSeqReaderPos( &rev_reader, pos );
-
+
//walk in both directions
while(1);
}
}
-
+
*/
#endif /* WIN32 */
-
+
m_iNumLayers = 16;
assert(m_iNumLayers <= MAX_LAYERS);
m_pFaceList = new FaceDetectionList();
-
+
m_bBoosting = false;
if (m_mstgRects)
cvReleaseMemStorage(&m_mstgRects);
-
+
}// ~FaceDetection()
m_mstgRects = cvCreateMemStorage();
if (NULL == m_mstgRects)
return;
- m_seqRects = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvContourRect), m_mstgRects);
+ m_seqRects = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvContourRect), m_mstgRects);
if (NULL == m_seqRects)
return;
// find contours
buffImg += imgGray->widthStep;
}
// params
-
+
for (i = 0; i <= GIST_NUM; i ++)
{
if (gistImg[i] >= GIST_MIN)
break;
}
-
+
iMinLevel = i * GIST_STEP;
-
+
for (i = GIST_NUM; i >= 0; i --)
{
if (gistImg[i] >= GIST_MIN)
break;
}
-
+
iMaxLevel = i * GIST_STEP;
-
+
int dLevels = iMaxLevel - iMinLevel;
if (dLevels <= 0)
{
void FaceDetection::CreateResults(CvSeq * lpSeq)
{
-
+
Face * tmp;
-
+
double Max = 0;
double CurStat = 0;
-
+
FaceData tmpData;
if (m_bBoosting)
{
if (CurStat > Max)
Max = CurStat;
}
-
+
while ( (tmp = m_pFaceList->GetData()) != 0 )
{
tmp->CreateFace(&tmpData);
CurStat = tmp->GetWeight();
-
+
if (CurStat == Max)
{
CvFace tmpFace;
tmpFace.RightEyeRect = tmpData.RightEyeRect;
cvSeqPush(lpSeq,&tmpFace);
-
+
}
}
}
cvSeqPush(m_seqRects, &cr);
for (CvSeq* internal = external->v_next; internal; internal = internal->h_next)
{
- cr.r = cvContourBoundingRect(internal, 0);
+ cr.r = cvContourBoundingRect(internal, 0);
cr.pCenter.x = cr.r.x + cr.r.width / 2;
cr.pCenter.y = cr.r.y + cr.r.height / 2;
cr.iNumber = iLayer;
if (m_bBoosting)
PostBoostingFindCandidats(img);
else
- FindCandidats();
-
+ FindCandidats();
+
}// void FaceDetection::FindFace(IplImage *img)
RFace * lpFace1 = 0;
bool bInvalidRect1 = false;
CvRect * lpRect1 = NULL;
-
+
try
{
for (int i = 0; i < m_seqRects->total; i++)
3*(double)rect.width/(double)4,
(double)rect.width/(double)2,
(double)rect.width/(double)2);
-
+
lpFace1 = new RFace(lpFaceTemplate1);
-
+
for (int j = 0; j < m_seqRects->total; j++)
{
- CvContourRect* pRect = (CvContourRect*)cvGetSeqElem(m_seqRects, j);
-
+ CvContourRect* prect = (CvContourRect*)cvGetSeqElem(m_seqRects, j);
+
if ( !bInvalidRect1 )
{
lpRect1 = NULL;
lpRect1 = new CvRect();
- *lpRect1 = pRect->r;
+ *lpRect1 = prect->r;
}else
{
delete lpRect1;
lpRect1 = new CvRect();
- *lpRect1 = pRect->r;
+ *lpRect1 = prect->r;
}
-
-
+
+
if ( lpFace1->isFeature(lpRect1) )
- {
+ {
bFound1 = true;
bInvalidRect1 = false;
}else
bInvalidRect1 = true;
-
+
}
-
+
if (bFound1)
{
m_pFaceList->AddElem(lpFace1);
lpFace1 = NULL;
}
-
+
delete lpFaceTemplate1;
}
-
+
}
}
catch(...)
void FaceDetection::PostBoostingFindCandidats(IplImage * FaceImage)
{
BoostingFaceTemplate * lpFaceTemplate1 = 0;
- RFace * lpFace1 = 0;
+ RFace * lpFace1 = 0;
bool bInvalidRect1 = false;
CvRect * lpRect1 = NULL;
-
+
try
{
if ( ( !FaceImage->roi ) )
else
lpFaceTemplate1 = new BoostingFaceTemplate(3,cvRect(FaceImage->roi->xOffset,FaceImage->roi->yOffset,
FaceImage->roi->width,FaceImage->roi->height));
-
+
lpFace1 = new RFace(lpFaceTemplate1);
for (int i = 0; i < m_seqRects->total; i++)
{
CvContourRect* pRect = (CvContourRect*)cvGetSeqElem(m_seqRects, i);
-
+
if ( !bInvalidRect1 )
{
lpRect1 = NULL;
lpRect1 = new CvRect();
*lpRect1 = pRect->r;
}
-
-
+
+
if ( lpFace1->isFeature(lpRect1) )
- {
+ {
//bFound1 = true;
bInvalidRect1 = false;
}else
bInvalidRect1 = true;
-
+
}
-
+
m_pFaceList->AddElem(lpFace1);
lpFace1 = NULL;
-
+
delete lpFaceTemplate1;
}
catch(...)
#define _CV_CAUSAL 2
#define _CV_LAST_STATE 1
-#define _CV_BEST_STATE 2
+#define _CV_BEST_STATE 2
//*F///////////////////////////////////////////////////////////////////////////////////////
// Name: _cvCreateObsInfo
-// Purpose: The function allocates memory for CvImgObsInfo structure
+// Purpose: The function allocates memory for CvImgObsInfo structure
// and its inner stuff
// Context:
// Parameters: obs_info - address of pointer to CvImgObsInfo structure
//
// Returns: error status
//
-// Notes:
-//F*/
-static CvStatus CV_STDCALL icvCreateObsInfo( CvImgObsInfo** obs_info,
+// Notes:
+//F*/
+static CvStatus CV_STDCALL icvCreateObsInfo( CvImgObsInfo** obs_info,
CvSize num_obs, int obs_size )
{
int total = num_obs.height * num_obs.width;
-
+
CvImgObsInfo* obs = (CvImgObsInfo*)cvAlloc( sizeof( CvImgObsInfo) );
-
+
obs->obs_x = num_obs.width;
obs->obs_y = num_obs.height;
obs->obs = (float*)cvAlloc( total * obs_size * sizeof(float) );
obs->state = (int*)cvAlloc( 2 * total * sizeof(int) );
- obs->mix = (int*)cvAlloc( total * sizeof(int) );
-
+ obs->mix = (int*)cvAlloc( total * sizeof(int) );
+
obs->obs_size = obs_size;
obs_info[0] = obs;
-
+
return CV_NO_ERR;
}
cvFree( &(obs_info->obs) );
cvFree( &(obs_info->mix) );
- cvFree( &(obs_info->state) );
+ cvFree( &(obs_info->state) );
cvFree( &(obs_info) );
p_obs_info[0] = NULL;
return CV_NO_ERR;
-}
+}
+
-
//*F///////////////////////////////////////////////////////////////////////////////////////
// Name: icvCreate2DHMM
-// Purpose: The function allocates memory for 2-dimensional embedded HMM model
+// Purpose: The function allocates memory for 2-dimensional embedded HMM model
// and its inner stuff
// Context:
// Parameters: hmm - address of pointer to CvEHMM structure
// state_number - array of hmm sizes (size of array == state_number[0]+1 )
-// num_mix - number of gaussian mixtures in low-level HMM states
+// num_mix - number of gaussian mixtures in low-level HMM states
// size of array is defined by previous array values
// obs_size - length of observation vectors
//
//
// Notes: state_number[0] - number of states in external HMM.
// state_number[i] - number of states in embedded HMM
-//
+//
// example for face recognition: state_number = { 5 3 6 6 6 3 },
// length of num_mix array = 3+6+6+6+3 = 24//
//
/* allocate memory for all hmms (from all levels) */
hmm = (CvEHMM*)cvAlloc( (state_number[0] + 1) * sizeof(CvEHMM) );
-
+
/* set number of superstates */
hmm[0].num_states = state_number[0];
hmm[0].level = 1;
-
+
/* allocate memory for all states */
all_states = (CvEHMMState *)cvAlloc( real_states * sizeof( CvEHMMState ) );
for( i = 0; i < real_states; i++ )
{
total_mix += num_mix[i];
- }
+ }
/* allocate memory for states stuff */
- pointers = (float*)cvAlloc( total_mix * (2/*for mu invvar */ * obs_size +
+ pointers = (float*)cvAlloc( total_mix * (2/*for mu invvar */ * obs_size +
2/*for weight and log_var_val*/ ) * sizeof( float) );
-
+
/* organize memory */
for( i = 0; i < real_states; i++ )
{
- all_states[i].mu = pointers; pointers += num_mix[i] * obs_size;
+ all_states[i].mu = pointers; pointers += num_mix[i] * obs_size;
all_states[i].inv_var = pointers; pointers += num_mix[i] * obs_size;
all_states[i].log_var_val = pointers; pointers += num_mix[i];
all_states[i].weight = pointers; pointers += num_mix[i];
- }
-
+ }
+
/* set pointer to embedded hmm array */
hmm->u.ehmm = hmm + 1;
-
+
for( i = 0; i < hmm[0].num_states; i++ )
{
hmm[i+1].u.state = all_states;
all_states += state_number[i+1];
hmm[i+1].num_states = state_number[i+1];
- }
-
+ }
+
for( i = 0; i <= state_number[0]; i++ )
{
hmm[i].transP = icvCreateMatrix_32f( hmm[i].num_states, hmm[i].num_states );
hmm[i].obsProb = NULL;
hmm[i].level = i ? 0 : 1;
}
-
+
/* if all ok - return pointer */
*this_hmm = hmm;
return CV_NO_ERR;
-}
+}
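/* A small illustration of the layout documented above (the face-recognition
   example): state_number[0] is the number of superstates, the remaining entries
   give the number of states of each embedded HMM, and num_mix holds one entry per
   embedded state (3+6+6+6+3 = 24 here). The values below are only a sketch of the
   expected inputs, not code used by this file: */
static void example_2dhmm_layout( void )
{
    int state_number[] = { 5, 3, 6, 6, 6, 3 }; /* 5 superstates, then per-row states */
    int num_mix[24];                           /* one mixture count per embedded state */
    int i;
    for( i = 0; i < 24; i++ )
        num_mix[i] = 3;                        /* e.g. 3 Gaussians everywhere */
    (void)state_number;
    (void)num_mix;
}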
static CvStatus CV_STDCALL icvRelease2DHMM( CvEHMM** phmm )
{
- CvEHMM* hmm = phmm[0];
+ CvEHMM* hmm = phmm[0];
int i;
for( i = 0; i < hmm[0].num_states + 1; i++ )
{
icvDeleteMatrix( hmm[i].transP );
- }
+ }
if (hmm->obsProb != NULL)
{
phmm[0] = NULL;
return CV_NO_ERR;
-}
+}
/* distance between 2 vectors */
static float icvSquareDistance( CvVect32f v1, CvVect32f v2, int len )
}
return (float)(dist0 + dist1);
-}
+}
/*can be used in CHMM & DHMM */
static CvStatus CV_STDCALL
if ( !obs_info || !hmm ) return CV_NULLPTR_ERR;
first_state = hmm->u.ehmm->u.state;
-
+
for (i = 0; i < obs_info->obs_y; i++)
{
//bad line (division )
int superstate = (int)((i * hmm->num_states)*inv_y);/* /obs_info->obs_y; */
-
+
int index = (int)(hmm->u.ehmm[superstate].u.state - first_state);
for (j = 0; j < obs_info->obs_x; j++, counter++)
{
int state = (int)((j * hmm->u.ehmm[superstate].num_states)* inv_x); /* / obs_info->obs_x; */
-
+
obs_info->state[2 * counter] = superstate;
obs_info->state[2 * counter + 1] = state + index;
}
- }
+ }
#else
//this is not ready yet
int i,j,k,m;
- CvEHMMState* first_state = hmm->u.ehmm->u.state;
+ CvEHMMState* first_state = hmm->u.ehmm->u.state;
/* check bad arguments */
if ( hmm->num_states > obs_info->obs_y ) return CV_BADSIZE_ERR;
//compute vertical subdivision
float row_per_state = (float)obs_info->obs_y / hmm->num_states;
float col_per_state[1024]; /* maximum 1024 superstates */
-
+
//for every horizontal band compute subdivision
for( i = 0; i < hmm->num_states; i++ )
{
obs_info->state[row * obs_info->obs_x + 2 * k] = i;
obs_info->state[row * obs_info->obs_x + 2 * k + 1] = j + index;
}
- col = es_bound[j];
+ col = es_bound[j];
}
//copy the same to other rows of superstate
for( m = row; m < ss_bound[i]; m++ )
{
- memcpy( &(obs_info->state[m * obs_info->obs_x * 2]),
+ memcpy( &(obs_info->state[m * obs_info->obs_x * 2]),
&(obs_info->state[row * obs_info->obs_x * 2]), obs_info->obs_x * 2 * sizeof(int) );
}
- row = ss_bound[i];
- }
+ row = ss_bound[i];
+ }
#endif
return CV_NO_ERR;
}
-
+
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: InitMixSegm
// Context: used with the Viterbi training of the embedded HMM
// Function uses K-Means algorithm for clustering
//
-// Parameters: obs_info_array - array of pointers to image observations
+// Parameters: obs_info_array - array of pointers to image observations
// num_img - length of above array
-// hmm - pointer to HMM structure
-//
+// hmm - pointer to HMM structure
+//
// Returns: error status
//
-// Notes:
+// Notes:
//F*/
static CvStatus CV_STDCALL
icvInitMixSegm( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm )
-{
- int k, i, j;
+{
+ int k, i, j;
int* num_samples; /* number of observations in every state */
int* counter; /* array of counters for every state */
-
+
int** a_class; /* for every state - characteristic array */
-
+
CvVect32f** samples; /* for every state - pointer to observation vectors */
- int*** samples_mix; /* for every state - array of pointers to vectors mixtures */
-
+ int*** samples_mix; /* for every state - array of pointers to vectors mixtures */
+
CvTermCriteria criteria = cvTermCriteria( CV_TERMCRIT_EPS|CV_TERMCRIT_ITER,
1000, /* iter */
0.01f ); /* eps */
-
+
int total = 0;
-
- CvEHMMState* first_state = hmm->u.ehmm->u.state;
-
+
+ CvEHMMState* first_state = hmm->u.ehmm->u.state;
+
for( i = 0 ; i < hmm->num_states; i++ )
{
total += hmm->u.ehmm[i].num_states;
- }
-
+ }
+
/* for every state integer is allocated - number of vectors in state */
num_samples = (int*)cvAlloc( total * sizeof(int) );
-
+
/* integer counter is allocated for every state */
counter = (int*)cvAlloc( total * sizeof(int) );
-
- samples = (CvVect32f**)cvAlloc( total * sizeof(CvVect32f*) );
- samples_mix = (int***)cvAlloc( total * sizeof(int**) );
-
+
+ samples = (CvVect32f**)cvAlloc( total * sizeof(CvVect32f*) );
+ samples_mix = (int***)cvAlloc( total * sizeof(int**) );
+
/* clear */
memset( num_samples, 0 , total*sizeof(int) );
memset( counter, 0 , total*sizeof(int) );
-
-
+
+
/* for every state, count the number of vectors which belong to it (similar to a histogram) */
for (k = 0; k < num_img; k++)
- {
+ {
CvImgObsInfo* obs = obs_info_array[k];
int count = 0;
-
+
for (i = 0; i < obs->obs_y; i++)
{
for (j = 0; j < obs->obs_x; j++, count++)
num_samples[state] += 1;
}
}
- }
-
+ }
+
/* for every state int* is allocated */
a_class = (int**)cvAlloc( total*sizeof(int*) );
-
+
for (i = 0; i < total; i++)
{
a_class[i] = (int*)cvAlloc( num_samples[i] * sizeof(int) );
samples[i] = (CvVect32f*)cvAlloc( num_samples[i] * sizeof(CvVect32f) );
samples_mix[i] = (int**)cvAlloc( num_samples[i] * sizeof(int*) );
}
-
+
/* for every state vectors which belong to state are gathered */
for (k = 0; k < num_img; k++)
- {
+ {
CvImgObsInfo* obs = obs_info_array[k];
int num_obs = ( obs->obs_x ) * ( obs->obs_y );
float* vector = obs->obs;
for (i = 0; i < num_obs; i++, vector+=obs->obs_size )
{
int state = obs->state[2*i+1];
-
+
samples[state][counter[state]] = vector;
samples_mix[state][counter[state]] = &(obs->mix[i]);
- counter[state]++;
+ counter[state]++;
}
- }
-
+ }
+
/* clear counters */
memset( counter, 0, total*sizeof(int) );
-
+
/* do the actual clustering using the K Means algorithm */
for (i = 0; i < total; i++)
{
if ( first_state[i].num_mix == 1)
- {
+ {
for (k = 0; k < num_samples[i]; k++)
- {
+ {
/* all vectors belong to one mixture */
a_class[i][k] = 0;
}
- }
+ }
else if( num_samples[i] )
{
/* clusterize vectors */
- cvKMeans( first_state[i].num_mix, samples[i], num_samples[i],
+ cvKMeans( first_state[i].num_mix, samples[i], num_samples[i],
obs_info_array[0]->obs_size, criteria, a_class[i] );
- }
+ }
}
-
+
/* for every vector number of mixture is assigned */
for( i = 0; i < total; i++ )
{
samples_mix[i][j][0] = a_class[i][j];
}
}
-
+
for (i = 0; i < total; i++)
{
cvFree( &(a_class[i]) );
cvFree( &samples );
cvFree( &samples_mix );
cvFree( &counter );
- cvFree( &num_samples );
-
+ cvFree( &num_samples );
+
return CV_NO_ERR;
}
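/* In outline, icvInitMixSegm gathers, for every embedded state, the observation
   vectors currently assigned to that state and clusters them with K-Means using
   K = num_mix, so every vector receives an initial mixture index. A minimal
   plain-C sketch of the assignment step such a clustering performs (the helper
   name nearest_centroid is illustrative): */
static int nearest_centroid( const float* x, const float* centroids,
                             int num_clusters, int dim )
{
    int best = 0, c, k;
    double best_dist = 1e30;
    for( c = 0; c < num_clusters; c++ )
    {
        double d = 0.;
        for( k = 0; k < dim; k++ )
        {
            double t = x[k] - centroids[c*dim + k];
            d += t*t;
        }
        if( d < best_dist )
        {
            best_dist = d;
            best = c;
        }
    }
    return best;
}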
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: ComputeUniModeGauss
-// Purpose: The function computes the Gaussian pdf for a sample vector
+// Purpose: The function computes the Gaussian pdf for a sample vector
// Context:
// Parameters: obsVeq - pointer to the sample vector
// mu - pointer to the mean vector of the Gaussian pdf
// var - pointer to the variance vector of the Gaussian pdf
// VecSize - the size of sample vector
-//
-// Returns: the pdf of the sample vector given the specified Gaussian
//
-// Notes:
+// Returns: the pdf of the sample vector given the specified Gaussian
+//
+// Notes:
//F*/
-/*static float icvComputeUniModeGauss(CvVect32f vect, CvVect32f mu,
- CvVect32f inv_var, float log_var_val, int vect_size)
+/*static float icvComputeUniModeGauss(CvVect32f vect, CvVect32f mu,
+ CvVect32f inv_var, float log_var_val, int vect_size)
{
- int n;
+ int n;
double tmp;
double prob;
prob = prob - tmp * tmp;
}
//prob *= 0.5f;
-
+
return (float)prob;
-}*/
+}*/
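/* For reference, the density the commented-out icvComputeUniModeGauss works with is
   (up to constant terms) a diagonal-covariance Gaussian in the log domain:
       log N(x; mu, var) = -0.5 * sum_k (x_k - mu_k)^2 / var_k
                           - 0.5 * sum_k log var_k - 0.5 * d * log(2*pi).
   A minimal plain-C version of that formula (illustrative names; inv_sigma is
   1/sqrt(var_k), the quantity cvbInvSqrt produces later in this file): */
#include <math.h>
static double log_diag_gauss( const float* x, const float* mu,
                              const float* inv_sigma, int d )
{
    double quad = 0., logdet = 0.;
    int k;
    for( k = 0; k < d; k++ )
    {
        double t = ( x[k] - mu[k] ) * inv_sigma[k];
        quad += t * t;                                 /* (x_k - mu_k)^2 / var_k */
        logdet += -2. * log( (double)inv_sigma[k] );   /* log var_k */
    }
    return -0.5 * ( quad + logdet + d * log( 2. * 3.14159265358979323846 ) );
}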
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: ComputeGaussMixture
-// Purpose: The function computes the mixture Gaussian pdf of a sample vector.
+// Purpose: The function computes the mixture Gaussian pdf of a sample vector.
// Context:
// Parameters: obsVeq - pointer to the sample vector
// mu - two-dimensional pointer to the mean vector of the Gaussian pdf;
-// the first dimension is indexed over the number of mixtures and
+// the first dimension is indexed over the number of mixtures and
// the second dimension is indexed along the size of the mean vector
// var - two-dimensional pointer to the variance vector of the Gaussian pdf;
-// the first dimension is indexed over the number of mixtures and
+// the first dimension is indexed over the number of mixtures and
// the second dimension is indexed along the size of the variance vector
// VecSize - the size of sample vector
// weight - pointer to the wights of the Gaussian mixture
// NumMix - the number of Gaussian mixtures
-//
-// Returns: the pdf of the sample vector given the specified Gaussian mixture.
//
-// Notes:
+// Returns: the pdf of the sample vector given the specified Gaussian mixture.
+//
+// Notes:
//F*/
/* Calculate probability of observation at state in logarithmic scale*/
/*static float
-icvComputeGaussMixture( CvVect32f vect, float* mu,
- float* inv_var, float* log_var_val,
+icvComputeGaussMixture( CvVect32f vect, float* mu,
+ float* inv_var, float* log_var_val,
int vect_size, float* weight, int num_mix )
-{
+{
double prob, l_prob;
-
- prob = 0.0f;
+
+ prob = 0.0f;
if (num_mix == 1)
{
- return icvComputeUniModeGauss( vect, mu, inv_var, log_var_val[0], vect_size);
+ return icvComputeUniModeGauss( vect, mu, inv_var, log_var_val[0], vect_size);
}
else
{
for (m = 0; m < num_mix; m++)
{
if ( weight[m] > 0.0)
- {
- l_prob = icvComputeUniModeGauss(vect, mu + m*vect_size,
+ {
+ l_prob = icvComputeUniModeGauss(vect, mu + m*vect_size,
inv_var + m * vect_size,
- log_var_val[m],
- vect_size);
+ log_var_val[m],
+ vect_size);
prob = prob + weight[m]*exp((double)l_prob);
}
- }
- prob = log(prob);
- }
- return (float)prob;
-}*/
+ }
+ prob = log(prob);
+ }
+ return (float)prob;
+}*/
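/* The mixture density documented above is p(x) = sum_m w_m * N(x; mu_m, var_m),
   combined in the log domain. The original adds w_m * exp(l_m) directly; a common,
   numerically safer variant is the log-sum-exp form sketched below (plain C,
   assumes <math.h>; log_pm[] holds the per-mixture log-densities, names are
   illustrative): */
#include <math.h>
static double log_mixture( const float* weight, const double* log_pm, int num_mix )
{
    double max_term = -1e300, sum = 0.;
    int m;
    for( m = 0; m < num_mix; m++ )
        if( weight[m] > 0.f )
        {
            double term = log( (double)weight[m] ) + log_pm[m];
            if( term > max_term )
                max_term = term;
        }
    if( max_term <= -1e300 )
        return -1e300;                       /* no active mixture */
    for( m = 0; m < num_mix; m++ )
        if( weight[m] > 0.f )
            sum += exp( log( (double)weight[m] ) + log_pm[m] - max_term );
    return max_term + log( sum );
}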
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: EstimateObsProb
-// Purpose: The function computes the probability of every observation in every state
+// Purpose: The function computes the probability of every observation in every state
// Context:
// Parameters: obs_info - observations
// hmm - hmm
-// Returns: error status
+// Returns: error status
//
-// Notes:
+// Notes:
//F*/
static CvStatus CV_STDCALL icvEstimateObsProb( CvImgObsInfo* obs_info, CvEHMM* hmm )
{
/* check if matrix exist and check current size
if not sufficient - realloc */
- int status = 0; /* 1 - not allocated, 2 - allocated but small size,
+ int status = 0; /* 1 - not allocated, 2 - allocated but small size,
3 - size is enough, but distribution is bad, 0 - all ok */
for( j = 0; j < hmm->num_states; j++ )
total_states += hmm->u.ehmm[j].num_states;
}
- if ( hmm->obsProb == NULL )
+ if ( hmm->obsProb == NULL )
{
/* allocate memory */
int need_size = ( obs_info->obs_x * obs_info->obs_y * total_states * sizeof(float) +
buffer[2] = obs_info->obs_x;
hmm->obsProb = (float**) (buffer + 3);
status = 3;
-
+
}
else
- {
+ {
/* check current size */
int* total= (int*)(((int*)(hmm->obsProb)) - 3);
int need_size = ( obs_info->obs_x * obs_info->obs_y * total_states * sizeof(float) +
assert( sizeof(float*) == sizeof(int) );
- if ( need_size > (*total) )
+ if ( need_size > (*total) )
{
int* buffer = ((int*)(hmm->obsProb)) - 3;
cvFree( &buffer);
buffer[2] = obs_info->obs_x;
hmm->obsProb = (float**)(buffer + 3);
-
+
status = 3;
- }
+ }
}
if (!status)
{
int* obsx = ((int*)(hmm->obsProb)) - 1;
int* obsy = ((int*)(hmm->obsProb)) - 2;
-
+
assert( (*obsx > 0) && (*obsy > 0) );
/* is good distribution? */
- if ( (obs_info->obs_x > (*obsx) ) || (obs_info->obs_y > (*obsy) ) )
- status = 3;
+ if ( (obs_info->obs_x > (*obsx) ) || (obs_info->obs_y > (*obsy) ) )
+ status = 3;
}
-
+
/* if bad status - do reallocation actions */
assert( (status == 0) || (status == 3) );
/* distribute pointers of ehmm->obsProb */
for( i = 0; i < hmm->num_states; i++ )
{
- hmm->u.ehmm[i].obsProb = tmp;
+ hmm->u.ehmm[i].obsProb = tmp;
tmp += obs_info->obs_y;
}
for( i = 0; i < hmm->num_states; i++ )
{
CvEHMM* ehmm = &( hmm->u.ehmm[i] );
-
+
for( j = 0; j < obs_info->obs_y; j++ )
{
ehmm->obsProb[j] = tmpf;
tmpf += ehmm->num_states * obs_info->obs_x;
- }
+ }
}
- }/* end of pointer distribution */
+ }/* end of pointer distribution */
-#if 1
+#if 1
{
#define MAX_BUF_SIZE 1200
float local_log_mix_prob[MAX_BUF_SIZE];
float* log_mix_prob = local_log_mix_prob;
double* mix_prob = local_mix_prob;
-
+
int max_size = 0;
int obs_x = obs_info->obs_x;
}
max_size *= obs_x * vect_size;
-
+
/* allocate buffer */
if( max_size > MAX_BUF_SIZE )
{
memset( log_mix_prob, 0, max_size*sizeof(float));
/*****************computing probabilities***********************/
-
+
/* loop through external states */
for( i = 0; i < hmm->num_states; i++ )
{
CvEHMM* ehmm = &(hmm->u.ehmm[i]);
CvEHMMState* state = ehmm->u.state;
-
+
int max_mix = 0;
int n_states = ehmm->num_states;
for( j = 0; j < obs_info->obs_y; j++ )
{
int m, n;
-
+
float* obs = obs_info->obs + j * obs_x * vect_size;
float* log_mp = max_mix > 1 ? log_mix_prob : ehmm->obsProb[j];
double* mp = mix_prob;
-
+
/* several passes are done below */
-
+
/* 1. calculate logarithms of probabilities for each mixture */
/* loop through mixtures */
for( j = 0; j < obs_info->obs_y; j++ )
{
int k,m;
-
+
int obs_index = j * obs_info->obs_x;
float* B = ehmm->obsProb[j];
-
+
/* cycles through obs and states */
for( k = 0; k < obs_info->obs_x; k++ )
{
CvVect32f vect = (obs_info->obs) + (obs_index + k) * vect_size;
-
+
float* matr_line = B + k * ehmm->num_states;
for( m = 0; m < ehmm->num_states; m++ )
{
- matr_line[m] = icvComputeGaussMixture( vect, state[m].mu, state[m].inv_var,
+ matr_line[m] = icvComputeGaussMixture( vect, state[m].mu, state[m].inv_var,
state[m].log_var_val, vect_size, state[m].weight,
state[m].num_mix );
}
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: EstimateTransProb
-// Purpose: The function calculates the state and super state transition probabilities
-// of the model given the images,
+// Purpose: The function calculates the state and super state transition probabilities
+// of the model given the images,
// the state segmentation and the input parameters
// Context:
-// Parameters: obs_info_array - array of pointers to image observations
+// Parameters: obs_info_array - array of pointers to image observations
// num_img - length of above array
-// hmm - pointer to HMM structure
+// hmm - pointer to HMM structure
// Returns: void
//
-// Notes:
+// Notes:
//F*/
static CvStatus CV_STDCALL
icvEstimateTransProb( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm )
CvEHMMState* first_state = hmm->u.ehmm->u.state;
/* as a counter we will use transP matrix */
-
+
/* initialization */
-
+
/* clear transP */
icvSetZero_32f( hmm->transP, hmm->num_states, hmm->num_states );
for (i = 0; i < hmm->num_states; i++ )
{
icvSetZero_32f( hmm->u.ehmm[i].transP , hmm->u.ehmm[i].num_states, hmm->u.ehmm[i].num_states );
}
-
+
/* compute the counters */
for (i = 0; i < num_img; i++)
{
int counter = 0;
CvImgObsInfo* info = obs_info_array[i];
-
+
for (j = 0; j < info->obs_y; j++)
{
for (k = 0; k < info->obs_x; k++, counter++)
{
/* compute how many transitions from state to state
- occured both in horizontal and vertical direction */
+ occured both in horizontal and vertical direction */
int superstate, state;
int nextsuperstate, nextstate;
int begin_ind;
superstate = info->state[2 * counter];
begin_ind = (int)(hmm->u.ehmm[superstate].u.state - first_state);
- state = info->state[ 2 * counter + 1] - begin_ind;
-
+ state = info->state[ 2 * counter + 1] - begin_ind;
+
if (j < info->obs_y - 1)
{
int transP_size = hmm->num_states;
-
+
nextsuperstate = info->state[ 2*(counter + info->obs_x) ];
hmm->transP[superstate * transP_size + nextsuperstate] += 1;
}
-
+
if (k < info->obs_x - 1)
- {
+ {
int transP_size = hmm->u.ehmm[superstate].num_states;
nextstate = info->state[2*(counter+1) + 1] - begin_ind;
//assert( total );
inv_total = total ? 1.f/total : 0;
-
+
for( j = 0; j < hmm->num_states; j++)
- {
- hmm->transP[i * hmm->num_states + j] =
- hmm->transP[i * hmm->num_states + j] ?
+ {
+ hmm->transP[i * hmm->num_states + j] =
+ hmm->transP[i * hmm->num_states + j] ?
(float)log( hmm->transP[i * hmm->num_states + j] * inv_total ) : -BIG_FLT;
}
}
-
+
/* estimate other matrices */
for( k = 0; k < hmm->num_states; k++ )
{
}
//assert( total );
inv_total = total ? 1.f/total : 0;
-
+
for( j = 0; j < ehmm->num_states; j++)
- {
- ehmm->transP[i * ehmm->num_states + j] =
+ {
+ ehmm->transP[i * ehmm->num_states + j] =
(ehmm->transP[i * ehmm->num_states + j]) ?
(float)log( ehmm->transP[i * ehmm->num_states + j] * inv_total) : -BIG_FLT ;
}
}
}
return CV_NO_ERR;
-}
-
+}
+
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: MixSegmL2
// embedded HMM
// Context: used with the Viterbi training of the embedded HMM
//
-// Parameters:
+// Parameters:
// obs_info_array
// num_img
// hmm
// Returns: void
//
-// Notes:
+// Notes:
//F*/
static CvStatus CV_STDCALL
icvMixSegmL2( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm )
{
int k, i, j, m;
-
+
CvEHMMState* state = hmm->u.ehmm[0].u.state;
-
-
+
+
for (k = 0; k < num_img; k++)
- {
+ {
int counter = 0;
CvImgObsInfo* info = obs_info_array[k];
{
int e_state = info->state[2 * counter + 1];
float min_dist;
-
- min_dist = icvSquareDistance((info->obs) + (counter * info->obs_size),
+
+ min_dist = icvSquareDistance((info->obs) + (counter * info->obs_size),
state[e_state].mu, info->obs_size);
- info->mix[counter] = 0;
-
+ info->mix[counter] = 0;
+
for (m = 1; m < state[e_state].num_mix; m++)
{
float dist=icvSquareDistance( (info->obs) + (counter * info->obs_size),
if (dist < min_dist)
{
min_dist = dist;
- /* assign mixture with smallest distance */
+ /* assign mixture with smallest distance */
info->mix[counter] = m;
}
}
}
}
return CV_NO_ERR;
-}
+}
/*
CvStatus icvMixSegmProb(CvImgObsInfo* obs_info, int num_img, CvEHMM* hmm )
{
int k, i, j, m;
-
+
CvEHMMState* state = hmm->ehmm[0].state_info;
-
-
+
+
for (k = 0; k < num_img; k++)
- {
+ {
int counter = 0;
CvImgObsInfo* info = obs_info + k;
{
int e_state = info->in_state[counter];
float max_prob;
-
- max_prob = icvComputeUniModeGauss( info->obs[counter], state[e_state].mu[0],
- state[e_state].inv_var[0],
+
+ max_prob = icvComputeUniModeGauss( info->obs[counter], state[e_state].mu[0],
+ state[e_state].inv_var[0],
state[e_state].log_var[0],
info->obs_size );
- info->mix[counter] = 0;
-
+ info->mix[counter] = 0;
+
for (m = 1; m < state[e_state].num_mix; m++)
{
float prob=icvComputeUniModeGauss(info->obs[counter], state[e_state].mu[m],
- state[e_state].inv_var[m],
+ state[e_state].inv_var[m],
state[e_state].log_var[m],
info->obs_size);
if (prob > max_prob)
{
max_prob = prob;
- // assign mixture with greatest probability.
+ // assign mixture with greatest probability.
info->mix[counter] = m;
}
}
}
}
- }
+ }
return CV_NO_ERR;
-}
+}
*/
static CvStatus CV_STDCALL
icvViterbiSegmentation( int num_states, int /*num_obs*/, CvMatr32f transP,
int** q, int min_num_obs, int max_num_obs,
float* prob )
{
- // memory allocation
+ // memory allocation
int i, j, last_obs;
int m_HMMType = _CV_ERGODIC; /* _CV_CAUSAL or _CV_ERGODIC */
-
+
int m_ProbType = prob_type; /* _CV_LAST_STATE or _CV_BEST_STATE */
-
+
int m_minNumObs = min_num_obs; /*??*/
int m_maxNumObs = max_num_obs; /*??*/
-
+
int m_numStates = num_states;
-
+
float* m_pi = (float*)cvAlloc( num_states* sizeof(float) );
CvMatr32f m_a = transP;
- // offset brobability matrix to starting observation
+ // offset brobability matrix to starting observation
CvMatr32f m_b = B + start_obs * num_states;
//so m_xl will not be used more
- //m_xl = start_obs;
+ //m_xl = start_obs;
- /* if (muDur != NULL){
+ /* if (muDur != NULL){
m_d = new int[m_numStates];
m_l = new double[m_numStates];
for (i = 0; i < m_numStates; i++){
- m_l[i] = muDur[i];
+ m_l[i] = muDur[i];
+ }
}
- }
else{
m_d = NULL;
m_l = NULL;
}
*/
-
+
CvMatr32f m_Gamma = icvCreateMatrix_32f( num_states, m_maxNumObs );
int* m_csi = (int*)cvAlloc( num_states * m_maxNumObs * sizeof(int) );
-
+
//stores maximal result for every ending observation
CvVect32f m_MaxGamma = prob;
-
+
// assert( m_xl + max_num_obs <= num_obs );
m_pi[i] = -BIG_FLT;
}
m_pi[0] = 0.0f;
-
+
for (i = 0; i < num_states; i++)
{
m_Gamma[0 * num_states + i] = m_pi[i] + m_b[0 * num_states + i];
- m_csi[0 * num_states + i] = 0;
+ m_csi[0 * num_states + i] = 0;
}
-
+
/******************************************************************/
/* Viterbi recursion */
-
+
if ( m_HMMType == _CV_CAUSAL ) //causal model
{
- int t,j;
-
+ int t;
+
for (t = 1 ; t < m_maxNumObs; t++)
{
// evaluate self-to-self transition for state 0
m_Gamma[t * num_states + 0] = m_Gamma[(t-1) * num_states + 0] + m_a[0];
m_csi[t * num_states + 0] = 0;
-
+
for (j = 1; j < num_states; j++)
- {
+ {
float self = m_Gamma[ (t-1) * num_states + j] + m_a[ j * num_states + j];
float prev = m_Gamma[ (t-1) * num_states +(j-1)] + m_a[ (j-1) * num_states + j];
-
+
if ( prev > self )
{
m_csi[t * num_states + j] = j-1;
m_csi[t * num_states + j] = j;
m_Gamma[t * num_states + j] = self;
}
-
- m_Gamma[t * num_states + j] = m_Gamma[t * num_states + j] + m_b[t * num_states + j];
- }
+
+ m_Gamma[t * num_states + j] = m_Gamma[t * num_states + j] + m_b[t * num_states + j];
+ }
}
}
- else if ( m_HMMType == _CV_ERGODIC ) //ergodic model
- {
+ else if ( m_HMMType == _CV_ERGODIC ) //ergodic model
+ {
int t;
for (t = 1 ; t < m_maxNumObs; t++)
- {
+ {
for (j = 0; j < num_states; j++)
- {
- int i;
+ {
m_Gamma[ t*num_states + j] = m_Gamma[(t-1) * num_states + 0] + m_a[0*num_states+j];
m_csi[t *num_states + j] = 0;
-
+
for (i = 1; i < num_states; i++)
{
- float currGamma = m_Gamma[(t-1) *num_states + i] + m_a[i *num_states + j];
+ float currGamma = m_Gamma[(t-1) *num_states + i] + m_a[i *num_states + j];
if (currGamma > m_Gamma[t *num_states + j])
- {
+ {
m_Gamma[t * num_states + j] = currGamma;
m_csi[t * num_states + j] = i;
}
- }
+ }
m_Gamma[t *num_states + j] = m_Gamma[t *num_states + j] + m_b[t * num_states + j];
- }
- }
+ }
+ }
}
for( last_obs = m_minNumObs-1, i = 0; last_obs < m_maxNumObs; last_obs++, i++ )
/******************************************************************/
/* Viterbi termination */
-
+
if ( m_ProbType == _CV_LAST_STATE )
{
m_MaxGamma[i] = m_Gamma[last_obs * num_states + num_states - 1];
else if( m_ProbType == _CV_BEST_STATE )
{
int k;
- q[i][last_obs] = 0;
- m_MaxGamma[i] = m_Gamma[last_obs * num_states + 0];
-
+ q[i][last_obs] = 0;
+ m_MaxGamma[i] = m_Gamma[last_obs * num_states + 0];
+
for(k = 1; k < num_states; k++)
- {
+ {
if ( m_Gamma[last_obs * num_states + k] > m_MaxGamma[i] )
{
m_MaxGamma[i] = m_Gamma[last_obs * num_states + k];
q[i][last_obs] = k;
- }
+ }
}
- }
-
+ }
+
/******************************************************************/
/* Viterbi backtracking */
for (t = last_obs-1; t >= 0; t--)
{
- q[i][t] = m_csi[(t+1) * num_states + q[i][t+1] ];
- }
- }
-
+ q[i][t] = m_csi[(t+1) * num_states + q[i][t+1] ];
+ }
+ }
+
/* memory free */
cvFree( &m_pi );
cvFree( &m_csi );
- icvDeleteMatrix( m_Gamma );
-
+ icvDeleteMatrix( m_Gamma );
+
return CV_NO_ERR;
-}
+}
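/* The recursion implemented above is the standard log-domain Viterbi update
       Gamma_t(j) = max_i [ Gamma_{t-1}(i) + a(i,j) ] + b_t(j),  csi_t(j) = argmax_i,
   followed by backtracking q(t) = csi_{t+1}( q(t+1) ). A minimal sketch of one
   ergodic update step on plain arrays (illustrative names, row-major a): */
static void viterbi_step( const float* prev_gamma, const float* a, const float* b_t,
                          int n, float* gamma, int* csi )
{
    int i, j;
    for( j = 0; j < n; j++ )
    {
        float best = prev_gamma[0] + a[0*n + j];
        int best_i = 0;
        for( i = 1; i < n; i++ )
        {
            float v = prev_gamma[i] + a[i*n + j];
            if( v > best )
            {
                best = v;
                best_i = i;
            }
        }
        gamma[j] = best + b_t[j];
        csi[j] = best_i;
    }
}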
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: icvEViterbi
// Purpose: The function calculates the embedded Viterbi algorithm
-// for 1 image
+// for 1 image
// Context:
-// Parameters:
+// Parameters:
// obs_info - observations
// hmm - HMM
-//
-// Returns: the Embedded Viterbi probability (float)
+//
+// Returns: the Embedded Viterbi probability (float)
// and do state segmentation of observations
//
-// Notes:
+// Notes:
//F*/
static float CV_STDCALL icvEViterbi( CvImgObsInfo* obs_info, CvEHMM* hmm )
{
float inv_obs_x = 1.f / obs_info->obs_x;
CvEHMMState* first_state = hmm->u.ehmm->u.state;
-
+
/* memory allocation for superB */
CvMatr32f superB = icvCreateMatrix_32f(hmm->num_states, obs_info->obs_y );
-
+
/* memory allocation for q */
int*** q = (int***)cvAlloc( hmm->num_states * sizeof(int**) );
int* super_q = (int*)cvAlloc( obs_info->obs_y * sizeof(int) );
-
+
for (i = 0; i < hmm->num_states; i++)
{
q[i] = (int**)cvAlloc( obs_info->obs_y * sizeof(int*) );
-
+
for (j = 0; j < obs_info->obs_y ; j++)
{
q[i][j] = (int*)cvAlloc( obs_info->obs_x * sizeof(int) );
}
- }
-
+ }
+
/* start Viterbi segmentation */
for (i = 0; i < hmm->num_states; i++)
{
CvEHMM* ehmm = &(hmm->u.ehmm[i]);
-
+
for (j = 0; j < obs_info->obs_y; j++)
{
float max_gamma;
-
+
/* 1D HMM Viterbi segmentation */
- icvViterbiSegmentation( ehmm->num_states, obs_info->obs_x,
- ehmm->transP, ehmm->obsProb[j], 0,
+ icvViterbiSegmentation( ehmm->num_states, obs_info->obs_x,
+ ehmm->transP, ehmm->obsProb[j], 0,
_CV_LAST_STATE, &q[i][j], obs_info->obs_x,
obs_info->obs_x, &max_gamma);
-
+
superB[j * hmm->num_states + i] = max_gamma * inv_obs_x;
}
}
-
+
/* perform global Viterbi segmentation (i.e. process higher-level HMM) */
-
- icvViterbiSegmentation( hmm->num_states, obs_info->obs_y,
- hmm->transP, superB, 0,
+
+ icvViterbiSegmentation( hmm->num_states, obs_info->obs_y,
+ hmm->transP, superB, 0,
_CV_LAST_STATE, &super_q, obs_info->obs_y,
obs_info->obs_y, &log_likelihood );
-
- log_likelihood /= obs_info->obs_y ;
-
-
+
+ log_likelihood /= obs_info->obs_y ;
+
+
counter = 0;
/* assign new state to observation vectors */
for (i = 0; i < obs_info->obs_y; i++)
- {
+ {
for (j = 0; j < obs_info->obs_x; j++, counter++)
{
int superstate = super_q[i];
int state = (int)(hmm->u.ehmm[superstate].u.state - first_state);
-
+
obs_info->state[2 * counter] = superstate;
obs_info->state[2 * counter + 1] = state + q[superstate][i][j];
}
}
-
+
/* memory deallocation for superB */
icvDeleteMatrix( superB );
-
+
/*memory deallocation for q */
for (i = 0; i < hmm->num_states; i++)
{
}
cvFree( &q[i] );
}
-
+
cvFree( &q );
cvFree( &super_q );
-
+
return log_likelihood;
-}
+}
static CvStatus CV_STDCALL
icvEstimateHMMStateParams( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm )
float start_log_var_val = LN2PI * vect_len;
CvVect32f tmp_vect = icvCreateVector_32f( vect_len );
-
+
CvEHMMState* first_state = hmm->u.ehmm[0].u.state;
assert( sizeof(float) == sizeof(int) );
for (m = 0; m < first_state[i].num_mix; m++)
{
((int*)(first_state[i].weight))[m] = 0;
- }
+ }
}
-
+
/* maybe gamma must be computed in mixsegm process ?? */
/* compute gamma */
int state, mixture;
state = info->state[2*i + 1];
mixture = info->mix[i];
- /* computes gamma - number of observations corresponding
- to every mixture of every state */
- ((int*)(first_state[state].weight))[mixture] += 1;
+ /* computes gamma - number of observations corresponding
+ to every mixture of every state */
+ ((int*)(first_state[state].weight))[mixture] += 1;
}
- }
+ }
/***************Mean and Var***********************/
/* compute means and variances of every item */
/* initially variance placed to inv_var */
/* zero mean and variance */
for (i = 0; i < total; i++)
{
- memset( (void*)first_state[i].mu, 0, first_state[i].num_mix * vect_len *
+ memset( (void*)first_state[i].mu, 0, first_state[i].num_mix * vect_len *
sizeof(float) );
- memset( (void*)first_state[i].inv_var, 0, first_state[i].num_mix * vect_len *
+ memset( (void*)first_state[i].inv_var, 0, first_state[i].num_mix * vect_len *
sizeof(float) );
}
-
+
/* compute sums */
for (i = 0; i < num_img; i++)
{
float* vector = info->obs;
for (j = 0; j < total_obs; j++, vector+=vect_len )
- {
+ {
int state = info->state[2 * j + 1];
- int mixture = info->mix[j];
-
+ int mixture = info->mix[j];
+
CvVect32f mean = first_state[state].mu + mixture * vect_len;
CvVect32f mean2 = first_state[state].inv_var + mixture * vect_len;
-
+
icvAddVector_32f( mean, vector, mean, vect_len );
for( k = 0; k < vect_len; k++ )
mean2[k] += vector[k]*vector[k];
- }
+ }
}
-
+
/*compute the means and variances */
/* assume gamma already computed */
for (i = 0; i < total; i++)
- {
+ {
CvEHMMState* state = &(first_state[i]);
for (m = 0; m < state->num_mix; m++)
{
- int k;
CvVect32f mu = state->mu + m * vect_len;
- CvVect32f invar = state->inv_var + m * vect_len;
-
+ CvVect32f invar = state->inv_var + m * vect_len;
+
if ( ((int*)state->weight)[m] > 1)
{
float inv_gamma = 1.f/((int*)(state->weight))[m];
-
+
icvScaleVector_32f( mu, mu, vect_len, inv_gamma);
icvScaleVector_32f( invar, invar, vect_len, inv_gamma);
}
icvMulVectors_32f(mu, mu, tmp_vect, vect_len);
- icvSubVector_32f( invar, tmp_vect, invar, vect_len);
-
+ icvSubVector_32f( invar, tmp_vect, invar, vect_len);
+
/* low bound of variance - 100 (Ara's experimental result) */
for( k = 0; k < vect_len; k++ )
{
for( k = 0; k < vect_len; k++ )
{
state->log_var_val[m] += (float)log( invar[k] );
- }
+ }
/* SMOLI 27.10.2000 */
state->log_var_val[m] *= 0.5;
cvbInvSqrt( invar, invar, vect_len );
}
}
-
+
/***************Weights***********************/
/* normalize gammas - i.e. compute mixture weights */
-
+
//compute weights
for (i = 0; i < total; i++)
- {
+ {
int gamma_total = 0;
float norm;
for (m = 0; m < first_state[i].num_mix; m++)
{
- gamma_total += ((int*)(first_state[i].weight))[m];
+ gamma_total += ((int*)(first_state[i].weight))[m];
}
norm = gamma_total ? (1.f/(float)gamma_total) : 0.f;
-
+
for (m = 0; m < first_state[i].num_mix; m++)
{
first_state[i].weight[m] = ((int*)(first_state[i].weight))[m] * norm;
- }
- }
+ }
+ }
icvDeleteVector( tmp_vect);
- return CV_NO_ERR;
-}
+ return CV_NO_ERR;
+}
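/* The estimation pass above uses the usual sufficient statistics per mixture: with
   gamma the number of vectors assigned to the mixture,
       mu  = (1/gamma) * sum x,        var = (1/gamma) * sum x^2 - mu^2,
   after which the variance is floored, log_var_val stores 0.5 * sum_k log var_k and
   inv_var is overwritten with 1/sqrt(var) via cvbInvSqrt. A small plain-C sketch of
   the same mean/variance update for a single dimension (n > 0 assumed, names are
   illustrative): */
static void mean_var_update( const float* samples, int n, float* mu, float* var )
{
    double s = 0., s2 = 0., m;
    int i;
    for( i = 0; i < n; i++ )
    {
        s  += samples[i];
        s2 += (double)samples[i] * samples[i];
    }
    m = s / n;
    *mu  = (float)m;
    *var = (float)( s2 / n - m * m );
}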
/*
CvStatus icvLightingCorrection8uC1R( uchar* img, CvSize roi, int src_step )
int i, j;
int width = roi.width;
int height = roi.height;
-
+
float x1, x2, y1, y2;
int f[3] = {0, 0, 0};
float a[3] = {0, 0, 0};
-
+
float h1;
float h2;
-
+
float c1,c2;
-
+
float min = FLT_MAX;
float max = -FLT_MAX;
float correction;
-
+
float* float_img = icvAlloc( width * height * sizeof(float) );
-
+
x1 = width * (width + 1) / 2.0f; // Sum (1, ... , width)
x2 = width * (width + 1 ) * (2 * width + 1) / 6.0f; // Sum (1^2, ... , width^2)
y1 = height * (height + 1)/2.0f; // Sum (1, ... , height)
y2 = height * (height + 1 ) * (2 * height + 1) / 6.0f; // Sum (1^2, ... , height^2)
-
-
+
+
// extract grayvalues
for (i = 0; i < height; i++)
{
f[0] = f[0] + img[i*src_step + j];
}
}
-
+
h1 = (float)f[0] * (float)x1 / (float)width;
h2 = (float)f[0] * (float)y1 / (float)height;
-
+
a[2] = ((float)f[2] - h1) / (float)(x2*height - x1*x1*height/(float)width);
a[1] = ((float)f[1] - h2) / (float)(y2*width - y1*y1*width/(float)height);
- a[0] = (float)f[0]/(float)(width*height) - (float)y1*a[1]/(float)height -
+ a[0] = (float)f[0]/(float)(width*height) - (float)y1*a[1]/(float)height -
(float)x1*a[2]/(float)width;
-
- for (i = 0; i < height; i++)
- {
+
+ for (i = 0; i < height; i++)
+ {
for (j = 0; j < width; j++)
{
-
+
correction = a[0] + a[1]*(float)i + a[2]*(float)j;
-
+
float_img[i*width + j] = img[i*src_step + j] - correction;
-
+
if (float_img[i*width + j] < min) min = float_img[i*width+j];
if (float_img[i*width + j] > max) max = float_img[i*width+j];
}
}
-
+
//rescaling to the range 0:255
c2 = 0;
if (max == min)
c2 = 255.0f;
else
c2 = 255.0f/(float)(max - min);
-
+
c1 = (-(float)min)*c2;
-
+
for (i = 0; i < height; i++)
{
for (j = 0; j < width; j++)
cvFree( &float_img );
return CV_NO_ERR;
}
-
-CvStatus icvLightingCorrection( icvImage* img )
+
+CvStatus icvLightingCorrection( icvImage* img )
{
CvSize roi;
if ( img->type != IPL_DEPTH_8U || img->channels != 1 )
return CV_BADFACTOR_ERR;
roi = _cvSize( img->roi.width, img->roi.height );
-
- return _cvLightingCorrection8uC1R( img->data + img->roi.y * img->step + img->roi.x,
+
+ return _cvLightingCorrection8uC1R( img->data + img->roi.y * img->step + img->roi.x,
roi, img->step );
}
/////////////////////////////// CvMatrix implementation //////////////////////////////////
-CvMatrix::CvMatrix( int rows, int cols, int type, CvMemStorage* storage, bool alloc_data )
+CvMatrix::CvMatrix( int _rows, int _cols, int _type, CvMemStorage* storage, bool alloc_data )
{
if( storage )
{
matrix = (CvMat*)cvMemStorageAlloc( storage, sizeof(*matrix) );
- cvInitMatHeader( matrix, rows, cols, type, alloc_data ?
- cvMemStorageAlloc( storage, rows*cols*CV_ELEM_SIZE(type) ) : 0 );
+ cvInitMatHeader( matrix, _rows, _cols, _type, alloc_data ?
+ cvMemStorageAlloc( storage, _rows*_cols*CV_ELEM_SIZE(_type) ) : 0 );
}
else
matrix = 0;
#include "_kdtree.hpp"
#include "_featuretree.h"
-#if _MSC_VER >= 1400
-#pragma warning(disable:4996) // suppress "function call with parameters may be unsafe" in std::copy
-#endif
-
class CvKDTreeWrap : public CvFeatureTree {
template <class __scalartype, int __cvtype>
struct deref {
assert(results->cols == k);
assert(dist->cols == k);
- for (int j = 0; j < d->rows; ++j) {
- const typename __treetype::scalar_type* dj =
- (const typename __treetype::scalar_type*) dptr;
+ for (int j = 0; j < d->rows; ++j)
+ {
+ const typename __treetype::scalar_type* dj = (const typename __treetype::scalar_type*) dptr;
int* resultsj = (int*) resultsptr;
double* distj = (double*) distptr;
tr->find_nn_bbf(dj, k, emax, nn);
assert((int)nn.size() <= k);
- for (unsigned int j = 0; j < nn.size(); ++j) {
- *resultsj++ = *nn[j].p;
- *distj++ = nn[j].dist;
+ for (unsigned int i = 0; i < nn.size(); ++i)
+ {
+ *resultsj++ = *nn[i].p;
+ *distj++ = nn[i].dist;
}
std::fill(resultsj, resultsj + k - nn.size(), -1);
std::fill(distj, distj + k - nn.size(), 0);
template <class __treetype>
int find_ortho_range(CvMat* bounds_min, CvMat* bounds_max,
- CvMat* results) {
+ CvMat* results) {
int rn = results->rows * results->cols;
std::vector<int> inbounds;
dispatch_cvtype(mat, ((__treetype*)data)->
- find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr,
- (typename __treetype::scalar_type*)bounds_max->data.ptr,
- inbounds));
+ find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr,
+ (typename __treetype::scalar_type*)bounds_max->data.ptr,
+ inbounds));
std::copy(inbounds.begin(),
- inbounds.begin() + std::min((int)inbounds.size(), rn),
- (int*) results->data.ptr);
+ inbounds.begin() + std::min((int)inbounds.size(), rn),
+ (int*) results->data.ptr);
return (int)inbounds.size();
}
public:
CvKDTreeWrap(CvMat* _mat) : mat(_mat) {
// * a flag parameter should tell us whether
- // * (a) user ensures *mat outlives *this and is unchanged,
+ // * (a) user ensures *mat outlives *this and is unchanged,
// * (b) we take reference and user ensures mat is unchanged,
// * (c) we copy data, (d) we own and release data.
tmp[j] = j;
dispatch_cvtype(mat, data = new tree_type
- (&tmp[0], &tmp[0] + tmp.size(), mat->cols,
- tree_type::deref_type(mat)));
+ (&tmp[0], &tmp[0] + tmp.size(), mat->cols,
+ tree_type::deref_type(mat)));
}
~CvKDTreeWrap() {
dispatch_cvtype(mat, delete (tree_type*) data);
assert(CV_MAT_TYPE(results->type) == CV_32SC1);
dispatch_cvtype(mat, find_nn<tree_type>
- (desc, k, emax, results, dist));
+ (desc, k, emax, results, dist));
}
int FindOrthoRange(CvMat* bounds_min, CvMat* bounds_max,
- CvMat* results) {
+ CvMat* results) {
bool free_bounds = false;
int count = -1;
if (bounds_min->cols * bounds_min->rows != dims() ||
- bounds_max->cols * bounds_max->rows != dims())
+ bounds_max->cols * bounds_max->rows != dims())
CV_Error(CV_StsUnmatchedSizes, "bounds_{min,max} must be 1 x dims or dims x 1");
if (CV_MAT_TYPE(bounds_min->type) != CV_MAT_TYPE(bounds_max->type))
CV_Error(CV_StsUnmatchedFormats, "bounds_{min,max} must have same type");
assert(bounds_max->rows * bounds_max->cols == dims());
dispatch_cvtype(mat, count = find_ortho_range<tree_type>
- (bounds_min, bounds_max,results));
+ (bounds_min, bounds_max,results));
if (free_bounds) {
cvReleaseMat(&bounds_min);
vertices_number: in, number of vertices in polygon
Return :
--------------------------------------------------------------------------*/
-void _cvSetSeqBlockSize(CvVoronoiDiagramInt* pVoronoiDiagramInt,int vertices_number)
+static void _cvSetSeqBlockSize(CvVoronoiDiagramInt* pVoronoiDiagramInt,int vertices_number)
{
int N = 2*vertices_number;
cvSetSeqBlockSize(pVoronoiDiagramInt->SiteSeq,N*pVoronoiDiagramInt->SiteSeq->elem_size);
typedef void (*pointer_LMJac)( const CvMat* src, CvMat* dst );
typedef void (*pointer_LMFunc)( const CvMat* src, CvMat* dst );
+#if 0
/* Optimization using Levenberg-Marquardt */
void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
pointer_LMFunc function,
CvMat *matrJtJN = 0;
CvMat *matrJt = 0;
CvMat *vectB = 0;
-
+
CV_FUNCNAME( "cvLevenbergMarquardtOptimization" );
__BEGIN__;
{
CV_ERROR( CV_StsUnmatchedSizes, "Number of columns of vector X0 must be 1" );
}
-
+
if( observRes->cols != 1 )
{
CV_ERROR( CV_StsUnmatchedSizes, "Number of columns of the observed result vector must be 1" );
/* Print result of function to file */
/* Compute error */
- cvSub(observRes,resFunc,error);
-
+ cvSub(observRes,resFunc,error);
+
//valError = error_function(observRes,resFunc);
/* Need to use new version of computing error (norm) */
valError = cvNorm(observRes,resFunc);
/* Define optimal delta for J'*J*delta=J'*error */
/* compute J'J */
cvMulTransposed(Jac,matrJtJ,1);
-
+
cvCopy(matrJtJ,matrJtJN);
/* compute J'*error */
return;
}
+#endif
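/* The optimizer disabled above follows the usual Levenberg-Marquardt scheme: at each
   iteration it solves the damped normal equations
       (J^T J + lambda * I) * delta = J^T * (observed - f(x)),
   and adapts lambda depending on whether the error norm decreased. A minimal
   one-parameter illustration of a single step (all names are illustrative): */
static double lm_step_1d( double J, double residual, double lambda )
{
    /* delta = (J*J + lambda)^(-1) * (J * residual) */
    return J * residual / ( J * J + lambda );
}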
/*------------------------------------------------------------------------------*/
#if 0
*/
#define TRACK_BUNDLE_FILE "d:\\test\\bundle.txt"
+void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPoints,
+ CvMat** pointsPres, int numImages,
+ CvMat** resultProjMatrs, CvMat* resultPoints4D,int maxIter,double epsilon );
+
/* ============== Bundle adjustment optimization ================= */
-void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMat *derivProj)
+static void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMat *derivProj)
{
/* Compute derivate for given projection matrix points and status of points */
}
/* ----- End test ----- */
- int i;
-
/* Allocate memory for derivates */
double p[12];
/* Copy projection matrix */
- for( i = 0; i < 12; i++ )
+ for(int i = 0; i < 12; i++ )
{
p[i] = cvmGet(projMatr,i/4,i%4);
}
piX[1] = X[0]*p[4] + X[1]*p[5] + X[2]*p[6] + X[3]*p[7];
piX[2] = X[0]*p[8] + X[1]*p[9] + X[2]*p[10] + X[3]*p[11];
- int i;
/* fill derivate by point */
double tmp3 = 1/(piX[2]*piX[2]);
double tmp2 = -piX[1]*tmp3;
/* fill derivate by projection matrix */
- for( i = 0; i < 4; i++ )
+ for(int i = 0; i < 4; i++ )
{
/* derivate for x */
cvmSet(derivProj,currVisPoint*2,i,X[i]/piX[2]);//x' p1i
}
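/* The entries written above follow from the quotient rule applied to the perspective
   projection u = (p1 . X) / (p3 . X) (and analogously v = (p2 . X) / (p3 . X)):
       du/dp1_i =  X_i / (p3 . X),      du/dp3_i = -u * X_i / (p3 . X),
   which is what X[i]/piX[2] above implements, while tmp3 = 1 / (p3 . X)^2 and
   tmp2 = -piX[1] * tmp3 are the shared factors used for the remaining rows. */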
/*======================================================================================*/
-void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **projDerives)
+static void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **projDerives)
{
CV_FUNCNAME( "icvComputeDerivateProjAll" );
__BEGIN__;
}
/*======================================================================================*/
-void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints, CvMat *derivPoint)
+static void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints, CvMat *derivPoint)
{
CV_FUNCNAME( "icvComputeDerivatePoints" );
{
CV_ERROR( CV_StsOutOfRange, "Size of projection matrix (projMatr) must be 3x4" );
}
-
+
if( !CV_IS_MAT(presPoints) )
{
CV_ERROR( CV_StsUnsupportedFormat, "Status must be a matrix 1xN" );
{
CV_ERROR( CV_StsUnsupportedFormat, "derivPoint must be a matrix 2 x 4VisNum" );
}
- /* ----- End test ----- */
-
+ /* ----- End test ----- */
+
/* Compute derivates by points */
-
+
double p[12];
- int i;
- for( i = 0; i < 12; i++ )
+ for(int i = 0; i < 12; i++ )
{
p[i] = cvmGet(projMatr,i/4,i%4);
}
piX[0] = X[0]*p[0] + X[1]*p[1] + X[2]*p[2] + X[3]*p[3];
piX[1] = X[0]*p[4] + X[1]*p[5] + X[2]*p[6] + X[3]*p[7];
piX[2] = X[0]*p[8] + X[1]*p[9] + X[2]*p[10] + X[3]*p[11];
-
- int i,j;
double tmp3 = 1/(piX[2]*piX[2]);
-
- for( j = 0; j < 2; j++ )//for x and y
+
+ for(int j = 0; j < 2; j++ )//for x and y
{
- for( i = 0; i < 4; i++ )// for X,Y,Z,W
+ for(int i = 0; i < 4; i++ )// for X,Y,Z,W
{
- cvmSet( derivPoint,
+ cvmSet( derivPoint,
j, currVisPoint*4+i,
(p[j*4+i]*piX[2]-p[8+i]*piX[j]) * tmp3 );
}
__END__;
return;
}
+
/*======================================================================================*/
-void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **pointDerives)
+static void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **pointDerives)
{
CV_FUNCNAME( "icvComputeDerivatePointsAll" );
__BEGIN__;
return;
}
/*======================================================================================*/
-void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, CvMat **matrV)
+static void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, CvMat **matrV)
{
int *shifts = 0;
{
if( cvmGet(presPoints[currImage],0,currPoint) > 0 )
{
- sum += cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+i) *
+ sum += cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+i) *
cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+j);
- sum += cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+i) *
+ sum += cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+i) *
cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+j);
}
}
__END__;
cvFree( &shifts);
-
+
return;
}
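/* For context, the U, V and W matrices assembled by icvComputeMatrixUAll,
   icvComputeMatrixVAll and icvComputeMatrixW appear to correspond to the block
   structure of J^T J in sparse bundle adjustment:
       J^T J = [ U   W ]     U: camera blocks  (d(proj)/dP)^T (d(proj)/dP)
               [ W^T V ],    V: point blocks   (d(proj)/dX)^T (d(proj)/dX)
                             W: mixed blocks   (d(proj)/dP)^T (d(proj)/dX),
   summed over the points that are visible in each image, which is what the
   presPoints checks above restrict the sums to. */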
/*======================================================================================*/
-void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
+static void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
{
CV_FUNCNAME( "icvComputeMatrixUAll" );
__BEGIN__;
return;
}
/*======================================================================================*/
-void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvMat **presPoints, CvMat *matrW)
+static void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvMat **presPoints, CvMat *matrW)
{
CV_FUNCNAME( "icvComputeMatrixW" );
__BEGIN__;
for( int currCol = 0; currCol < 4; currCol++ )
{
double sum;
- sum = cvmGet(projDeriv[currImage],currVis*2+0,currLine) *
+ sum = cvmGet(projDeriv[currImage],currVis*2+0,currLine) *
cvmGet(pointDeriv[currImage],0,currVis*4+currCol);
- sum += cvmGet(projDeriv[currImage],currVis*2+1,currLine) *
+ sum += cvmGet(projDeriv[currImage],currVis*2+1,currLine) *
cvmGet(pointDeriv[currImage],1,currVis*4+currCol);
cvmSet(matrW,currImage*12+currLine,currPoint*4+currCol,sum);
}
}
}
-
+
#ifdef TRACK_BUNDLE
{
FILE *file;
__END__;
return;
}
+
/*======================================================================================*/
/* Compute the Jacobian multiplied by the projection-matrix errors */
-void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,CvMat *jacProjErr )
+static void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,CvMat *jacProjErr )
{
CV_FUNCNAME( "icvComputeJacErrorProj" );
__BEGIN__;
double sum = 0;
for( int i = 0; i < num; i++ )
{
- sum += cvmGet(projDeriv[currImage],i,currCol) *
+ sum += cvmGet(projDeriv[currImage],i,currCol) *
cvmGet(projErrors[currImage],i%2,i/2);
}
cvmSet(jacProjErr,currImage*12+currCol,0,sum);
__END__;
return;
}
+
/*======================================================================================*/
/* Compute the Jacobian multiplied by the point errors */
-void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors, CvMat **presPoints,CvMat *jacPointErr )
+static void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors, CvMat **presPoints,CvMat *jacPointErr )
{
int *shifts = 0;
}
/*======================================================================================*/
+
/* Reconstruct 4D points using status */
void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat** presPoints,
CvMat *points4D,int numImages,CvMat **projError)
numVisProj++;
}
}
-
+
if( numVisProj < 2 )
{
/* This point can't be reconstructed */
y = cvmGet(projPoints[currImage],1,currPoint);
for( int k = 0; k < 4; k++ )
{
- matrA_dat[currVisProj*12 + k] =
+ matrA_dat[currVisProj*12 + k] =
x * cvmGet(projMatrs[currImage],2,k) - cvmGet(projMatrs[currImage],0,k);
matrA_dat[currVisProj*12+4 + k] =
CvMat point3D;
double point3D_dat[3];
point3D = cvMat(3,1,CV_64F,point3D_dat);
-
- int currPoint;
+
int numVis = 0;
double totalError = 0;
- for( currPoint = 0; currPoint < numPoints; currPoint++ )
+ for(int curPoint = 0; curPoint < numPoints; curPoint++ )
{
- if( cvmGet(presPoints[currImage],0,currPoint) > 0)
+ if( cvmGet(presPoints[currImage],0,curPoint) > 0)
{
double dx,dy;
- cvGetCol(points4D,&point4D,currPoint);
+ cvGetCol(points4D,&point4D,curPoint);
cvmMul(projMatrs[currImage],&point4D,&point3D);
double w = point3D_dat[2];
double x = point3D_dat[0] / w;
double y = point3D_dat[1] / w;
- dx = cvmGet(projPoints[currImage],0,currPoint) - x;
- dy = cvmGet(projPoints[currImage],1,currPoint) - y;
+ dx = cvmGet(projPoints[currImage],0,curPoint) - x;
+ dy = cvmGet(projPoints[currImage],1,curPoint) - y;
if( projError )
{
- cvmSet(projError[currImage],0,currPoint,dx);
- cvmSet(projError[currImage],1,currPoint,dy);
+ cvmSet(projError[currImage],0,curPoint,dx);
+ cvmSet(projError[currImage],1,curPoint,dy);
}
totalError += sqrt(dx*dx+dy*dy);
numVis++;
/*======================================================================================*/
-void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs, CvMat **pointsPres, CvMat **projPoints)
+static void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs, CvMat **pointsPres, CvMat **projPoints)
{
CV_FUNCNAME( "icvProjPointsStatusFunc" );
__BEGIN__;
fclose(file);
}
#endif
-
+
int currImage;
for( currImage = 0; currImage < numImages; currImage++ )
{
fclose(file);
}
#endif
-
+
cvmMul(projMatrs[currImage],&point4D,&point3D);
double w = point3D_dat[2];
cvmSet(projPoints[currImage],0,currVisPoint,point3D_dat[0]/w);
}
/*======================================================================================*/
-void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
+static void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
{
/* Free each matrix */
int currMatr;
-
+
if( *matrArray != 0 )
{/* Need delete */
for( currMatr = 0; currMatr < numMatr; currMatr++ )
}
/*======================================================================================*/
-void *icvClearAlloc(int size)
+static void *icvClearAlloc(int size)
{
void *ptr = 0;
}
#endif
+
/*======================================================================================*/
/* !!! it may be useful to return the norm of the error */
/* !!! may not work correctly when not all 4D points are visible */
CvMat** pointsPres, int numImages,
CvMat** resultProjMatrs, CvMat* resultPoints4D,int maxIter,double epsilon )
{
-
+
CvMat *vectorX_points4D = 0;
- CvMat **vectorX_projMatrs = 0;
+ CvMat **vectorX_projMatrs = 0;
CvMat *newVectorX_points4D = 0;
CvMat **newVectorX_projMatrs = 0;
CvMat *changeVectorX_points4D = 0;
- CvMat *changeVectorX_projMatrs = 0;
+ CvMat *changeVectorX_projMatrs = 0;
CvMat **observVisPoints = 0;
CvMat **projVisPoints = 0;
{
CV_ERROR( CV_StsOutOfRange, "Number of images must be more than zero" );
}
-
+
if( maxIter < 1 || maxIter > 2000 )
{
        CV_ERROR( CV_StsOutOfRange, "Maximum number of iterations must be in [1..2000]" );
}
-
+
if( epsilon < 0 )
{
CV_ERROR( CV_StsOutOfRange, "Epsilon parameter must be >= 0" );
}
-
+
if( !CV_IS_MAT(resultPoints4D) )
{
CV_ERROR( CV_StsUnsupportedFormat, "resultPoints4D must be a matrix 4 x NumPnt" );
CV_CALL( changeVectorX_points4D = cvCreateMat(4,numPoints,CV_64F));
CV_CALL( changeVectorX_projMatrs = cvCreateMat(3,4,CV_64F));
- int currImage;
-
/* ----- Test input params ----- */
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
/* Test size of input initial and result projection matrices */
if( !CV_IS_MAT(projMatrs[currImage]) )
/* ----- End test ----- */
/* Copy projection matrices to vectorX0 */
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
CV_CALL( vectorX_projMatrs[currImage] = cvCreateMat(3,4,CV_64F));
CV_CALL( newVectorX_projMatrs[currImage] = cvCreateMat(3,4,CV_64F));
CV_CALL( workMatrsInvVi[i] = cvCreateMat(4,4,CV_64F) );
}
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
CV_CALL( matrsUk[currImage] = cvCreateMat(12,12,CV_64F) );
CV_CALL( workMatrsUk[currImage] = cvCreateMat(12,12,CV_64F) );
/* Compute error with observed value and computed projection */
double prevError;
prevError = 0;
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
cvSub(observVisPoints[currImage],projVisPoints[currImage],errorProjPoints[currImage]);
double currNorm = cvNorm(errorProjPoints[currImage]);
fprintf(file,"projection errors\n");
        /* Print all projection errors */
- int currImage;
- for( currImage = 0; currImage < numImages; currImage++)
+ for(int currImage = 0; currImage < numImages; currImage++)
{
fprintf(file,"\nImage=%d\n",currImage);
int numPn = errorProjPoints[currImage]->cols;
double norm = cvNorm(vectorX_projMatrs[i]);
fprintf(file," test 6.01 prev normProj=%lf\n",norm);
}
-
+
fclose(file);
}
#endif
double norm = cvNorm(matrsUk[i]);
fprintf(file," test 6.01 prev matrsUk=%lf\n",norm);
}
-
+
for( i = 0; i < numPoints; i++ )
{
double norm = cvNorm(matrsVi[i]);
}
#endif
/* Copy matrices Uk to work matrices Uk */
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
cvCopy(matrsUk[currImage],workMatrsUk[currImage]);
}
double norm = cvNorm(matrsUk[i]);
fprintf(file," test 6.01 post1 matrsUk=%lf\n",norm);
}
-
+
for( i = 0; i < numPoints; i++ )
{
double norm = cvNorm(matrsVi[i]);
{
cvCopy(matrsVi[currV],workMatrVi);
- for( int i = 0; i < 4; i++ )
+ for( i = 0; i < 4; i++ )
{
cvmSet(workMatrVi,i,i,cvmGet(matrsVi[currV],i,i)*(1+alpha) );
}
}
/* Add alpha to matrUk and make matrix workMatrsUk */
- for( currImage = 0; currImage< numImages; currImage++ )
+ for(int currImage = 0; currImage< numImages; currImage++ )
{
for( i = 0; i < 12; i++ )
int currRowV;
for( currRowV = 0; currRowV < 4; currRowV++ )
{
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
for( int currCol = 0; currCol < 12; currCol++ )/* For each column of transposed matrix W */
{
cvmMul(matrW,matrTmpSys1,matrSysDeltaP);
/* need to compute U-matrTmpSys2. But we compute matTmpSys2-U */
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
CvMat subMatr;
cvGetSubRect(matrSysDeltaP,&subMatr,cvRect(currImage*12,currImage*12,12,12));
FILE* file;
file = fopen( TRACK_BUNDLE_FILE_DELTAP ,"w");
- int currImage;
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
fprintf(file,"\nImage=%d\n",currImage);
int i;
/* We know delta and compute new value of vector X: nextVectX = vectX + deltas */
/* Compute new P */
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
for( i = 0; i < 3; i++ )
{
icvProjPointsStatusFunc(numImages, newVectorX_points4D, newVectorX_projMatrs, pointsPres, projVisPoints);
/* Compute error with observed value and computed projection */
double newError = 0;
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
cvSub(observVisPoints[currImage],projVisPoints[currImage],errorProjPoints[currImage]);
double currNorm = cvNorm(errorProjPoints[currImage]);
newError += currNorm * currNorm;
}
newError = sqrt(newError);
-
+
currIter++;
/* Print all projection errors */
#if 0
fprintf(file,"projection errors\n");
- int currImage;
- for( currImage = 0; currImage < numImages; currImage++)
+ for(int currImage = 0; currImage < numImages; currImage++)
{
fprintf(file,"\nImage=%d\n",currImage);
int numPn = errorProjPoints[currImage]->cols;
double currNorm1 = 0;
double currNorm2 = 0;
/* compute norm for projection matrices */
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
currNorm1 = cvNorm(newVectorX_projMatrs[currImage],vectorX_projMatrs[currImage]);
currNorm2 = cvNorm(newVectorX_projMatrs[currImage]);
}
alpha /= 10;
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
cvCopy(newVectorX_projMatrs[currImage],vectorX_projMatrs[currImage]);
}
} while( change > epsilon && currIter < maxIter );
-
+
/*--------------------------------------------*/
    /* Optimization complete: copy the computed parameters */
/* Copy projection matrices */
- for( currImage = 0; currImage < numImages; currImage++ )
+ for(int currImage = 0; currImage < numImages; currImage++ )
{
cvCopy(newVectorX_projMatrs[currImage],resultProjMatrs[currImage]);
}
/* Valery Mosyagin */
+#if 0
+
typedef void (*pointer_LMJac)( const CvMat* src, CvMat* dst );
typedef void (*pointer_LMFunc)( const CvMat* src, CvMat* dst );
/* Jacobian computation for trifocal case */
-void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
+static void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
{
CV_FUNCNAME( "icvJacobianFunction_ProjTrifocal" );
__BEGIN__;
/* Fill Jacobian matrix */
int currProjPoint;
int currMatr;
-
+
cvZero(Jacobian);
for( currMatr = 0; currMatr < 3; currMatr++ )
{
{
for( i = 0; i < 4; i++ )// for X,Y,Z,W
{
- cvmSet( Jacobian,
+ cvmSet( Jacobian,
currMatr*numPoints*2+currProjPoint*2+j, 36+currProjPoint*4+i,
(p[j*4+i]*piX[2]-p[8+i]*piX[j]) * tmp3 );
}
return;
}
-void icvFunc_ProjTrifocal(const CvMat *vectX, CvMat *resFunc)
+static void icvFunc_ProjTrifocal(const CvMat *vectX, CvMat *resFunc)
{
    /* Computes the function at a given point */
    /* Projects points using the 3 projection matrices and the 3D points */
/*----------------------------------------------------------------------------------------*/
-void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
+static void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
CvMat **resultProjMatrs, CvMat *resultPoints4D)
{
{
CV_ERROR( CV_StsNullPtr, "Some of projPoints is a NULL pointer" );
}
-
+
if( resultProjMatrs[i] == 0 )
{
CV_ERROR( CV_StsNullPtr, "Some of resultProjMatrs is a NULL pointer" );
cvmSet(vectorX0,36 + currPoint*4 + 3,0,cvmGet(points4D,3,currPoint));
}
-
+
/* Allocate memory for result */
cvLevenbergMarquardtOptimization( icvJacobianFunction_ProjTrifocal, icvFunc_ProjTrifocal,
vectorX0,observRes,optimX,100,1e-6);
/*------------------------------------------------------------------------------*/
/* Create good points using status information */
-void icvCreateGoodPoints(CvMat *points,CvMat **goodPoints, CvMat *status)
+static void icvCreateGoodPoints(CvMat *points,CvMat **goodPoints, CvMat *status)
{
*goodPoints = 0;
return;
}
+#endif
L1 = sqrt( (double)P1->x*P1->x + P1->y*P1->y);
L2 = sqrt( (double)P2->x*P2->x + P2->y*P2->y);
-
+
L_min = MIN(L1, L2);
dL = fabs( L1 - L2 );
////////////////////////////////////////////////////////////////////////////////////
+CvPoint2D32f Q( CvPoint2D32f q0, CvPoint2D32f q1, CvPoint2D32f q2, double t );
+double angle( CvPoint2D32f A, CvPoint2D32f B );
+
double _cvBendingWork( CvPoint2D32f* B0,
CvPoint2D32f* F0,
CvPoint2D32f* B1,
CvPoint2D32f* F1/*,
CvPoint* K*/)
{
- CvPoint2D32f Q( CvPoint2D32f q0, CvPoint2D32f q1, CvPoint2D32f q2, double t );
- double angle( CvPoint2D32f A, CvPoint2D32f B );
-
CvPoint2D32f Q0, Q1, Q2;
CvPoint2D32f Q1_nm = { 0, 0 }, Q2_nm = { 0, 0 };
double d0, d1, d2, des, t_zero;
d_angle = d_angle - CV_PI*0.5;
d_angle = fabs(d_angle);
-
+
K->x = -K->x;
K->y = -K->y;
B1->x = -B1->x;
small_edge.y = NULL_EDGE*edges1[i-2].y;
w1 = W[i-1][j-1].w_east + _cvBendingWork(&edges1[i-2],
- &edges1[i-1],
+ &edges1[i-1],
/*&null_edge*/&small_edge,
&edges2[j-1]/*,
&edges2[j-2]*/);
small_edge.y = NULL_EDGE*edges2[j-2].y;
w3 = W[i-1][j-1].w_south + _cvBendingWork( /*&null_edge*/&small_edge,
- &edges1[i-1],
+ &edges1[i-1],
&edges2[j-2],
&edges2[j-1]/*,
&edges1[i-2]*/);
}
}
+
//===================================================
CvPoint2D32f Q(CvPoint2D32f q0,CvPoint2D32f q1,CvPoint2D32f q2,double t)
{
q.x = (float)(q0.x*(1-t)*(1-t) + 2*q1.x*t*(1-t) + q2.x*t*t);
q.y = (float)(q0.y*(1-t)*(1-t) + 2*q1.y*t*(1-t) + q2.y*t*t);
- return q;
+ return q;
}
double angle(CvPoint2D32f A, CvPoint2D32f B)
{
return acos( (A.x*B.x + A.y*B.y)/sqrt( (double)(A.x*A.x + A.y*A.y)*(B.x*B.x + B.y*B.y) ) );
}
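/* Q() evaluates a quadratic Bezier curve with control points q0, q1, q2 at
   parameter t in [0,1]; angle() returns the angle between two vectors from the
   normalized dot product. A small illustrative check (the values below are
   chosen for the example, not taken from this code):

     CvPoint2D32f q0 = cvPoint2D32f(0, 0);
     CvPoint2D32f q1 = cvPoint2D32f(1, 1);
     CvPoint2D32f q2 = cvPoint2D32f(2, 0);
     CvPoint2D32f mid = Q(q0, q1, q2, 0.5);                        // (1.0, 0.5)
     double right = angle(cvPoint2D32f(1, 0), cvPoint2D32f(0, 1)); // pi/2
*/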
-
+#if 0
/***************************************************************************************\
*
* This function computes an intermediate polygon between contour1 and contour2
* param = [0,1]; 0 corresponds to contour1, 1 corresponds to contour2
*
\***************************************************************************************/
-CvSeq* icvBlendContours(CvSeq* contour1,
+static CvSeq* icvBlendContours(CvSeq* contour1,
CvSeq* contour2,
CvSeq* corr,
double param,
CvMemStorage* storage)
{
int j;
-
+
CvSeqWriter writer01;
CvSeqReader reader01;
int corr_point;
// Create output sequence.
- CvSeq* output = cvCreateSeq(0,
+ CvSeq* output = cvCreateSeq(0,
sizeof(CvSeq),
sizeof(CvPoint),
storage );
point1 = (CvPoint* )malloc( Ni*sizeof(CvPoint) );
point2 = (CvPoint* )malloc( Nj*sizeof(CvPoint) );
- // Initialize arrays of point
+ // Initialize arrays of point
cvCvtSeqToArray( contour1, point1, CV_WHOLE_SEQ );
cvCvtSeqToArray( contour2, point2, CV_WHOLE_SEQ );
i = Ni-1; //correspondence to points of contour1
for( ; corr; corr = corr->h_next )
- {
+ {
//Initializes process of sequential reading from sequence
cvStartReadSeq( corr, &reader01, 0 );
// Compute point of intermediate polygon.
point_output.x = cvRound(point1[i].x + param*( point2[corr_point].x - point1[i].x ));
point_output.y = cvRound(point1[i].y + param*( point2[corr_point].y - point1[i].y ));
-
+
// Write element to sequence.
CV_WRITE_SEQ_ELEM( point_output, writer01 );
}
}
// Updates sequence header.
cvFlushSeqWriter( &writer01 );
-
+
return output;
}
**************************************************************************************************/
-void icvCalcContoursCorrespondence(CvSeq* contour1,
- CvSeq* contour2,
- CvSeq** corr,
+static void icvCalcContoursCorrespondence(CvSeq* contour1,
+ CvSeq* contour2,
+ CvSeq** corr,
CvMemStorage* storage)
{
int i,j; // counter of cycles
edges1 = (CvPoint2D32f* )malloc( (Ni-1)*sizeof(CvPoint2D32f) );
edges2 = (CvPoint2D32f* )malloc( (Nj-1)*sizeof(CvPoint2D32f) );
- // Initialize arrays of point
+ // Initialize arrays of point
cvCvtSeqToArray( contour1, point1, CV_WHOLE_SEQ );
cvCvtSeqToArray( contour2, point2, CV_WHOLE_SEQ );
edges2[i].y = (float)( point2[i+1].y - point2[i].y );
};
- // Find infinity constant
+ // Find infinity constant
//inf=1;
/////////////
{
j=0;/////////
W[i][j].w_east = W[i-1][j].w_east;
- W[i][j].w_east = W[i][j].w_east /*+
+ W[i][j].w_east = W[i][j].w_east /*+
_cvBendingWork( &edges1[i-2], &edges1[i-1], &null_edge, &null_edge, NULL )*/;
W[i][j].w_east = W[i][j].w_east + _cvStretchingWork( &edges2[i-1], &null_edge );
W[i][j].path_e = PATH_TO_E;
-
+
j=1;//////////
W[i][j].w_south = inf;
small_edge.x = NULL_EDGE*edges1[i-2].x;
small_edge.y = NULL_EDGE*edges1[i-2].y;
- W[i][j].w_southeast = W[i][j].w_southeast +
+ W[i][j].w_southeast = W[i][j].w_southeast +
_cvBendingWork( &edges1[i-2], &edges1[i-1], /*&null_edge*/&small_edge, &edges2[j-1]/*, &edges2[Nj-2]*/);
W[i][j].path_se = PATH_TO_E;
}
for(j=2; j<Nj; j++)
- {
+ {
i=0;//////////
W[i][j].w_south = W[i][j-1].w_south;
W[i][j].w_south = W[i][j].w_south + _cvStretchingWork( &null_edge, &edges2[j-1] );
- W[i][j].w_south = W[i][j].w_south /*+
+ W[i][j].w_south = W[i][j].w_south /*+
_cvBendingWork( &null_edge, &null_edge, &edges2[j-2], &edges2[j-1], NULL )*/;
W[i][j].path_s = 3;
small_edge.x = NULL_EDGE*edges2[j-2].x;
small_edge.y = NULL_EDGE*edges2[j-2].y;
- W[i][j].w_southeast = W[i][j].w_southeast +
+ W[i][j].w_southeast = W[i][j].w_southeast +
_cvBendingWork( /*&null_edge*/&small_edge, &edges1[i-1], &edges2[j-2], &edges2[j-1]/*, &edges1[Ni-2]*/);
W[i][j].path_se = 3;
}
i=Ni-1;j=Nj-1;
- *corr = cvCreateSeq(0,
- sizeof(CvSeq),
+ *corr = cvCreateSeq(0,
+ sizeof(CvSeq),
sizeof(int),
storage );
{
CV_WRITE_SEQ_ELEM( j, writer );
- switch( path )
+ switch( path )
{
case PATH_TO_E:
path = W[i][j].path_e;
i--;
cvFlushSeqWriter( &writer );
- corr01->h_next = cvCreateSeq( 0,
- sizeof(CvSeq),
+ corr01->h_next = cvCreateSeq( 0,
+ sizeof(CvSeq),
sizeof(int),
storage );
corr01 = corr01->h_next;
cvStartAppendToSeq( corr01, &writer );
break;
-
+
case PATH_TO_SE:
path = W[i][j].path_se;
j--; i--;
cvFlushSeqWriter( &writer );
- corr01->h_next = cvCreateSeq( 0,
- sizeof(CvSeq),
+ corr01->h_next = cvCreateSeq( 0,
+ sizeof(CvSeq),
sizeof(int),
storage );
corr01 = corr01->h_next;
free(edges1);
free(edges2);
}
-
+#endif
#include <stdio.h>
namespace cv{
-
+
inline int round(float value)
{
if(value > 0)
return int(value - 0.5f);
}
}
-
+
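// resize_rect: scales the rectangle by the factor alpha about its center
// (alpha < 1 shrinks it, alpha > 1 grows it).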
inline CvRect resize_rect(CvRect rect, float alpha)
{
return cvRect(rect.x + round((float)(0.5*(1 - alpha)*rect.width)), rect.y + round((float)(0.5*(1 - alpha)*rect.height)),
round(rect.width*alpha), round(rect.height*alpha));
}
-
+
CvMat* ConvertImageToMatrix(IplImage* patch);
-
+
class CvCameraPose
{
public:
m_rotation = cvCreateMat(1, 3, CV_32FC1);
m_translation = cvCreateMat(1, 3, CV_32FC1);
};
-
+
~CvCameraPose()
{
cvReleaseMat(&m_rotation);
cvReleaseMat(&m_translation);
};
-
+
void SetPose(CvMat* rotation, CvMat* translation)
{
cvCopy(rotation, m_rotation);
cvCopy(translation, m_translation);
};
-
+
CvMat* GetRotation() {return m_rotation;};
CvMat* GetTranslation() {return m_translation;};
-
+
protected:
CvMat* m_rotation;
CvMat* m_translation;
};
-
+
// AffineTransformPatch: generates an affine-transformed image patch.
// - src: source image (ROI is supported)
// - dst: output image; its ROI should be half the size of the ROI of src
// - pose: parameters of the affine transformation
void AffineTransformPatch(IplImage* src, IplImage* dst, CvAffinePose pose);
-
+
// GenerateAffineTransformFromPose: generates an affine transformation matrix from a CvAffinePose instance
// - size: the size of image patch
// - pose: affine transformation
// - transform: 2x3 transformation matrix
void GenerateAffineTransformFromPose(CvSize size, CvAffinePose pose, CvMat* transform);
-
+
// Generates a random affine pose
CvAffinePose GenRandomAffinePose();
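/* A minimal usage sketch for the three helpers above; the 64x64 ROI, the
   coordinates, and the file name are assumptions for illustration only, not
   values taken from this code:

     IplImage* train = cvLoadImage("train.png", CV_LOAD_IMAGE_GRAYSCALE);
     cvSetImageROI(train, cvRect(100, 100, 64, 64));           // patch around a feature
     IplImage* warped = cvCreateImage(cvSize(32, 32), IPL_DEPTH_8U, 1); // half the src ROI
     CvAffinePose pose = GenRandomAffinePose();                // random phi/theta/lambda1/lambda2
     AffineTransformPatch(train, warped, pose);                // warped view of the patch
     CvMat* transform = cvCreateMat(2, 3, CV_32FC1);
     GenerateAffineTransformFromPose(cvSize(64, 64), pose, transform); // same warp as a 2x3 matrix
*/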
-
-
+
+
const static int num_mean_components = 500;
const static float noise_intensity = 0.15f;
-
-
+
+
static inline CvPoint rect_center(CvRect rect)
{
return cvPoint(rect.x + rect.width/2, rect.y + rect.height/2);
}
-
- void homography_transform(IplImage* frontal, IplImage* result, CvMat* homography)
- {
- cvWarpPerspective(frontal, result, homography);
- }
-
- CvAffinePose perturbate_pose(CvAffinePose pose, float noise)
+
+ // static void homography_transform(IplImage* frontal, IplImage* result, CvMat* homography)
+ // {
+ // cvWarpPerspective(frontal, result, homography);
+ // }
+
+ static CvAffinePose perturbate_pose(CvAffinePose pose, float noise)
{
        // perturb the pose parameters with random multiplicative and additive noise
float noise_mult_factor = 1 + (0.5f - float(rand())/RAND_MAX)*noise;
float noise_add_factor = noise_mult_factor - 1;
-
+
CvAffinePose pose_pert = pose;
pose_pert.phi += noise_add_factor;
pose_pert.theta += noise_mult_factor;
pose_pert.lambda1 *= noise_mult_factor;
pose_pert.lambda2 *= noise_mult_factor;
-
+
return pose_pert;
}
-
- void generate_mean_patch(IplImage* frontal, IplImage* result, CvAffinePose pose, int pose_count, float noise)
+
+ static void generate_mean_patch(IplImage* frontal, IplImage* result, CvAffinePose pose, int pose_count, float noise)
{
IplImage* sum = cvCreateImage(cvSize(result->width, result->height), IPL_DEPTH_32F, 1);
IplImage* workspace = cvCloneImage(result);
IplImage* workspace_float = cvCloneImage(sum);
-
+
cvSetZero(sum);
for(int i = 0; i < pose_count; i++)
{
CvAffinePose pose_pert = perturbate_pose(pose, noise);
-
+
AffineTransformPatch(frontal, workspace, pose_pert);
cvConvertScale(workspace, workspace_float);
cvAdd(sum, workspace_float, sum);
}
-
+
cvConvertScale(sum, result, 1.0f/pose_count);
-
+
cvReleaseImage(&workspace);
cvReleaseImage(&sum);
cvReleaseImage(&workspace_float);
}
-
- void generate_mean_patch_fast(IplImage* /*frontal*/, IplImage* /*result*/, CvAffinePose /*pose*/,
- CvMat* /*pca_hr_avg*/, CvMat* /*pca_hr_eigenvectors*/, const OneWayDescriptor* /*pca_descriptors*/)
- {
- /*for(int i = 0; i < pca_hr_eigenvectors->cols; i++)
- {
-
- }*/
- }
-
+
+ // static void generate_mean_patch_fast(IplImage* /*frontal*/, IplImage* /*result*/, CvAffinePose /*pose*/,
+ // CvMat* /*pca_hr_avg*/, CvMat* /*pca_hr_eigenvectors*/, const OneWayDescriptor* /*pca_descriptors*/)
+ // {
+ // /*for(int i = 0; i < pca_hr_eigenvectors->cols; i++)
+ // {
+
+ // }*/
+ // }
+
void readPCAFeatures(const char *filename, CvMat** avg, CvMat** eigenvectors, const char *postfix = "");
void readPCAFeatures(const FileNode &fn, CvMat** avg, CvMat** eigenvectors, const char* postfix = "");
void savePCAFeatures(FileStorage &fs, const char* postfix, CvMat* avg, CvMat* eigenvectors);
void loadPCAFeatures(const char* path, const char* images_list, vector<IplImage*>& patches, CvSize patch_size);
void generatePCAFeatures(const char* path, const char* img_filename, FileStorage& fs, const char* postfix,
CvSize patch_size, CvMat** avg, CvMat** eigenvectors);
-
+
void eigenvector2image(CvMat* eigenvector, IplImage* img);
void FindOneWayDescriptor(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
CvMat* avg = 0, CvMat* eigenvalues = 0);
-
+
void FindOneWayDescriptor(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch, int n,
std::vector<int>& desc_idxs, std::vector<int>& pose_idxs, std::vector<float>& distances,
CvMat* avg = 0, CvMat* eigenvalues = 0);
-
+
void FindOneWayDescriptor(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low, int m_pose_count, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
CvMat* avg = 0, CvMat* eigenvalues = 0);
-
+
void FindOneWayDescriptorEx(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch,
float scale_min, float scale_max, float scale_step,
int& desc_idx, int& pose_idx, float& distance, float& scale,
CvMat* avg, CvMat* eigenvectors);
-
+
void FindOneWayDescriptorEx(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch,
float scale_min, float scale_max, float scale_step,
int n, std::vector<int>& desc_idxs, std::vector<int>& pose_idxs,
std::vector<float>& distances, std::vector<float>& scales,
CvMat* avg, CvMat* eigenvectors);
-
+
void FindOneWayDescriptorEx(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low, int m_pose_count, IplImage* patch,
float scale_min, float scale_max, float scale_step,
int& desc_idx, int& pose_idx, float& distance, float& scale,
CvMat* avg, CvMat* eigenvectors);
-
+
inline CvRect fit_rect_roi_fixedsize(CvRect rect, CvRect roi)
{
CvRect fit = rect;
fit.y = MIN(fit.y, roi.y + roi.height - fit.height - 1);
return(fit);
}
-
+
inline CvRect fit_rect_fixedsize(CvRect rect, IplImage* img)
{
CvRect roi = cvGetImageROI(img);
return fit_rect_roi_fixedsize(rect, roi);
}
-
+
OneWayDescriptor::OneWayDescriptor()
{
m_pose_count = 0;
m_pca_dim_low = 100;
m_pca_dim_high = 100;
}
-
+
OneWayDescriptor::~OneWayDescriptor()
{
if(m_pose_count)
cvReleaseImage(&m_train_patch);
delete []m_samples;
delete []m_pca_coeffs;
-
+
if(!m_transforms)
{
delete []m_affine_poses;
}
}
}
-
+
void OneWayDescriptor::Allocate(int pose_count, CvSize size, int nChannels)
{
m_pose_count = pose_count;
m_samples = new IplImage* [m_pose_count];
m_pca_coeffs = new CvMat* [m_pose_count];
m_patch_size = cvSize(size.width/2, size.height/2);
-
+
if(!m_transforms)
{
m_affine_poses = new CvAffinePose[m_pose_count];
}
-
+
int length = m_pca_dim_low;//roi.width*roi.height;
for(int i = 0; i < m_pose_count; i++)
{
m_samples[i] = cvCreateImage(cvSize(size.width/2, size.height/2), IPL_DEPTH_32F, nChannels);
m_pca_coeffs[i] = cvCreateMat(1, length, CV_32FC1);
}
-
+
m_input_patch = cvCreateImage(GetPatchSize(), IPL_DEPTH_8U, 1);
m_train_patch = cvCreateImage(GetInputPatchSize(), IPL_DEPTH_8U, 1);
}
-
- void cvmSet2DPoint(CvMat* matrix, int row, int col, CvPoint2D32f point)
- {
- cvmSet(matrix, row, col, point.x);
- cvmSet(matrix, row, col + 1, point.y);
- }
-
- void cvmSet3DPoint(CvMat* matrix, int row, int col, CvPoint3D32f point)
- {
- cvmSet(matrix, row, col, point.x);
- cvmSet(matrix, row, col + 1, point.y);
- cvmSet(matrix, row, col + 2, point.z);
- }
-
+
+ // static void cvmSet2DPoint(CvMat* matrix, int row, int col, CvPoint2D32f point)
+ // {
+ // cvmSet(matrix, row, col, point.x);
+ // cvmSet(matrix, row, col + 1, point.y);
+ // }
+
+ // static void cvmSet3DPoint(CvMat* matrix, int row, int col, CvPoint3D32f point)
+ // {
+ // cvmSet(matrix, row, col, point.x);
+ // cvmSet(matrix, row, col + 1, point.y);
+ // cvmSet(matrix, row, col + 2, point.z);
+ // }
+
CvAffinePose GenRandomAffinePose()
{
const float scale_min = 0.8f;
pose.phi = float(rand())/RAND_MAX*360;
pose.lambda1 = scale_min + float(rand())/RAND_MAX*(scale_max - scale_min);
pose.lambda2 = scale_min + float(rand())/RAND_MAX*(scale_max - scale_min);
-
+
return pose;
}
-
+
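// The warp below is composed as R(theta - phi) * S(lambda1, lambda2) * R(phi),
// each transform taken about the patch center: rotate by phi, apply the
// anisotropic scaling, then rotate by the remaining angle (theta - phi).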
void GenerateAffineTransformFromPose(CvSize size, CvAffinePose pose, CvMat* transform)
{
CvMat* temp = cvCreateMat(3, 3, CV_32FC1);
cvmSet(temp, 2, 0, 0.0f);
cvmSet(temp, 2, 1, 0.0f);
cvmSet(temp, 2, 2, 1.0f);
-
+
CvMat rotation;
cvGetSubRect(temp, &rotation, cvRect(0, 0, 3, 2));
-
+
cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.phi, 1.0, &rotation);
cvCopy(temp, final);
-
+
cvmSet(temp, 0, 0, pose.lambda1);
cvmSet(temp, 0, 1, 0.0f);
cvmSet(temp, 1, 0, 0.0f);
cvmSet(temp, 0, 2, size.width/2*(1 - pose.lambda1));
cvmSet(temp, 1, 2, size.height/2*(1 - pose.lambda2));
cvMatMul(temp, final, final);
-
+
cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.theta - pose.phi, 1.0, &rotation);
cvMatMul(temp, final, final);
-
+
cvGetSubRect(final, &rotation, cvRect(0, 0, 3, 2));
cvCopy(&rotation, transform);
-
+
cvReleaseMat(&temp);
cvReleaseMat(&final);
}
-
+
void AffineTransformPatch(IplImage* src, IplImage* dst, CvAffinePose pose)
{
CvRect src_large_roi = cvGetImageROI(src);
-
+
IplImage* temp = cvCreateImage(cvSize(src_large_roi.width, src_large_roi.height), IPL_DEPTH_32F, src->nChannels);
cvSetZero(temp);
IplImage* temp2 = cvCloneImage(temp);
CvMat* rotation_phi = cvCreateMat(2, 3, CV_32FC1);
-
+
CvSize new_size = cvSize(cvRound(temp->width*pose.lambda1), cvRound(temp->height*pose.lambda2));
IplImage* temp3 = cvCreateImage(new_size, IPL_DEPTH_32F, src->nChannels);
-
+
cvConvertScale(src, temp);
cvResetImageROI(temp);
-
-
+
+
cv2DRotationMatrix(cvPoint2D32f(temp->width/2, temp->height/2), pose.phi, 1.0, rotation_phi);
cvWarpAffine(temp, temp2, rotation_phi);
-
+
cvSetZero(temp);
-
+
cvResize(temp2, temp3);
-
+
cv2DRotationMatrix(cvPoint2D32f(temp3->width/2, temp3->height/2), pose.theta - pose.phi, 1.0, rotation_phi);
cvWarpAffine(temp3, temp, rotation_phi);
-
+
cvSetImageROI(temp, cvRect(temp->width/2 - src_large_roi.width/4, temp->height/2 - src_large_roi.height/4,
src_large_roi.width/2, src_large_roi.height/2));
cvConvertScale(temp, dst);
cvReleaseMat(&rotation_phi);
-
+
cvReleaseImage(&temp3);
cvReleaseImage(&temp2);
cvReleaseImage(&temp);
}
-
+
void OneWayDescriptor::GenerateSamples(int pose_count, IplImage* frontal, int norm)
{
/* if(m_transforms)
}
//AffineTransformPatch(frontal, patch_8u, m_affine_poses[i]);
generate_mean_patch(frontal, patch_8u, m_affine_poses[i], num_mean_components, noise_intensity);
-
+
double scale = 1.0f;
if(norm)
{
scale = 1/sum;
}
cvConvertScale(patch_8u, m_samples[i], scale);
-
+
#if 0
double maxval;
cvMinMaxLoc(m_samples[i], 0, &maxval);
}
cvReleaseImage(&patch_8u);
}
-
+
void OneWayDescriptor::GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg,
CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors)
{
cvResize(frontal, m_train_patch);
frontal = m_train_patch;
}
-
+
CvMat* pca_coeffs = cvCreateMat(1, pca_hr_eigenvectors->cols, CV_32FC1);
double maxval;
cvMinMaxLoc(frontal, 0, &maxval);
CvMat* frontal_data = ConvertImageToMatrix(frontal);
-
+
double sum = cvSum(frontal_data).val[0];
cvConvertScale(frontal_data, frontal_data, 1.0f/sum);
cvProjectPCA(frontal_data, pca_hr_avg, pca_hr_eigenvectors, pca_coeffs);
double coeff = cvmGet(pca_coeffs, 0, j);
IplImage* patch = pca_descriptors[j + 1].GetPatch(i);
cvAddWeighted(m_samples[i], 1.0, patch, coeff, 0, m_samples[i]);
-
+
#if 0
printf("coeff%d = %f\n", j, coeff);
IplImage* test = cvCreateImage(cvSize(12, 12), IPL_DEPTH_8U, 1);
cvWaitKey(0);
#endif
}
-
+
cvAdd(pca_descriptors[0].GetPatch(i), m_samples[i], m_samples[i]);
- double sum = cvSum(m_samples[i]).val[0];
- cvConvertScale(m_samples[i], m_samples[i], 1.0/sum);
-
+ double sm = cvSum(m_samples[i]).val[0];
+ cvConvertScale(m_samples[i], m_samples[i], 1.0/sm);
+
#if 0
IplImage* test = cvCreateImage(cvSize(12, 12), IPL_DEPTH_8U, 1);
/* IplImage* temp1 = cvCreateImage(cvSize(12, 12), IPL_DEPTH_32F, 1);
cvConvertScale(temp1, test, 255.0/maxval);*/
cvMinMaxLoc(m_samples[i], 0, &maxval);
cvConvertScale(m_samples[i], test, 255.0/maxval);
-
+
cvNamedWindow("1", 1);
cvShowImage("1", frontal);
cvNamedWindow("2", 1);
cvWaitKey(0);
#endif
}
-
+
cvReleaseMat(&pca_coeffs);
cvReleaseMat(&frontal_data);
}
-
+
void OneWayDescriptor::SetTransforms(CvAffinePose* poses, CvMat** transforms)
{
if(m_affine_poses)
{
delete []m_affine_poses;
}
-
+
m_affine_poses = poses;
m_transforms = transforms;
}
-
+
void OneWayDescriptor::Initialize(int pose_count, IplImage* frontal, const char* feature_name, int norm)
{
m_feature_name = std::string(feature_name);
CvRect roi = cvGetImageROI(frontal);
m_center = rect_center(roi);
-
+
Allocate(pose_count, cvSize(roi.width, roi.height), frontal->nChannels);
-
+
GenerateSamples(pose_count, frontal, norm);
}
-
+
void OneWayDescriptor::InitializeFast(int pose_count, IplImage* frontal, const char* feature_name,
CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors)
{
m_feature_name = std::string(feature_name);
CvRect roi = cvGetImageROI(frontal);
m_center = rect_center(roi);
-
+
Allocate(pose_count, cvSize(roi.width, roi.height), frontal->nChannels);
-
+
GenerateSamplesFast(frontal, pca_hr_avg, pca_hr_eigenvectors, pca_descriptors);
}
-
+
void OneWayDescriptor::InitializePCACoeffs(CvMat* avg, CvMat* eigenvectors)
{
for(int i = 0; i < m_pose_count; i++)
ProjectPCASample(m_samples[i], avg, eigenvectors, m_pca_coeffs[i]);
}
}
-
+
void OneWayDescriptor::ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const
{
CvMat* patch_mat = ConvertImageToMatrix(patch);
CvMat temp1;
cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
cvCopy(&temp1, pca_coeffs);
-
+
cvReleaseMat(&temp);
cvReleaseMat(&patch_mat);
}
-
+
void OneWayDescriptor::EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvectors) const
{
if(avg == 0)
}
else
{
-
+
}
return;
}
roi = cvGetImageROI((IplImage*)patch);
}
}
-
+
CvMat* pca_coeffs = cvCreateMat(1, m_pca_dim_low, CV_32FC1);
-
+
if (CV_IS_MAT(patch))
{
cvCopy((CvMat*)patch, pca_coeffs);
ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
cvReleaseImage(&patch_32f);
}
-
-
+
+
distance = 1e10;
pose_idx = -1;
-
+
for(int i = 0; i < m_pose_count; i++)
{
double dist = cvNorm(m_pca_coeffs[i], pca_coeffs);
- // float dist = 0;
- // float data1, data2;
- // //CvMat* pose_pca_coeffs = m_pca_coeffs[i];
- // for (int x=0; x < pca_coeffs->width; x++)
- // for (int y =0 ; y < pca_coeffs->height; y++)
- // {
- // data1 = ((float*)(pca_coeffs->data.ptr + pca_coeffs->step*x))[y];
- // data2 = ((float*)(m_pca_coeffs[i]->data.ptr + m_pca_coeffs[i]->step*x))[y];
- // dist+=(data1-data2)*(data1-data2);
- // }
+ // float dist = 0;
+ // float data1, data2;
+ // //CvMat* pose_pca_coeffs = m_pca_coeffs[i];
+ // for (int x=0; x < pca_coeffs->width; x++)
+ // for (int y =0 ; y < pca_coeffs->height; y++)
+ // {
+ // data1 = ((float*)(pca_coeffs->data.ptr + pca_coeffs->step*x))[y];
+ // data2 = ((float*)(m_pca_coeffs[i]->data.ptr + m_pca_coeffs[i]->step*x))[y];
+ // dist+=(data1-data2)*(data1-data2);
+ // }
////#if 1
- // for (int j = 0; j < m_pca_dim_low; j++)
- // {
- // dist += (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j])*(pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j]);
- // }
+ // for (int j = 0; j < m_pca_dim_low; j++)
+ // {
+ // dist += (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j])*(pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j]);
+ // }
//#else
- // for (int j = 0; j <= m_pca_dim_low - 4; j += 4)
- // {
- // dist += (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j])*
- // (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j]);
- // dist += (pose_pca_coeffs->data.fl[j+1]- pca_coeffs->data.fl[j+1])*
- // (pose_pca_coeffs->data.fl[j+1]- pca_coeffs->data.fl[j+1]);
- // dist += (pose_pca_coeffs->data.fl[j+2]- pca_coeffs->data.fl[j+2])*
- // (pose_pca_coeffs->data.fl[j+2]- pca_coeffs->data.fl[j+2]);
- // dist += (pose_pca_coeffs->data.fl[j+3]- pca_coeffs->data.fl[j+3])*
- // (pose_pca_coeffs->data.fl[j+3]- pca_coeffs->data.fl[j+3]);
- // }
+ // for (int j = 0; j <= m_pca_dim_low - 4; j += 4)
+ // {
+ // dist += (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j])*
+ // (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j]);
+ // dist += (pose_pca_coeffs->data.fl[j+1]- pca_coeffs->data.fl[j+1])*
+ // (pose_pca_coeffs->data.fl[j+1]- pca_coeffs->data.fl[j+1]);
+ // dist += (pose_pca_coeffs->data.fl[j+2]- pca_coeffs->data.fl[j+2])*
+ // (pose_pca_coeffs->data.fl[j+2]- pca_coeffs->data.fl[j+2]);
+ // dist += (pose_pca_coeffs->data.fl[j+3]- pca_coeffs->data.fl[j+3])*
+ // (pose_pca_coeffs->data.fl[j+3]- pca_coeffs->data.fl[j+3]);
+ // }
//#endif
if(dist < distance)
{
pose_idx = i;
}
}
-
+
cvReleaseMat(&pca_coeffs);
}
-
+
void OneWayDescriptor::EstimatePose(IplImage* patch, int& pose_idx, float& distance) const
{
distance = 1e10;
pose_idx = -1;
-
+
CvRect roi = cvGetImageROI(patch);
IplImage* patch_32f = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_32F, patch->nChannels);
double sum = cvSum(patch).val[0];
cvConvertScale(patch, patch_32f, 1/sum);
-
+
for(int i = 0; i < m_pose_count; i++)
{
if(m_samples[i]->width != patch_32f->width || m_samples[i]->height != patch_32f->height)
double dist = cvNorm(m_samples[i], patch_32f);
//float dist = 0.0f;
//float i1,i2;
-
+
//for (int y = 0; y<patch_32f->height; y++)
- // for (int x = 0; x< patch_32f->width; x++)
- // {
- // i1 = ((float*)(m_samples[i]->imageData + m_samples[i]->widthStep*y))[x];
- // i2 = ((float*)(patch_32f->imageData + patch_32f->widthStep*y))[x];
- // dist+= (i1-i2)*(i1-i2);
- // }
-
+ // for (int x = 0; x< patch_32f->width; x++)
+ // {
+ // i1 = ((float*)(m_samples[i]->imageData + m_samples[i]->widthStep*y))[x];
+ // i2 = ((float*)(patch_32f->imageData + patch_32f->widthStep*y))[x];
+ // dist+= (i1-i2)*(i1-i2);
+ // }
+
if(dist < distance)
{
distance = (float)dist;
pose_idx = i;
}
-
+
#if 0
IplImage* img1 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
IplImage* img2 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
cvConvertScale(m_samples[i], img1, 255.0/maxval);
cvMinMaxLoc(patch_32f, 0, &maxval);
cvConvertScale(patch_32f, img2, 255.0/maxval);
-
+
cvNamedWindow("1", 1);
cvShowImage("1", img1);
cvNamedWindow("2", 1);
cvWaitKey(0);
#endif
}
-
+
cvReleaseImage(&patch_32f);
}
-
+
void OneWayDescriptor::Save(const char* path)
{
for(int i = 0; i < m_pose_count; i++)
char buf[1024];
sprintf(buf, "%s/patch_%04d.jpg", path, i);
IplImage* patch = cvCreateImage(cvSize(m_samples[i]->width, m_samples[i]->height), IPL_DEPTH_8U, m_samples[i]->nChannels);
-
+
double maxval;
cvMinMaxLoc(m_samples[i], 0, &maxval);
cvConvertScale(m_samples[i], patch, 255/maxval);
-
+
cvSaveImage(buf, patch);
-
+
cvReleaseImage(&patch);
}
}
-
+
void OneWayDescriptor::Write(CvFileStorage* fs, const char* name)
{
CvMat* mat = cvCreateMat(m_pose_count, m_samples[0]->width*m_samples[0]->height, CV_32FC1);
-
+
// prepare data to write as a single matrix
for(int i = 0; i < m_pose_count; i++)
{
}
}
}
-
+
cvWrite(fs, name, mat);
-
+
cvReleaseMat(&mat);
}
-
+
int OneWayDescriptor::ReadByName(const FileNode &parent, const char* name)
{
CvMat* mat = reinterpret_cast<CvMat*> (parent[name].readObj ());
{
return 0;
}
-
-
+
+
for(int i = 0; i < m_pose_count; i++)
{
for(int y = 0; y < m_samples[i]->height; y++)
}
}
}
-
+
cvReleaseMat(&mat);
return 1;
}
{
return ReadByName (FileNode (fs, parent), name);
}
-
+
IplImage* OneWayDescriptor::GetPatch(int index)
{
return m_samples[index];
}
-
+
CvAffinePose OneWayDescriptor::GetPose(int index) const
{
return m_affine_poses[index];
}
-
+
void FindOneWayDescriptor(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
CvMat* avg, CvMat* eigenvectors)
{
IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
if(_roi.width != patch_width|| _roi.height != patch_height)
{
-
+
cvResize(patch, test_img);
_roi = cvGetImageROI(test_img);
}
IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
double sum = cvSum(test_img).val[0];
cvConvertScale(test_img, patch_32f, 1.0f/sum);
-
+
//ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
//Projecting PCA
CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
cvReleaseMat(&temp);
cvReleaseMat(&patch_mat);
//End of projecting
-
+
cvReleaseImage(&patch_32f);
cvReleaseImage(&test_img);
}
-
+
//--------
-
-
-
+
+
+
for(int i = 0; i < desc_count; i++)
{
int _pose_idx = -1;
float _distance = 0;
-
+
#if 0
descriptors[i].EstimatePose(patch, _pose_idx, _distance);
#else
descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
}
#endif
-
+
if(_distance < distance)
{
desc_idx = i;
}
cvReleaseMat(&pca_coeffs);
}
-
+
#if defined(_KDTREE)
-
+
void FindOneWayDescriptor(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low, int m_pose_count, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
CvMat* avg, CvMat* eigenvectors)
{
int patch_height = patch_size.height;
//if (avg)
//{
- CvRect _roi = cvGetImageROI((IplImage*)patch);
- IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
- if(_roi.width != patch_width|| _roi.height != patch_height)
- {
-
- cvResize(patch, test_img);
- _roi = cvGetImageROI(test_img);
- }
- else
- {
- cvCopy(patch,test_img);
- }
- IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
- float sum = cvSum(test_img).val[0];
- cvConvertScale(test_img, patch_32f, 1.0f/sum);
-
- //ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
- //Projecting PCA
- CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
- CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
- cvProjectPCA(patch_mat, avg, eigenvectors, temp);
- CvMat temp1;
- cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
- cvCopy(&temp1, pca_coeffs);
- cvReleaseMat(&temp);
- cvReleaseMat(&patch_mat);
- //End of projecting
-
- cvReleaseImage(&patch_32f);
- cvReleaseImage(&test_img);
- // }
-
+ CvRect _roi = cvGetImageROI((IplImage*)patch);
+ IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
+ if(_roi.width != patch_width|| _roi.height != patch_height)
+ {
+
+ cvResize(patch, test_img);
+ _roi = cvGetImageROI(test_img);
+ }
+ else
+ {
+ cvCopy(patch,test_img);
+ }
+ IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
+ float sum = cvSum(test_img).val[0];
+ cvConvertScale(test_img, patch_32f, 1.0f/sum);
+
+ //ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
+ //Projecting PCA
+ CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
+ CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
+ cvProjectPCA(patch_mat, avg, eigenvectors, temp);
+ CvMat temp1;
+ cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
+ cvCopy(&temp1, pca_coeffs);
+ cvReleaseMat(&temp);
+ cvReleaseMat(&patch_mat);
+ //End of projecting
+
+ cvReleaseImage(&patch_32f);
+ cvReleaseImage(&test_img);
+ // }
+
//--------
-
- //float* target = new float[m_pca_dim_low];
- //::cvflann::KNNResultSet res(1,pca_coeffs->data.fl,m_pca_dim_low);
- //::cvflann::SearchParams params;
- //params.checks = -1;
-
- //int maxDepth = 1000000;
- //int neighbors_count = 1;
- //int* neighborsIdx = new int[neighbors_count];
- //float* distances = new float[neighbors_count];
- //if (m_pca_descriptors_tree->findNearest(pca_coeffs->data.fl,neighbors_count,maxDepth,neighborsIdx,0,distances) > 0)
- //{
- // desc_idx = neighborsIdx[0] / m_pose_count;
- // pose_idx = neighborsIdx[0] % m_pose_count;
- // distance = distances[0];
- //}
- //delete[] neighborsIdx;
- //delete[] distances;
-
- cv::Mat m_object(1, m_pca_dim_low, CV_32F);
- cv::Mat m_indices(1, 1, CV_32S);
- cv::Mat m_dists(1, 1, CV_32F);
-
- float* object_ptr = m_object.ptr<float>(0);
- for (int i=0;i<m_pca_dim_low;i++)
- {
- object_ptr[i] = pca_coeffs->data.fl[i];
- }
-
- m_pca_descriptors_tree->knnSearch(m_object, m_indices, m_dists, 1, cv::flann::SearchParams(-1) );
-
- desc_idx = ((int*)(m_indices.ptr<int>(0)))[0] / m_pose_count;
- pose_idx = ((int*)(m_indices.ptr<int>(0)))[0] % m_pose_count;
- distance = ((float*)(m_dists.ptr<float>(0)))[0];
-
- // delete[] target;
-
-
+
+ //float* target = new float[m_pca_dim_low];
+ //::cvflann::KNNResultSet res(1,pca_coeffs->data.fl,m_pca_dim_low);
+ //::cvflann::SearchParams params;
+ //params.checks = -1;
+
+ //int maxDepth = 1000000;
+ //int neighbors_count = 1;
+ //int* neighborsIdx = new int[neighbors_count];
+ //float* distances = new float[neighbors_count];
+ //if (m_pca_descriptors_tree->findNearest(pca_coeffs->data.fl,neighbors_count,maxDepth,neighborsIdx,0,distances) > 0)
+ //{
+ // desc_idx = neighborsIdx[0] / m_pose_count;
+ // pose_idx = neighborsIdx[0] % m_pose_count;
+ // distance = distances[0];
+ //}
+ //delete[] neighborsIdx;
+ //delete[] distances;
+
+ cv::Mat m_object(1, m_pca_dim_low, CV_32F);
+ cv::Mat m_indices(1, 1, CV_32S);
+ cv::Mat m_dists(1, 1, CV_32F);
+
+ float* object_ptr = m_object.ptr<float>(0);
+ for (int i=0;i<m_pca_dim_low;i++)
+ {
+ object_ptr[i] = pca_coeffs->data.fl[i];
+ }
+
+ m_pca_descriptors_tree->knnSearch(m_object, m_indices, m_dists, 1, cv::flann::SearchParams(-1) );
+
+ desc_idx = ((int*)(m_indices.ptr<int>(0)))[0] / m_pose_count;
+ pose_idx = ((int*)(m_indices.ptr<int>(0)))[0] % m_pose_count;
+ distance = ((float*)(m_dists.ptr<float>(0)))[0];
+
+ // delete[] target;
+
+
// for(int i = 0; i < desc_count; i++)
// {
// int _pose_idx = -1;
//#if 0
// descriptors[i].EstimatePose(patch, _pose_idx, _distance);
//#else
- // if (!avg)
- // {
- // descriptors[i].EstimatePosePCA(patch, _pose_idx, _distance, avg, eigenvectors);
- // }
- // else
- // {
- // descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
- // }
+ // if (!avg)
+ // {
+ // descriptors[i].EstimatePosePCA(patch, _pose_idx, _distance, avg, eigenvectors);
+ // }
+ // else
+ // {
+ // descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
+ // }
//#endif
//
// if(_distance < distance)
IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
if(_roi.width != patch_width|| _roi.height != patch_height)
{
-
+
cvResize(patch, test_img);
_roi = cvGetImageROI(test_img);
}
IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
double sum = cvSum(test_img).val[0];
cvConvertScale(test_img, patch_32f, 1.0f/sum);
-
+
//ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
//Projecting PCA
CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
cvReleaseMat(&temp);
cvReleaseMat(&patch_mat);
//End of projecting
-
+
cvReleaseImage(&patch_32f);
cvReleaseImage(&test_img);
}
//--------
-
-
-
+
+
+
for(int i = 0; i < desc_count; i++)
{
int _pose_idx = -1;
float _distance = 0;
-
+
#if 0
descriptors[i].EstimatePose(patch, _pose_idx, _distance);
#else
descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
}
#endif
-
+
for (int j=0;j<n;j++)
{
if(_distance < distances[j])
}
cvReleaseMat(&pca_coeffs);
}
-
+
void FindOneWayDescriptorEx(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch,
float scale_min, float scale_max, float scale_step,
int& desc_idx, int& pose_idx, float& distance, float& scale,
CvSize patch_size = descriptors[0].GetPatchSize();
IplImage* input_patch;
CvRect roi;
-
+
input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
roi = cvGetImageROI((IplImage*)patch);
-
+
int _desc_idx, _pose_idx;
float _distance;
distance = 1e10;
for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
{
// printf("Scale = %f\n", cur_scale);
-
+
CvRect roi_scaled = resize_rect(roi, cur_scale);
cvSetImageROI(patch, roi_scaled);
cvResize(patch, input_patch);
-
-
+
+
#if 0
if(roi.x > 244 && roi.y < 200)
{
cvWaitKey(0);
}
#endif
-
+
FindOneWayDescriptor(desc_count, descriptors, input_patch, _desc_idx, _pose_idx, _distance, avg, eigenvectors);
if(_distance < distance)
{
scale = cur_scale;
}
}
-
-
+
+
cvSetImageROI((IplImage*)patch, roi);
cvReleaseImage(&input_patch);
-
+
}
-
+
void FindOneWayDescriptorEx(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch,
float scale_min, float scale_max, float scale_step,
int n, std::vector<int>& desc_idxs, std::vector<int>& pose_idxs,
CvSize patch_size = descriptors[0].GetPatchSize();
IplImage* input_patch;
CvRect roi;
-
+
input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
roi = cvGetImageROI((IplImage*)patch);
-
+
// float min_distance = 1e10;
std::vector<int> _desc_idxs;
_desc_idxs.resize(n);
_pose_idxs.resize(n);
std::vector<float> _distances;
_distances.resize(n);
-
-
+
+
for (int i=0;i<n;i++)
{
distances[i] = 1e10;
}
-
+
for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
{
-
+
CvRect roi_scaled = resize_rect(roi, cur_scale);
cvSetImageROI(patch, roi_scaled);
cvResize(patch, input_patch);
-
-
-
+
+
+
FindOneWayDescriptor(desc_count, descriptors, input_patch, n,_desc_idxs, _pose_idxs, _distances, avg, eigenvectors);
for (int i=0;i<n;i++)
{
}
}
}
-
-
-
+
+
+
cvSetImageROI((IplImage*)patch, roi);
cvReleaseImage(&input_patch);
}
-
+
#if defined(_KDTREE)
void FindOneWayDescriptorEx(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low,
int m_pose_count, IplImage* patch,
{
IplImage* input_patch;
CvRect roi;
-
+
input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
roi = cvGetImageROI((IplImage*)patch);
-
+
int _desc_idx, _pose_idx;
float _distance;
distance = 1e10;
for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
{
// printf("Scale = %f\n", cur_scale);
-
+
CvRect roi_scaled = resize_rect(roi, cur_scale);
cvSetImageROI(patch, roi_scaled);
cvResize(patch, input_patch);
-
+
FindOneWayDescriptor(m_pca_descriptors_tree, patch_size, m_pca_dim_low, m_pose_count, input_patch, _desc_idx, _pose_idx, _distance, avg, eigenvectors);
if(_distance < distance)
{
scale = cur_scale;
}
}
-
-
+
+
cvSetImageROI((IplImage*)patch, roi);
cvReleaseImage(&input_patch);
-
+
}
#endif
-
+
const char* OneWayDescriptor::GetFeatureName() const
{
return m_feature_name.c_str();
}
-
+
CvPoint OneWayDescriptor::GetCenter() const
{
return m_center;
}
-
+
int OneWayDescriptor::GetPCADimLow() const
{
return m_pca_dim_low;
}
-
+
int OneWayDescriptor::GetPCADimHigh() const
{
return m_pca_dim_high;
{
CvRect roi = cvGetImageROI(patch);
CvMat* mat = cvCreateMat(1, roi.width*roi.height, CV_32FC1);
-
+
if(patch->depth == 32)
{
for(int y = 0; y < roi.height; y++)
printf("Image depth %d is not supported\n", patch->depth);
return 0;
}
-
+
return mat;
}
-
+
OneWayDescriptorBase::OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path,
const char* pca_config, const char* pca_hr_config,
const char* pca_desc_config, int pyr_levels,
m_pca_descriptors_matrix = 0;
m_pca_descriptors_tree = 0;
#endif
- // m_pca_descriptors_matrix = 0;
+ // m_pca_descriptors_matrix = 0;
m_patch_size = patch_size;
m_pose_count = pose_count;
m_pyr_levels = pyr_levels;
m_poses = 0;
m_transforms = 0;
-
+
m_pca_avg = 0;
m_pca_eigenvectors = 0;
m_pca_hr_avg = 0;
m_pca_hr_eigenvectors = 0;
m_pca_descriptors = 0;
-
+
m_descriptors = 0;
-
+
if(train_path == 0 || strlen(train_path) == 0)
{
// skip pca loading
sprintf(pca_hr_config_filename, "%s/%s", train_path, pca_hr_config);
readPCAFeatures(pca_hr_config_filename, &m_pca_hr_avg, &m_pca_hr_eigenvectors);
}
-
+
m_pca_descriptors = new OneWayDescriptor[m_pca_dim_high + 1];
-
+
#if !defined(_GH_REGIONS)
if(pca_desc_config && strlen(pca_desc_config) > 0)
// if(0)
}
#endif //_GH_REGIONS
// SavePCADescriptors("./pca_descriptors.yml");
-
+
}
OneWayDescriptorBase::OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename,
scale_min = fn["minScale"];
scale_max = fn["maxScale"];
scale_step = fn["stepScale"];
-
- LoadPCAall (fn);
+
+ LoadPCAall (fn);
}
void OneWayDescriptorBase::LoadPCAall (const FileNode &fn)
{
cvReleaseMat(&m_pca_avg);
cvReleaseMat(&m_pca_eigenvectors);
-
+
if(m_pca_hr_eigenvectors)
{
delete[] m_pca_descriptors;
cvReleaseMat(&m_pca_hr_avg);
cvReleaseMat(&m_pca_hr_eigenvectors);
}
-
-
+
+
if(m_descriptors)
delete []m_descriptors;
if(m_poses)
delete []m_poses;
-
+
if (m_transforms)
{
for(int i = 0; i < m_pose_count; i++)
}
#endif
}
-
+
void OneWayDescriptorBase::clear(){
if (m_descriptors)
{
m_poses[i] = GenRandomAffinePose();
}
}
-
+
void OneWayDescriptorBase::InitializeTransformsFromPoses()
{
m_transforms = new CvMat*[m_pose_count];
GenerateAffineTransformFromPose(cvSize(m_patch_size.width*2, m_patch_size.height*2), m_poses[i], m_transforms[i]);
}
}
-
+
void OneWayDescriptorBase::InitializePoseTransforms()
{
InitializePoses();
InitializeTransformsFromPoses();
}
-
+
void OneWayDescriptorBase::InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label)
{
-
+
// TBD add support for octave != 0
CvPoint center = keypoint.pt;
-
+
CvRect roi = cvRect(center.x - m_patch_size.width/2, center.y - m_patch_size.height/2, m_patch_size.width, m_patch_size.height);
cvResetImageROI(train_image);
roi = fit_rect_fixedsize(roi, train_image);
{
return;
}
-
+
InitializeDescriptor(desc_idx, train_image, feature_label);
cvResetImageROI(train_image);
}
-
+
void OneWayDescriptorBase::InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label)
{
m_descriptors[desc_idx].SetPCADimHigh(m_pca_dim_high);
m_descriptors[desc_idx].SetPCADimLow(m_pca_dim_low);
m_descriptors[desc_idx].SetTransforms(m_poses, m_transforms);
-
+
if(!m_pca_hr_eigenvectors)
{
m_descriptors[desc_idx].Initialize(m_pose_count, train_image, feature_label);
m_descriptors[desc_idx].InitializeFast(m_pose_count, train_image, feature_label,
m_pca_hr_avg, m_pca_hr_eigenvectors, m_pca_descriptors);
}
-
+
if(m_pca_avg)
{
m_descriptors[desc_idx].InitializePCACoeffs(m_pca_avg, m_pca_eigenvectors);
}
}
-
+
void OneWayDescriptorBase::FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const
{
CvRect roi = cvRect(cvRound(pt.x - m_patch_size.width/4),
cvRound(pt.y - m_patch_size.height/4),
m_patch_size.width/2, m_patch_size.height/2);
cvSetImageROI(src, roi);
-
+
FindDescriptor(src, desc_idx, pose_idx, distance);
- cvResetImageROI(src);
+ cvResetImageROI(src);
}
-
+
void OneWayDescriptorBase::FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale, float* scale_ranges) const
{
#if 0
float min = scale_min;
float max = scale_max;
float step = scale_step;
-
+
if (scale_ranges)
{
min = scale_ranges[0];
max = scale_ranges[1];
}
-
+
float scale = 1.0f;
-
+
#if !defined(_KDTREE)
cv::FindOneWayDescriptorEx(m_train_feature_count, m_descriptors, patch,
min, max, step, desc_idx, pose_idx, distance, scale,
min, max, step, desc_idx, pose_idx, distance, scale,
m_pca_avg, m_pca_eigenvectors);
#endif
-
+
if (_scale)
*_scale = scale;
-
+
#endif
}
-
+
void OneWayDescriptorBase::FindDescriptor(IplImage* patch, int n, std::vector<int>& desc_idxs, std::vector<int>& pose_idxs,
std::vector<float>& distances, std::vector<float>& _scales, float* scale_ranges) const
{
float min = scale_min;
float max = scale_max;
float step = scale_step;
-
+
if (scale_ranges)
{
min = scale_ranges[0];
max = scale_ranges[1];
}
-
+
distances.resize(n);
_scales.resize(n);
desc_idxs.resize(n);
pose_idxs.resize(n);
/*float scales = 1.0f;*/
-
+
cv::FindOneWayDescriptorEx(m_train_feature_count, m_descriptors, patch,
min, max, step ,n, desc_idxs, pose_idxs, distances, _scales,
m_pca_avg, m_pca_eigenvectors);
-
+
}
-
+
void OneWayDescriptorBase::SetPCAHigh(CvMat* avg, CvMat* eigenvectors)
{
m_pca_hr_avg = cvCloneMat(avg);
m_pca_hr_eigenvectors = cvCloneMat(eigenvectors);
}
-
+
void OneWayDescriptorBase::SetPCALow(CvMat* avg, CvMat* eigenvectors)
{
m_pca_avg = cvCloneMat(avg);
m_pca_eigenvectors = cvCloneMat(eigenvectors);
}
-
+
void OneWayDescriptorBase::AllocatePCADescriptors()
{
m_pca_descriptors = new OneWayDescriptor[m_pca_dim_high + 1];
m_pca_descriptors[i].SetPCADimLow(m_pca_dim_low);
}
}
-
+
void OneWayDescriptorBase::CreatePCADescriptors()
{
if(m_pca_descriptors == 0)
AllocatePCADescriptors();
}
IplImage* frontal = cvCreateImage(m_patch_size, IPL_DEPTH_32F, 1);
-
+
eigenvector2image(m_pca_hr_avg, frontal);
m_pca_descriptors[0].SetTransforms(m_poses, m_transforms);
m_pca_descriptors[0].Initialize(m_pose_count, frontal, "", 0);
-
+
for(int j = 0; j < m_pca_dim_high; j++)
{
CvMat eigenvector;
cvGetSubRect(m_pca_hr_eigenvectors, &eigenvector, cvRect(0, j, m_pca_hr_eigenvectors->cols, 1));
eigenvector2image(&eigenvector, frontal);
-
+
m_pca_descriptors[j + 1].SetTransforms(m_poses, m_transforms);
m_pca_descriptors[j + 1].Initialize(m_pose_count, frontal, "", 0);
-
+
printf("Created descriptor for PCA component %d\n", j);
}
-
+
cvReleaseImage(&frontal);
}
-
-
+
+
int OneWayDescriptorBase::LoadPCADescriptors(const char* filename)
{
FileStorage fs = FileStorage (filename, FileStorage::READ);
printf("Successfully read %d pca components\n", m_pca_dim_high);
fs.release ();
-
+
return 1;
}
if (! m_pca_descriptors[i].ReadByName(fn, buf))
{
- char buf[1024];
sprintf(buf, "descriptor for pca component %d", i);
m_pca_descriptors[i].ReadByName(fn, buf);
}
cvReleaseMat(&eigenvalues);
}
- void extractPatches (IplImage *img, vector<IplImage*>& patches, CvSize patch_size)
+ static void extractPatches (IplImage *img, vector<IplImage*>& patches, CvSize patch_size)
{
vector<KeyPoint> features;
Ptr<FeatureDetector> surf_extractor = FeatureDetector::create("SURF");
{
CvMemStorage* storage = cvCreateMemStorage();
CvFileStorage* fs = cvOpenFileStorage(filename, storage, CV_STORAGE_WRITE);
-
+
SavePCADescriptors (fs);
-
+
cvReleaseMemStorage(&storage);
cvReleaseFileStorage(&fs);
}
-
+
void OneWayDescriptorBase::SavePCADescriptors(CvFileStorage *fs) const
{
cvWriteInt(fs, "pca_components_number", m_pca_dim_high);
m_descriptors[i].SetPCADimLow(m_pca_dim_low);
}
}
-
+
void OneWayDescriptorBase::InitializeDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
const char* feature_label, int desc_start_idx)
{
for(int i = 0; i < (int)features.size(); i++)
{
InitializeDescriptor(desc_start_idx + i, train_image, features[i], feature_label);
-
+
}
cvResetImageROI(train_image);
-
+
#if defined(_KDTREE)
ConvertDescriptorsArrayToTree();
#endif
}
-
+
void OneWayDescriptorBase::CreateDescriptorsFromImage(IplImage* src, const std::vector<KeyPoint>& features)
{
m_train_feature_count = (int)features.size();
-
+
m_descriptors = new OneWayDescriptor[m_train_feature_count];
-
+
InitializeDescriptors(src, features);
-
+
}
-
+
#if defined(_KDTREE)
void OneWayDescriptorBase::ConvertDescriptorsArrayToTree()
{
if (n<1)
return;
int pca_dim_low = this->GetDescriptor(0)->GetPCADimLow();
-
+
//if (!m_pca_descriptors_matrix)
- // m_pca_descriptors_matrix = new ::cvflann::Matrix<float>(n*m_pose_count,pca_dim_low);
+ // m_pca_descriptors_matrix = new ::cvflann::Matrix<float>(n*m_pose_count,pca_dim_low);
//else
//{
- // if ((m_pca_descriptors_matrix->cols != pca_dim_low)&&(m_pca_descriptors_matrix->rows != n*m_pose_count))
- // {
- // delete m_pca_descriptors_matrix;
- // m_pca_descriptors_matrix = new ::cvflann::Matrix<float>(n*m_pose_count,pca_dim_low);
- // }
+ // if ((m_pca_descriptors_matrix->cols != pca_dim_low)&&(m_pca_descriptors_matrix->rows != n*m_pose_count))
+ // {
+ // delete m_pca_descriptors_matrix;
+ // m_pca_descriptors_matrix = new ::cvflann::Matrix<float>(n*m_pose_count,pca_dim_low);
+ // }
//}
-
+
m_pca_descriptors_matrix = cvCreateMat(n*m_pose_count,pca_dim_low,CV_32FC1);
for (int i=0;i<n;i++)
{
}
}
cv::Mat pca_descriptors_mat(m_pca_descriptors_matrix,false);
-
+
//::cvflann::KDTreeIndexParams params;
//params.trees = 1;
//m_pca_descriptors_tree = new KDTree(pca_descriptors_mat);
//m_pca_descriptors_tree->buildIndex();
}
#endif
-
+
void OneWayDescriptorObject::Allocate(int train_feature_count, int object_feature_count)
{
OneWayDescriptorBase::Allocate(train_feature_count);
m_object_feature_count = object_feature_count;
-
+
m_part_id = new int[m_object_feature_count];
}
-
-
+
+
void OneWayDescriptorObject::InitializeObjectDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
const char* feature_label, int desc_start_idx, float scale, int is_background)
{
InitializeDescriptors(train_image, features, feature_label, desc_start_idx);
-
+
for(int i = 0; i < (int)features.size(); i++)
{
CvPoint center = features[i].pt;
-
+
if(!is_background)
{
// remember descriptor part id
}
cvResetImageROI(train_image);
}
-
+
int OneWayDescriptorObject::IsDescriptorObject(int desc_idx) const
{
return desc_idx < m_object_feature_count ? 1 : 0;
}
-
+
int OneWayDescriptorObject::MatchPointToPart(CvPoint pt) const
{
int idx = -1;
break;
}
}
-
+
return idx;
}
-
+
int OneWayDescriptorObject::GetDescriptorPart(int desc_idx) const
{
// return MatchPointToPart(GetDescriptor(desc_idx)->GetCenter());
return desc_idx < m_object_feature_count ? m_part_id[desc_idx] : -1;
}
-
+
OneWayDescriptorObject::OneWayDescriptorObject(CvSize patch_size, int pose_count, const char* train_path,
const char* pca_config, const char* pca_hr_config, const char* pca_desc_config, int pyr_levels) :
OneWayDescriptorBase(patch_size, pose_count, train_path, pca_config, pca_hr_config, pca_desc_config, pyr_levels)
{
m_part_id = 0;
}
-
+
OneWayDescriptorObject::OneWayDescriptorObject(CvSize patch_size, int pose_count, const string &pca_filename,
const string &train_path, const string &images_list, float _scale_min, float _scale_max, float _scale_step, int pyr_levels) :
OneWayDescriptorBase(patch_size, pose_count, pca_filename, train_path, images_list, _scale_min, _scale_max, _scale_step, pyr_levels)
if (m_part_id)
delete []m_part_id;
}
-
+
vector<KeyPoint> OneWayDescriptorObject::_GetLabeledFeatures() const
{
vector<KeyPoint> features;
{
features.push_back(m_train_features[i]);
}
-
+
return features;
}
-
+
void eigenvector2image(CvMat* eigenvector, IplImage* img)
{
CvRect roi = cvGetImageROI(img);
cvReleaseMat(&_eigenvectors);
}
}
-
+
/****************************************************************************************\
* OneWayDescriptorMatcher *
\****************************************************************************************/
-
+
OneWayDescriptorMatcher::Params::Params( int _poseCount, Size _patchSize, string _pcaFilename,
string _trainPath, string _trainImagesList,
float _minScale, float _maxScale, float _stepScale ) :
trainPath(_trainPath), trainImagesList(_trainImagesList),
minScale(_minScale), maxScale(_maxScale), stepScale(_stepScale)
{}
-
-
+
+
OneWayDescriptorMatcher::OneWayDescriptorMatcher( const Params& _params)
{
initialize(_params);
}
-
+
OneWayDescriptorMatcher::~OneWayDescriptorMatcher()
{}
-
+
void OneWayDescriptorMatcher::initialize( const Params& _params, const Ptr<OneWayDescriptorBase>& _base )
{
clear();
-
+
if( _base.empty() )
base = _base;
-
+
params = _params;
}
-
+
void OneWayDescriptorMatcher::clear()
{
GenericDescriptorMatcher::clear();
-
+
prevTrainCount = 0;
if( !base.empty() )
base->clear();
}
-
+
void OneWayDescriptorMatcher::train()
{
if( base.empty() || prevTrainCount < (int)trainPointCollection.keypointCount() )
{
base = new OneWayDescriptorObject( params.patchSize, params.poseCount, params.pcaFilename,
params.trainPath, params.trainImagesList, params.minScale, params.maxScale, params.stepScale );
-
+
base->Allocate( (int)trainPointCollection.keypointCount() );
prevTrainCount = (int)trainPointCollection.keypointCount();
-
+
const vector<vector<KeyPoint> >& points = trainPointCollection.getKeypoints();
int count = 0;
for( size_t i = 0; i < points.size(); i++ )
for( size_t j = 0; j < points[i].size(); j++ )
base->InitializeDescriptor( count++, &_image, points[i][j], "" );
}
-
+
#if defined(_KDTREE)
base->ConvertDescriptorsArrayToTree();
#endif
}
}
-
+
bool OneWayDescriptorMatcher::isMaskSupported()
{
return false;
}
-
+
void OneWayDescriptorMatcher::knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, int knn,
const vector<Mat>& /*masks*/, bool /*compactResult*/ )
{
train();
-
+
CV_Assert( knn == 1 ); // knn > 1 unsupported because of bug in OneWayDescriptorBase for this case
-
+
matches.resize( queryKeypoints.size() );
IplImage _qimage = queryImage;
for( size_t i = 0; i < queryKeypoints.size(); i++ )
matches[i].push_back( DMatch((int)i, descIdx, distance) );
}
}
-
+
void OneWayDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
const vector<Mat>& /*masks*/, bool /*compactResult*/ )
{
train();
-
+
matches.resize( queryKeypoints.size() );
IplImage _qimage = queryImage;
for( size_t i = 0; i < queryKeypoints.size(); i++ )
matches[i].push_back( DMatch((int)i, descIdx, distance) );
}
}
-
+
void OneWayDescriptorMatcher::read( const FileNode &fn )
{
base = new OneWayDescriptorObject( params.patchSize, params.poseCount, string (), string (), string (),
params.minScale, params.maxScale, params.stepScale );
base->Read (fn);
}
-
+
void OneWayDescriptorMatcher::write( FileStorage& fs ) const
{
base->Write (fs);
}
-
+
bool OneWayDescriptorMatcher::empty() const
{
return base.empty() || base->empty();
}
-
+
Ptr<GenericDescriptorMatcher> OneWayDescriptorMatcher::clone( bool emptyTrainData ) const
{
OneWayDescriptorMatcher* matcher = new OneWayDescriptorMatcher( params );
-
+
if( !emptyTrainData )
{
CV_Error( CV_StsNotImplemented, "deep clone functionality is not implemented, because "
"OneWayDescriptorBase has not copy constructor or clone method ");
-
+
//matcher->base;
matcher->params = params;
matcher->prevTrainCount = prevTrainCount;
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4710 4711 4514 4996 )
-#endif
-
-#ifdef HAVE_CVCONFIG_H
+#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
int norphans = 0, maxOrphans = _maxOrphans;
GCVtx** orphans = _orphans;
stub.next = nilNode;
-
+
// initialize the active queue and the graph vertices
for( i = 0; i < nvtx; i++ )
{
GCVtx* v, *u;
int e0 = -1, ei = 0, ej = 0, min_weight, weight;
uchar vt;
-
+
// grow S & T search trees, find an edge connecting them
while( first != nilNode )
{
v->parent = ORPHAN;
}
}
-
+
v->weight = (short)(v->weight + min_weight*(1-k*2));
if( v->weight == 0 )
{
curr_ts++;
while( norphans > 0 )
{
- GCVtx* v = orphans[--norphans];
+ GCVtx* v1 = orphans[--norphans];
int d, min_dist = INT_MAX;
e0 = 0;
- vt = v->t;
+ vt = v1->t;
- for( ei = v->first; ei != 0; ei = edges[ei].next )
+ for( ei = v1->first; ei != 0; ei = edges[ei].next )
{
if( edges[ei^(vt^1)].weight == 0 )
continue;
}
}
- if( (v->parent = e0) > 0 )
+ if( (v1->parent = e0) > 0 )
{
- v->ts = curr_ts;
- v->dist = min_dist;
+ v1->ts = curr_ts;
+ v1->dist = min_dist;
continue;
}
/* no parent is found */
- v->ts = 0;
- for( ei = v->first; ei != 0; ei = edges[ei].next )
+ v1->ts = 0;
+ for( ei = v1->first; ei != 0; ei = edges[ei].next )
{
u = edges[ei].dst;
ej = u->parent;
u->next = nilNode;
last = last->next = u;
}
- if( ej > 0 && edges[ej].dst == v )
+ if( ej > 0 && edges[ej].dst == v1 )
{
if( norphans >= maxOrphans )
maxOrphans = icvGCResizeOrphansBuf( orphans, norphans );
void cvReleaseStereoGCState( CvStereoGCState** _state )
{
CvStereoGCState* state;
-
+
if( !_state && !*_state )
return;
CvMat* left3, CvMat* right3 )
{
int k, x, y, rows = left->rows, cols = left->cols;
-
+
for( k = 0; k < 2; k++ )
{
const CvMat* src = k == 0 ? left : right;
const uchar* sptr_next = y < rows-1 ? sptr + sstep : sptr;
uchar* dptr = dst->data.ptr + dst->step*y;
int v_prev = sptr[0];
-
+
for( x = 0; x < cols; x++, dptr += 3 )
{
int v = sptr[x], v1, minv = v, maxv = v;
-
+
v1 = (v + v_prev)/2;
minv = MIN(minv, v1); maxv = MAX(maxv, v1);
v1 = (v + sptr_prev[x])/2;
{
const uchar* lptr = state->left->data.ptr + state->left->step*y;
const uchar* rptr = state->right->data.ptr + state->right->step*y;
-
+
for( x = 0; x < cols; x++ )
{
for( d = maxd-1, i = 0; d >= mind; d-- )
GCVtx** pright = pright0 + pstep*y;
const uchar* lr[] = { left, right };
const short* dlr[] = { dleft, dright };
- GCVtx** plr[] = { pleft, pright };
+ GCVtx** plr[] = { pleft, pright };
for( k = 0; k < 2; k++ )
{
GCVtx** pright = pright0 + pstep*y;
for( x = 0; x < cols; x++ )
{
- GCVtx* var = pleft[x];
- if( var && var->parent && var->t )
- dleft[x] = (short)alpha;
+ GCVtx* var2 = pleft[x];
+ if( var2 && var2->parent && var2->t )
+ dleft[x] = (short)alpha;
- var = pright[x];
- if( var && var->parent && var->t )
+ var2 = pright[x];
+ if( var2 && var2->parent && var2->t )
dright[x] = (short)-alpha;
}
}
icvInitStereoConstTabs();
icvInitGraySubpix( left, right, state->left, state->right );
-
+
std::vector<int> disp(state->numberOfDisparities);
CvMat _disp = cvMat( 1, (int)disp.size(), CV_32S, &disp[0] );
cvRange( &_disp, state->minDisparity, state->minDisparity + state->numberOfDisparities );
float angle;
} CvTSTrans;
-void SET_TRANS_0(CvTSTrans *pT)
+static void SET_TRANS_0(CvTSTrans *pT)
{
memset(pT,0,sizeof(CvTSTrans));
pT->C = 1;
int y0=0, y1=pFG->height-1;
for(y0=0; y0<pFG->height; ++y0)
{
- CvMat m;
- CvScalar s = cvSum(cvGetRow(pFG, &m, y0));
+ CvMat tmp;
+ CvScalar s = cvSum(cvGetRow(pFG, &tmp, y0));
if(s.val[0] > 255*7) break;
}
for(y1=pFG->height-1; y1>0; --y1)
{
- CvMat m;
- CvScalar s = cvSum(cvGetRow(pFG, &m, y1));
+ CvMat tmp;
+ CvScalar s = cvSum(cvGetRow(pFG, &tmp, y1));
if(s.val[0] > 255*7) break;
}
p->FrameNum = cvReadIntByName( fs, node, "FrameNum", p->FrameNum );
p->FrameNum = cvReadIntByName( fs, node, "Dur", p->FrameNum );
{
- int LastFrame = cvReadIntByName( fs, node, "LastFrame", p->FrameBegin+p->FrameNum-1 );
- p->FrameNum = MIN(p->FrameNum,LastFrame - p->FrameBegin+1);
+ int lastFrame = cvReadIntByName( fs, node, "LastFrame", p->FrameBegin+p->FrameNum-1 );
+ p->FrameNum = MIN(p->FrameNum,lastFrame - p->FrameBegin+1);
}
icvTestSeqAllocTrans(p);
if(pTransSeq&&KeyFrameNum>1)
{
- int i0,i1,i;
- for(i=0; i<KeyFrameNum; ++i)
+ int i0,i1;
+ for(int i=0; i<KeyFrameNum; ++i)
{
CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,i);
KeyFrames[i] = cvReadIntByName(fs,pTN,"frame",-1);
for(i0=0, i1=1; i1<KeyFrameNum;)
{
- int i;
-
for(i1=i0+1; i1<KeyFrameNum && KeyFrames[i1]<0; i1++);
assert(i1<KeyFrameNum);
assert(i1>i0);
- for(i=i0+1; i<i1; ++i)
+ for(int i=i0+1; i<i1; ++i)
{
KeyFrames[i] = cvRound(KeyFrames[i0] + (float)(i-i0)*(float)(KeyFrames[i1] - KeyFrames[i0])/(float)(i1-i0));
}
{ /* Only one transform record: */
int i;
double val;
- CvFileNode* node = cvGetFileNodeByName( fs, pTN,name);
- if(node == NULL) continue;
- val = cvReadReal(node,defv);
+ CvFileNode* fnode = cvGetFileNodeByName( fs, pTN,name);
+ if(fnode == NULL) continue;
+ val = cvReadReal(fnode,defv);
for(i=0; i<p->TransNum; ++i)
{
double v0;
double v1;
- CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,0);
- v0 = cvReadRealByName(fs, pTN,name,defv);
+ CvFileNode* pTN1 = (CvFileNode*)cvGetSeqElem(pTransSeq,0);
+ v0 = cvReadRealByName(fs, pTN1,name,defv);
for(i1=1,i0=0; i1<KeyFrameNum; ++i1)
{
int f0,f1;
int i;
- CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,i1);
- CvFileNode* pVN = cvGetFileNodeByName(fs,pTN,name);
+ CvFileNode* pTN2 = (CvFileNode*)cvGetSeqElem(pTransSeq,i1);
+ CvFileNode* pVN = cvGetFileNodeByName(fs,pTN2,name);
if(pVN)v1 = cvReadReal(pVN,defv);
else if(pVN == NULL && i1 == KeyFrameNum-1) v1 = defv;
/*==========================================================================================*/
/* Functions for calculation the tensor */
/*==========================================================================================*/
+#if 0
#if 1
-void fprintMatrix(FILE* file,CvMat* matrix)
+static void fprintMatrix(FILE* file,CvMat* matrix)
{
int i,j;
fprintf(file,"\n");
#endif
/*==========================================================================================*/
-void icvNormalizePoints( CvMat* points, CvMat* normPoints,CvMat* cameraMatr )
+static void icvNormalizePoints( CvMat* points, CvMat* normPoints,CvMat* cameraMatr )
{
/* Normalize image points using camera matrix */
return;
}
-
+#endif
/*=====================================================================================*/
/*
}
/*==========================================================================================*/
-int icvGetRandNumbers(int range,int count,int* arr)
+static int icvGetRandNumbers(int range,int count,int* arr)
{
/* Generate random numbers [0,range-1] */
return 1;
}
/*==========================================================================================*/
-void icvSelectColsByNumbers(CvMat* srcMatr, CvMat* dstMatr, int* indexes,int number)
+static void icvSelectColsByNumbers(CvMat* srcMatr, CvMat* dstMatr, int* indexes,int number)
{
CV_FUNCNAME( "icvSelectColsByNumbers" );
}
/*==========================================================================================*/
-void icvProject4DPoints(CvMat* points4D,CvMat* projMatr, CvMat* projPoints)
+static void icvProject4DPoints(CvMat* points4D,CvMat* projMatr, CvMat* projPoints)
{
CvMat* tmpProjPoints = 0;
return;
}
/*==========================================================================================*/
-int icvCompute3ProjectMatricesNPointsStatus( CvMat** points,/* 3 arrays of points on image */
+#if 0
+static int icvCompute3ProjectMatricesNPointsStatus( CvMat** points,/* 3 arrays of points on image */
CvMat** projMatrs,/* array of 3 prejection matrices */
CvMat** statuses,/* 3 arrays of status of points */
double threshold,/* Threshold for good point */
return numProjMatrs;
}
+#endif
/*==========================================================================================*/
int icvComputeProjectMatricesNPoints( CvMat* points1,CvMat* points2,CvMat* points3,
projMatrs[1] = projMatr2;
projMatrs[2] = projMatr3;
- int i;
- for( i = 0; i < 3; i++ )
+ for(int i = 0; i < 3; i++ )
{
if( projMatrs[i]->cols != 4 || projMatrs[i]->rows != 3 )
{
}
}
- for( i = 0; i < 3; i++ )
+ for(int i = 0; i < 3; i++ )
{
if( points[i]->rows != 2)
{
icvProject4DPoints(recPoints4D,&proj6[2],tmpProjPoints[2]);
/* Compute distances and number of good points (inliers) */
- int i;
int currImage;
numGoodPoints = 0;
- for( i = 0; i < numPoints; i++ )
+ for(int i = 0; i < numPoints; i++ )
{
double dist=-1;
dist = 0;
CvMat *optStatus;
optStatus = cvCreateMat(1,numPoints,CV_64F);
int testNumber = 0;
- for( i=0;i<numPoints;i++ )
+ for(int i=0;i<numPoints;i++ )
{
cvmSet(optStatus,0,i,(double)bestFlags[i]);
testNumber += bestFlags[i];
CvMat *gPresPoints;
gPresPoints = cvCreateMat(1,maxGoodPoints,CV_64F);
- for( i = 0; i < maxGoodPoints; i++)
+ for(int i = 0; i < maxGoodPoints; i++)
{
cvmSet(gPresPoints,0,i,1.0);
}
int currImage;
finalGoodPoints = 0;
- for( i = 0; i < numPoints; i++ )
+ for(int i = 0; i < numPoints; i++ )
{
double dist=-1;
/* Choose max distance for each of three points */
/* Create status */
CvMat *optStatus;
optStatus = cvCreateMat(1,numPoints,CV_64F);
- for( i=0;i<numPoints;i++ )
+ for(int i=0;i<numPoints;i++ )
{
cvmSet(optStatus,0,i,(double)bestFlags[i]);
}
int currImage;
finalGoodPoints = 0;
- for( i = 0; i < numPoints; i++ )
+ for(int i = 0; i < numPoints; i++ )
{
double dist=-1;
/* Choose max distance for each of three points */
matrA_dat[7] = s;
matrA_dat[8] = -(p+q+r+s+t);
- CvMat matrU;
CvMat matrW;
CvMat matrV;
- double matrU_dat[3*3];
double matrW_dat[3*3];
double matrV_dat[3*3];
- matrU = cvMat(3,3,CV_64F,matrU_dat);
matrW = cvMat(3,3,CV_64F,matrW_dat);
matrV = cvMat(3,3,CV_64F,matrV_dat);
matrK_dat[4*6+5] = -B2;
matrK_dat[5*6+5] = -C2;
- CvMat matrU;
- CvMat matrW;
- CvMat matrV;
+ CvMat matrW1;
+ CvMat matrV1;
- double matrU_dat[36];
- double matrW_dat[36];
- double matrV_dat[36];
+ double matrW_dat1[36];
+ double matrV_dat1[36];
- matrU = cvMat(6,6,CV_64F,matrU_dat);
- matrW = cvMat(6,6,CV_64F,matrW_dat);
- matrV = cvMat(6,6,CV_64F,matrV_dat);
+ matrW1 = cvMat(6,6,CV_64F,matrW_dat1);
+ matrV1 = cvMat(6,6,CV_64F,matrV_dat1);
/* From svd we need just last vector of V or last row V' */
/* We get transposed matrixes U and V */
- cvSVD(&matrK,&matrW,0,&matrV,CV_SVD_V_T);
+ cvSVD(&matrK,&matrW1,0,&matrV1,CV_SVD_V_T);
- a = matrV_dat[6*5+0];
- b = matrV_dat[6*5+1];
- c = matrV_dat[6*5+2];
- d = matrV_dat[6*5+3];
+ a = matrV_dat1[6*5+0];
+ b = matrV_dat1[6*5+1];
+ c = matrV_dat1[6*5+2];
+ d = matrV_dat1[6*5+3];
/* we don't need last two coefficients. Because it just a k1,k2 */
cvmSet(projMatrCoefs,0,0,a);
#endif
/*==========================================================================================*/
-
-void icvComputeCameraExrinnsicByPosition(CvMat* camPos, CvMat* rotMatr, CvMat* transVect)
+#if 0
+static void icvComputeCameraExrinnsicByPosition(CvMat* camPos, CvMat* rotMatr, CvMat* transVect)
{
/* We know position of camera. we must to compute rotate matrix and translate vector */
/*==========================================================================================*/
-void FindTransformForProjectMatrices(CvMat* projMatr1,CvMat* projMatr2,CvMat* rotMatr,CvMat* transVect)
+static void FindTransformForProjectMatrices(CvMat* projMatr1,CvMat* projMatr2,CvMat* rotMatr,CvMat* transVect)
{
/* Computes homography for project matrix be "canonical" form */
CV_FUNCNAME( "computeProjMatrHomography" );
/* Part with metric reconstruction */
#if 1
-void icvComputeQ(int numMatr, CvMat** projMatr, CvMat** cameraMatr, CvMat* matrQ)
+static void icvComputeQ(int numMatr, CvMat** projMatr, CvMat** cameraMatr, CvMat* matrQ)
{
/* K*K' = P*Q*P' */
/* try to solve Q by linear method */
#endif
/*-----------------------------------------------------------------------------------------------------*/
-void icvDecomposeQ(CvMat* /*matrQ*/,CvMat* /*matrH*/)
+static void icvDecomposeQ(CvMat* /*matrQ*/,CvMat* /*matrH*/)
{
#if 0
/* Use SVD to decompose matrix Q=H*I*H' */
#endif
}
+#endif
+
#include "_vectrack.h"
#define NUM_FACE_ELEMENTS 3
-enum
+enum
{
MOUTH = 0,
LEYE = 1,
inline int GetEnergy(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoint* ptTempl, CvRect* rTempl);
inline int GetEnergy2(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoint* ptTempl, CvRect* rTempl, int* element);
inline double CalculateTransformationLMS3_0( CvPoint* pTemplPoints, CvPoint* pSrcPoints);
-inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
+inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
CvPoint* pSrcPoints,
double* pdbAverageScale,
double* pdbAverageRotate,
int Energy(const CvTrackingRect& prev)
{
int prev_color = 0 == prev.iColor ? iColor : prev.iColor;
- iEnergy = 1 * pow2(r.width - prev.r.width) +
- 1 * pow2(r.height - prev.r.height) +
- 1 * pow2(iColor - prev_color) / 4 +
- - 1 * nRectsInThis +
- - 0 * nRectsOnTop +
- + 0 * nRectsOnLeft +
- + 0 * nRectsOnRight +
+ iEnergy = 1 * pow2(r.width - prev.r.width) +
+ 1 * pow2(r.height - prev.r.height) +
+ 1 * pow2(iColor - prev_color) / 4 +
+ - 1 * nRectsInThis +
+ - 0 * nRectsOnTop +
+ + 0 * nRectsOnLeft +
+ + 0 * nRectsOnRight +
+ 0 * nRectsOnBottom;
return iEnergy;
}
double dbRotateDelta;
double dbRotateAngle;
CvPoint ptRotate;
-
+
CvPoint ptTempl[NUM_FACE_ELEMENTS];
CvRect rTempl[NUM_FACE_ELEMENTS];
-
+
IplImage* imgGray;
IplImage* imgThresh;
CvMemStorage* mstgContours;
if (NULL != mstgContours)
cvReleaseMemStorage(&mstgContours);
};
- int Init(CvRect* pRects, IplImage* imgGray)
+ int Init(CvRect* pRects, IplImage* imgray)
{
for (int i = 0; i < NUM_FACE_ELEMENTS; i++)
{
ptTempl[i] = face[i].ptCenter;
rTempl[i] = face[i].r;
}
- imgGray = cvCreateImage(cvSize(imgGray->width, imgGray->height), 8, 1);
- imgThresh = cvCreateImage(cvSize(imgGray->width, imgGray->height), 8, 1);
+ imgray = cvCreateImage(cvSize(imgray->width, imgray->height), 8, 1);
+ imgThresh = cvCreateImage(cvSize(imgray->width, imgray->height), 8, 1);
mstgContours = cvCreateMemStorage();
- if ((NULL == imgGray) ||
- (NULL == imgThresh) ||
+ if ((NULL == imgray) ||
+ (NULL == imgThresh) ||
(NULL == mstgContours))
return FALSE;
return TRUE;
ReallocImage(&imgThresh, sz, 1);
ptRotate = face[MOUTH].ptCenter;
float m[6];
- CvMat mat = cvMat( 2, 3, CV_32FC1, m );
+ CvMat mat = cvMat( 2, 3, CV_32FC1, m );
if (NULL == imgGray || NULL == imgThresh)
return FALSE;
-
+
/*m[0] = (float)cos(-dbRotateAngle*CV_PI/180.);
m[1] = (float)sin(-dbRotateAngle*CV_PI/180.);
m[2] = (float)ptRotate.x;
m[5] = (float)ptRotate.y;*/
cv2DRotationMatrix( cvPointTo32f(ptRotate), -dbRotateAngle, 1., &mat );
cvWarpAffine( img, imgGray, &mat );
-
+
if (NULL == mstgContours)
mstgContours = cvCreateMemStorage();
else
void Energy();
}; //class CvFaceElement
-int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
+inline int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
{
return ((CvTrackingRect*)el1)->iEnergy - ((CvTrackingRect*)el2)->iEnergy;
}// int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
}
for (CvSeq* internal = external->v_next; internal; internal = internal->h_next)
{
- cr.r = cvContourBoundingRect(internal);
+ cr.r = cvContourBoundingRect(internal);
Move(cr.r, roi.x, roi.y);
if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize && cr.r.height > dMinSize)
{
for (j = i + 1; j < nRects; j++)
{
CvTrackingRect* pRect2 = (CvTrackingRect*)(reader2.ptr);
- if (abs(pRect1->ptCenter.y - pRect2->ptCenter.y) < d &&
+ if (abs(pRect1->ptCenter.y - pRect2->ptCenter.y) < d &&
abs(pRect1->r.height - pRect2->r.height) < d)
{
CvTrackingRect rNew;
(NULL == pRects) ||
(nRects < NUM_FACE_ELEMENTS))
return NULL;
-
+
//int new_face = FALSE;
CvFaceTracker* pFace = pFaceTracker;
if (NULL == pFace)
pFaceTracker->InitNextImage(imgGray);
*ptRotate = pFaceTracker->ptRotate;
*dbAngleRotate = pFaceTracker->dbRotateAngle;
-
+
int nElements = 16;
double dx = pFaceTracker->face[LEYE].ptCenter.x - pFaceTracker->face[REYE].ptCenter.x;
double dy = pFaceTracker->face[LEYE].ptCenter.y - pFaceTracker->face[REYE].ptCenter.y;
int d = cvRound(0.25 * d_eyes);
int dMinSize = d;
int nRestarts = 0;
-
+
int elem;
-
+
CvFaceElement big_face[NUM_FACE_ELEMENTS];
START:
// init
}
if (2 == elements)
find2 = TRUE;
- else
+ else
restart = TRUE;
}
}
pFaceTracker->iTrackingFaceType = noel;
found = TRUE;
}
- else
+ else
{
restart = TRUE;
goto RESTART;
}
}
-
+
if (found)
{
// angle by mouth & eyes
{
assert(imgGray != NULL);
assert(imgGray->nChannels == 1);
- int i, j;
+ int i, j;
// create histogram
int histImg[256] = {0};
uchar* buffImg = (uchar*)imgGray->imageData;
double prev_d02 = sqrt((double)prev_v02.x*prev_v02.x + prev_v02.y*prev_v02.y);
double new_d01 = sqrt((double)new_v01.x*new_v01.x + new_v01.y*new_v01.y);
double scale = templ_d01 / new_d01;
- double new_d02 = templ_d02 / scale;
+ double new_d02 = templ_d02 / scale;
double sin_a = double(prev_v01.x * prev_v02.y - prev_v01.y * prev_v02.x) / (prev_d01 * prev_d02);
double cos_a = cos(asin(sin_a));
double x = double(new_v01.x) * cos_a - double(new_v01.y) * sin_a;
double h_mouth = double(ppNew[MOUTH]->r.height) * scale;
energy +=
int(512.0 * (e_prev + 16.0 * e_templ)) +
- 4 * pow2(ppNew[LEYE]->r.width - ppNew[REYE]->r.width) +
- 4 * pow2(ppNew[LEYE]->r.height - ppNew[REYE]->r.height) +
- 4 * (int)pow(w_eye - double(rTempl[LEYE].width + rTempl[REYE].width) / 2.0, 2) +
- 2 * (int)pow(h_eye - double(rTempl[LEYE].height + rTempl[REYE].height) / 2.0, 2) +
- 1 * (int)pow(w_mouth - double(rTempl[MOUTH].width), 2) +
- 1 * (int)pow(h_mouth - double(rTempl[MOUTH].height), 2) +
+ 4 * pow2(ppNew[LEYE]->r.width - ppNew[REYE]->r.width) +
+ 4 * pow2(ppNew[LEYE]->r.height - ppNew[REYE]->r.height) +
+ 4 * (int)pow(w_eye - double(rTempl[LEYE].width + rTempl[REYE].width) / 2.0, 2) +
+ 2 * (int)pow(h_eye - double(rTempl[LEYE].height + rTempl[REYE].height) / 2.0, 2) +
+ 1 * (int)pow(w_mouth - double(rTempl[MOUTH].width), 2) +
+ 1 * (int)pow(h_mouth - double(rTempl[MOUTH].height), 2) +
0;
return energy;
}
double h0 = (double)ppNew[element[0]]->r.height * scale_templ;
double w1 = (double)ppNew[element[1]]->r.width * scale_templ;
double h1 = (double)ppNew[element[1]]->r.height * scale_templ;
-
+
int energy = ppNew[element[0]]->iEnergy + ppNew[element[1]]->iEnergy +
- - 2 * (ppNew[element[0]]->nRectsInThis - ppNew[element[1]]->nRectsInThis) +
+ - 2 * (ppNew[element[0]]->nRectsInThis - ppNew[element[1]]->nRectsInThis) +
(int)pow(w0 - (double)rTempl[element[0]].width, 2) +
(int)pow(h0 - (double)rTempl[element[0]].height, 2) +
(int)pow(w1 - (double)rTempl[element[1]].width, 2) +
(int)pow(h1 - (double)rTempl[element[1]].height, 2) +
(int)pow(new_d - prev_d, 2) +
0;
-
+
return energy;
}
-inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
+inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
CvPoint* pSrcPoints,
double* pdbAverageScale,
double* pdbAverageRotate,
double dbYt = double(pTemplPoints[0].y + pTemplPoints[1].y + pTemplPoints[2].y ) / 3.0;
double dbXs = double(pSrcPoints[0].x + pSrcPoints[1].x + pSrcPoints[2].x) / 3.0;
double dbYs = double(pSrcPoints[0].y + pSrcPoints[1].y + pSrcPoints[2].y) / 3.0;
-
+
double dbXtXt = double(pow2(pTemplPoints[0].x) + pow2(pTemplPoints[1].x) + pow2(pTemplPoints[2].x)) / 3.0;
double dbYtYt = double(pow2(pTemplPoints[0].y) + pow2(pTemplPoints[1].y) + pow2(pTemplPoints[2].y)) / 3.0;
-
+
double dbXsXs = double(pow2(pSrcPoints[0].x) + pow2(pSrcPoints[1].x) + pow2(pSrcPoints[2].x)) / 3.0;
double dbYsYs = double(pow2(pSrcPoints[0].y) + pow2(pSrcPoints[1].y) + pow2(pSrcPoints[2].y)) / 3.0;
-
- double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
- pTemplPoints[1].x * pSrcPoints[1].x +
+
+ double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
+ pTemplPoints[1].x * pSrcPoints[1].x +
pTemplPoints[2].x * pSrcPoints[2].x) / 3.0;
- double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
- pTemplPoints[1].y * pSrcPoints[1].y +
+ double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
+ pTemplPoints[1].y * pSrcPoints[1].y +
pTemplPoints[2].y * pSrcPoints[2].y) / 3.0;
-
- double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
- pTemplPoints[1].x * pSrcPoints[1].y +
+
+ double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
+ pTemplPoints[1].x * pSrcPoints[1].y +
pTemplPoints[2].x * pSrcPoints[2].y) / 3.0;
- double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
- pTemplPoints[1].y * pSrcPoints[1].x +
+ double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
+ pTemplPoints[1].y * pSrcPoints[1].x +
pTemplPoints[2].y * pSrcPoints[2].x ) / 3.0;
-
+
dbXtXt -= dbXt * dbXt;
dbYtYt -= dbYt * dbYt;
-
+
dbXsXs -= dbXs * dbXs;
dbYsYs -= dbYs * dbYs;
-
+
dbXtXs -= dbXt * dbXs;
dbYtYs -= dbYt * dbYs;
-
+
dbXtYs -= dbXt * dbYs;
dbYtXs -= dbYt * dbXs;
-
+
dbAverageRotate = atan2( dbXtYs - dbYtXs, dbXtXs + dbYtYs );
-
+
double cosR = cos(dbAverageRotate);
double sinR = sin(dbAverageRotate);
double del = dbXsXs + dbYsYs;
dbAverageScale = (double(dbXtXs + dbYtYs) * cosR + double(dbXtYs - dbYtXs) * sinR) / del;
dbLMS = dbXtXt + dbYtYt - ((double)pow(dbXtXs + dbYtYs,2) + (double)pow(dbXtYs - dbYtXs,2)) / del;
}
-
+
dbAverageShiftX = double(dbXt) - dbAverageScale * (double(dbXs) * cosR + double(dbYs) * sinR);
dbAverageShiftY = double(dbYt) - dbAverageScale * (double(dbYs) * cosR - double(dbXs) * sinR);
-
+
if( pdbAverageScale != NULL ) *pdbAverageScale = dbAverageScale;
if( pdbAverageRotate != NULL ) *pdbAverageRotate = dbAverageRotate;
if( pdbAverageShiftX != NULL ) *pdbAverageShiftX = dbAverageShiftX;
if( pdbAverageShiftY != NULL ) *pdbAverageShiftY = dbAverageShiftY;
-
+
assert(dbLMS >= 0);
return dbLMS;
}
double dbYt = double(pTemplPoints[0].y + pTemplPoints[1].y + pTemplPoints[2].y ) / 3.0;
double dbXs = double(pSrcPoints[0].x + pSrcPoints[1].x + pSrcPoints[2].x) / 3.0;
double dbYs = double(pSrcPoints[0].y + pSrcPoints[1].y + pSrcPoints[2].y) / 3.0;
-
+
double dbXtXt = double(pow2(pTemplPoints[0].x) + pow2(pTemplPoints[1].x) + pow2(pTemplPoints[2].x)) / 3.0;
double dbYtYt = double(pow2(pTemplPoints[0].y) + pow2(pTemplPoints[1].y) + pow2(pTemplPoints[2].y)) / 3.0;
-
+
double dbXsXs = double(pow2(pSrcPoints[0].x) + pow2(pSrcPoints[1].x) + pow2(pSrcPoints[2].x)) / 3.0;
double dbYsYs = double(pow2(pSrcPoints[0].y) + pow2(pSrcPoints[1].y) + pow2(pSrcPoints[2].y)) / 3.0;
-
- double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
- pTemplPoints[1].x * pSrcPoints[1].x +
+
+ double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
+ pTemplPoints[1].x * pSrcPoints[1].x +
pTemplPoints[2].x * pSrcPoints[2].x) / 3.0;
- double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
- pTemplPoints[1].y * pSrcPoints[1].y +
+ double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
+ pTemplPoints[1].y * pSrcPoints[1].y +
pTemplPoints[2].y * pSrcPoints[2].y) / 3.0;
-
- double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
- pTemplPoints[1].x * pSrcPoints[1].y +
+
+ double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
+ pTemplPoints[1].x * pSrcPoints[1].y +
pTemplPoints[2].x * pSrcPoints[2].y) / 3.0;
- double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
- pTemplPoints[1].y * pSrcPoints[1].x +
+ double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
+ pTemplPoints[1].y * pSrcPoints[1].x +
pTemplPoints[2].y * pSrcPoints[2].x ) / 3.0;
-
+
dbXtXt -= dbXt * dbXt;
dbYtYt -= dbYt * dbYt;
-
+
dbXsXs -= dbXs * dbXs;
dbYsYs -= dbYs * dbYs;
-
+
dbXtXs -= dbXt * dbXs;
dbYtYs -= dbYt * dbYs;
-
+
dbXtYs -= dbXt * dbYs;
dbYtXs -= dbYt * dbXs;
-
+
double del = dbXsXs + dbYsYs;
if( del != 0 )
dbLMS = dbXtXt + dbYtYt - ((double)pow(dbXtXs + dbYtYs,2) + (double)pow(dbXtYs - dbYtXs,2)) / del;
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
assert(fn.isSeq());
for( int i = 0; i < (int)fn.size(); i+=3 )
{
- string name = fn[i];
+ string nm = fn[i];
DatasetParams params;
string sf = fn[i+1]; params.dispScaleFactor = atoi(sf.c_str());
string uv = fn[i+2]; params.dispUnknVal = atoi(uv.c_str());
- datasetsParams[name] = params;
+ datasetsParams[nm] = params;
}
return cvtest::TS::OK;
}
min_val = max_val = step = 0;
}
- CvParamGrid( double min_val, double max_val, double log_step )
- {
- this->min_val = min_val;
- this->max_val = max_val;
- step = log_step;
- }
+ CvParamGrid( double min_val, double max_val, double log_step );
//CvParamGrid( int param_id );
bool check() const;
CV_PROP_RW double step;
};
+inline CvParamGrid::CvParamGrid( double _min_val, double _max_val, double _log_step )
+{
+ min_val = _min_val;
+ max_val = _max_val;
+ step = _log_step;
+}
+
class CV_EXPORTS_W CvNormalBayesClassifier : public CvStatModel
{
public:
CvNormalBayesClassifier( const CvMat* trainData, const CvMat* responses,
const CvMat* varIdx=0, const CvMat* sampleIdx=0 );
-
+
virtual bool train( const CvMat* trainData, const CvMat* responses,
const CvMat* varIdx = 0, const CvMat* sampleIdx=0, bool update=false );
-
+
virtual float predict( const CvMat* samples, CV_OUT CvMat* results=0 ) const;
CV_WRAP virtual void clear();
bool update=false );
CV_WRAP virtual float predict( const cv::Mat& samples, CV_OUT cv::Mat* results=0 ) const;
#endif
-
+
virtual void write( CvFileStorage* storage, const char* name ) const;
virtual void read( CvFileStorage* storage, CvFileNode* node );
virtual bool train( const CvMat* trainData, const CvMat* responses,
const CvMat* sampleIdx=0, bool is_regression=false,
int maxK=32, bool updateBase=false );
-
+
virtual float find_nearest( const CvMat* samples, int k, CV_OUT CvMat* results=0,
const float** neighbors=0, CV_OUT CvMat* neighborResponses=0, CV_OUT CvMat* dist=0 ) const;
-
+
#ifndef SWIG
CV_WRAP CvKNearest( const cv::Mat& trainData, const cv::Mat& responses,
const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false, int max_k=32 );
-
+
CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses,
const cv::Mat& sampleIdx=cv::Mat(), bool isRegression=false,
- int maxK=32, bool updateBase=false );
-
+ int maxK=32, bool updateBase=false );
+
virtual float find_nearest( const cv::Mat& samples, int k, cv::Mat* results=0,
const float** neighbors=0, cv::Mat* neighborResponses=0,
cv::Mat* dist=0 ) const;
CV_WRAP virtual float find_nearest( const cv::Mat& samples, int k, CV_OUT cv::Mat& results,
CV_OUT cv::Mat& neighborResponses, CV_OUT cv::Mat& dists) const;
#endif
-
+
virtual void clear();
int get_max_k() const;
int get_var_count() const;
int get_sample_count() const;
bool is_regression() const;
-
+
virtual float write_results( int k, int k1, int start, int end,
const float* neighbor_responses, const float* dist, CvMat* _results,
CvMat* _neighbor_responses, CvMat* _dist, Cv32suf* sort_buf ) const;
virtual bool train( const CvMat* trainData, const CvMat* responses,
const CvMat* varIdx=0, const CvMat* sampleIdx=0,
CvSVMParams params=CvSVMParams() );
-
+
virtual bool train_auto( const CvMat* trainData, const CvMat* responses,
const CvMat* varIdx, const CvMat* sampleIdx, CvSVMParams params,
int kfold = 10,
virtual float predict( const CvMat* sample, bool returnDFVal=false ) const;
virtual float predict( const CvMat* samples, CvMat* results ) const;
-
+
#ifndef SWIG
CV_WRAP CvSVM( const cv::Mat& trainData, const cv::Mat& responses,
const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(),
CvSVMParams params=CvSVMParams() );
-
+
CV_WRAP virtual bool train( const cv::Mat& trainData, const cv::Mat& responses,
const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(),
CvSVMParams params=CvSVMParams() );
-
+
CV_WRAP virtual bool train_auto( const cv::Mat& trainData, const cv::Mat& responses,
const cv::Mat& varIdx, const cv::Mat& sampleIdx, CvSVMParams params,
int k_fold = 10,
bool balanced=false);
CV_WRAP virtual float predict( const cv::Mat& sample, bool returnDFVal=false ) const;
#endif
-
+
CV_WRAP virtual int get_support_vector_count() const;
virtual const float* get_support_vector(int i) const;
virtual CvSVMParams get_params() const { return params; };
// Default parameters
enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
-
+
// The initial step
enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
-
+
virtual ~EM();
CV_WRAP virtual void clear();
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
-
+
CV_WRAP virtual bool trainE(InputArray samples,
InputArray means0,
InputArray covs0=noArray(),
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
-
+
CV_WRAP virtual bool trainM(InputArray samples,
InputArray probs0,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
-
+
CV_WRAP Vec2d predict(InputArray sample,
OutputArray probs=noArray()) const;
virtual void read(const FileNode& fn);
protected:
-
+
virtual void setTrainData(int startStep, const Mat& samples,
const Mat* probs0,
const Mat* means0,
int buf_count, buf_size;
bool shared;
int is_buf_16u;
-
+
CvMat* cat_count;
CvMat* cat_ofs;
CvMat* cat_map;
const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
const cv::Mat& missingDataMask=cv::Mat(),
CvDTreeParams params=CvDTreeParams() );
-
+
CV_WRAP virtual CvDTreeNode* predict( const cv::Mat& sample, const cv::Mat& missingDataMask=cv::Mat(),
bool preprocessedInput=false ) const;
CV_WRAP virtual cv::Mat getVarImportance();
#endif
-
+
virtual const CvMat* get_var_importance();
CV_WRAP virtual void clear();
virtual void try_split_node( CvDTreeNode* n );
virtual void split_node_data( CvDTreeNode* n );
virtual CvDTreeSplit* find_best_split( CvDTreeNode* n );
- virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
- virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
- virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 );
virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* n, int vi, uchar* ext_buf = 0 );
const CvMat* sampleIdx=0, const CvMat* varType=0,
const CvMat* missingDataMask=0,
CvRTParams params=CvRTParams() );
-
+
virtual bool train( CvMLData* data, CvRTParams params=CvRTParams() );
virtual float predict( const CvMat* sample, const CvMat* missing = 0 ) const;
virtual float predict_prob( const CvMat* sample, const CvMat* missing = 0 ) const;
CV_WRAP virtual float predict_prob( const cv::Mat& sample, const cv::Mat& missing = cv::Mat() ) const;
CV_WRAP virtual cv::Mat getVarImportance();
#endif
-
+
CV_WRAP virtual void clear();
virtual const CvMat* get_var_importance();
virtual float get_proximity( const CvMat* sample1, const CvMat* sample2,
const CvMat* missing1 = 0, const CvMat* missing2 = 0 ) const;
-
+
virtual float calc_error( CvMLData* data, int type , std::vector<float>* resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}
- virtual float get_train_error();
+ virtual float get_train_error();
virtual void read( CvFileStorage* fs, CvFileNode* node );
virtual void write( CvFileStorage* fs, const char* name ) const;
{
protected:
virtual double calc_node_dir( CvDTreeNode* node );
- virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
- virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
- virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
virtual void split_node_data( CvDTreeNode* n );
};
virtual void try_split_node( CvDTreeNode* n );
virtual CvDTreeSplit* find_surrogate_split_ord( CvDTreeNode* n, int vi, uchar* ext_buf = 0 );
virtual CvDTreeSplit* find_surrogate_split_cat( CvDTreeNode* n, int vi, uchar* ext_buf = 0 );
- virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_ord_class( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
virtual CvDTreeSplit* find_split_cat_class( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
- virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_ord_reg( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
- virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
+ virtual CvDTreeSplit* find_split_cat_reg( CvDTreeNode* n, int vi,
float init_quality = 0, CvDTreeSplit* _split = 0, uchar* ext_buf = 0 );
virtual void calc_node_value( CvDTreeNode* n );
virtual double calc_node_dir( CvDTreeNode* n );
const CvMat* sampleIdx=0, const CvMat* varType=0,
const CvMat* missingDataMask=0,
CvBoostParams params=CvBoostParams() );
-
+
virtual bool train( const CvMat* trainData, int tflag,
const CvMat* responses, const CvMat* varIdx=0,
const CvMat* sampleIdx=0, const CvMat* varType=0,
const CvMat* missingDataMask=0,
CvBoostParams params=CvBoostParams(),
bool update=false );
-
+
virtual bool train( CvMLData* data,
CvBoostParams params=CvBoostParams(),
bool update=false );
const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
const cv::Mat& missingDataMask=cv::Mat(),
CvBoostParams params=CvBoostParams() );
-
+
CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag,
const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
const cv::Mat& missingDataMask=cv::Mat(),
CvBoostParams params=CvBoostParams(),
bool update=false );
-
+
CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(),
const cv::Range& slice=cv::Range::all(), bool rawMode=false,
bool returnSum=false ) const;
#endif
-
+
virtual float calc_error( CvMLData* _data, int type , std::vector<float> *resp = 0 ); // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}
CV_WRAP virtual void prune( CvSlice slice );
// DataType: CLASS CvGBTrees
// Gradient Boosting Trees (GBT) algorithm implementation.
-//
+//
// data - training dataset
// params - parameters of the CvGBTrees
// weak - array[0..(class_count-1)] of CvSeq
// missing - mask of the missing values in the training set. This
// matrix has the same size as train_data. 1 - missing
// value, 0 - not a missing value.
-// class_labels - output class labels map.
+// class_labels - output class labels map.
// rng - random number generator. Used for spliting the
// training set.
// class_count - count of output classes.
/*
// DataType: ENUM
// Loss functions implemented in CvGBTrees.
- //
+ //
// SQUARED_LOSS
// problem: regression
// loss = (x - x')^2
- //
+ //
// ABSOLUTE_LOSS
// problem: regression
// loss = abs(x - x')
- //
+ //
// HUBER_LOSS
// problem: regression
// loss = delta*( abs(x - x') - delta/2), if abs(x - x') > delta
//
// DEVIANCE_LOSS
// problem: classification
- //
- */
+ //
+ */
enum {SQUARED_LOSS=0, ABSOLUTE_LOSS, HUBER_LOSS=3, DEVIANCE_LOSS};
-
-
+
+
/*
// Default constructor. Creates a model only (without training).
// Should be followed by one form of the train(...) function.
//
// API
// CvGBTrees();
-
+
// INPUT
// OUTPUT
// RESULT
const CvMat* sampleIdx=0, const CvMat* varType=0,
const CvMat* missingDataMask=0,
CvGBTreesParams params=CvGBTreesParams() );
-
+
// INPUT
// trainData - a set of input feature vectors.
// size of matrix is
const CvMat* missingDataMask=0,
CvGBTreesParams params=CvGBTreesParams() );
-
+
/*
// Destructor.
*/
virtual ~CvGBTrees();
-
-
+
+
/*
// Gradient tree boosting model training
//
const CvMat* missingDataMask=0,
CvGBTreesParams params=CvGBTreesParams(),
bool update=false );
-
+
// INPUT
// trainData - a set of input feature vectors.
// size of matrix is
const CvMat* missingDataMask=0,
CvGBTreesParams params=CvGBTreesParams(),
bool update=false );
-
-
+
+
/*
// Gradient tree boosting model training
//
// virtual bool train( CvMLData* data,
CvGBTreesParams params=CvGBTreesParams(),
bool update=false ) {return false;};
-
+
// INPUT
// data - training set.
// params - parameters of GTB algorithm.
CvGBTreesParams params=CvGBTreesParams(),
bool update=false );
-
+
/*
// Response value prediction
//
// virtual float predict_serial( const CvMat* sample, const CvMat* missing=0,
CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ,
int k=-1 ) const;
-
+
// INPUT
// sample - input sample of the same type as in the training set.
// missing - missing values mask. missing=0 if there are no
// slice = CV_WHOLE_SEQ when all trees are used.
// k - number of ensemble used.
// k is in {-1,0,1,..,<count of output classes-1>}.
- // in the case of classification problem
+ // in the case of classification problem
// <count of output classes-1> ensembles are built.
// If k = -1 ordinary prediction is the result,
// otherwise function gives the prediction of the
virtual float predict_serial( const CvMat* sample, const CvMat* missing=0,
CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ,
int k=-1 ) const;
-
+
/*
// Response value prediction.
// Parallel version (in the case of TBB existence)
// virtual float predict( const CvMat* sample, const CvMat* missing=0,
CvMat* weak_responses=0, CvSlice slice = CV_WHOLE_SEQ,
int k=-1 ) const;
-
+
// INPUT
// sample - input sample of the same type as in the training set.
// missing - missing values mask. missing=0 if there are no
// slice = CV_WHOLE_SEQ when all trees are used.
// k - number of ensemble used.
// k is in {-1,0,1,..,<count of output classes-1>}.
- // in the case of classification problem
+ // in the case of classification problem
// <count of output classes-1> ensembles are built.
// If k = -1 ordinary prediction is the result,
// otherwise function gives the prediction of the
// OUTPUT
// RESULT
// Predicted value.
- */
+ */
virtual float predict( const CvMat* sample, const CvMat* missing=0,
CvMat* weakResponses=0, CvSlice slice = CV_WHOLE_SEQ,
int k=-1 ) const;
//
// API
// virtual void clear();
-
+
// INPUT
// OUTPUT
// delete data, weak, orig_response, sum_response,
std::vector<float> *resp = 0 );
/*
- //
+ //
// Write parameters of the gtb model and data. Write learned model.
//
// API
/*
- //
+ //
// Read parameters of the gtb model and data. Read learned model.
//
// API
*/
virtual void read( CvFileStorage* fs, CvFileNode* node );
-
+
// new-style C++ interface
CV_WRAP CvGBTrees( const cv::Mat& trainData, int tflag,
const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
const cv::Mat& missingDataMask=cv::Mat(),
CvGBTreesParams params=CvGBTreesParams() );
-
+
CV_WRAP virtual bool train( const cv::Mat& trainData, int tflag,
const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(),
CV_WRAP virtual float predict( const cv::Mat& sample, const cv::Mat& missing=cv::Mat(),
const cv::Range& slice = cv::Range::all(),
int k=-1 ) const;
-
+
protected:
/*
//
// API
// virtual void find_gradient( const int k = 0);
-
+
// INPUT
// k - used for classification problem, determining current
// tree ensemble.
*/
virtual void find_gradient( const int k = 0);
-
+
/*
- //
+ //
// Change values in tree leaves according to the used loss function.
//
// API
/*
- //
+ //
// Find optimal constant prediction value according to the used loss
// function.
// The goal is to find a constant which gives the minimal summary loss
*/
virtual float find_optimal_value( const CvMat* _Idx );
-
+
/*
- //
+ //
// Randomly split the whole training set in two parts according
// to params.portion.
//
/*
- //
+ //
// Internal recursive function giving an array of subtree tree leaves.
//
// API
// RESULT
*/
void leaves_get( CvDTreeNode** leaves, int& count, CvDTreeNode* node );
-
-
+
+
/*
- //
+ //
// Get leaves of the tree.
//
// API
*/
CvDTreeNode** GetLeaves( const CvDTree* dtree, int& len );
-
+
/*
- //
+ //
// Is it a regression or a classification.
//
// API
/*
- //
+ //
// Write parameters of the gtb model.
//
// API
/*
- //
+ //
// Read parameters of the gtb model and data.
//
// API
// RESULT
*/
virtual void read_params( CvFileStorage* fs, CvFileNode* fnode );
- int get_len(const CvMat* mat) const;
+ int get_len(const CvMat* mat) const;
-
+
CvDTreeTrainData* data;
CvGBTreesParams params;
virtual void create( const CvMat* layerSizes,
int activateFunc=CvANN_MLP::SIGMOID_SYM,
double fparam1=0, double fparam2=0 );
-
+
virtual int train( const CvMat* inputs, const CvMat* outputs,
const CvMat* sampleWeights, const CvMat* sampleIdx=0,
CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(),
int flags=0 );
virtual float predict( const CvMat* inputs, CV_OUT CvMat* outputs ) const;
-
+
#ifndef SWIG
CV_WRAP CvANN_MLP( const cv::Mat& layerSizes,
int activateFunc=CvANN_MLP::SIGMOID_SYM,
double fparam1=0, double fparam2=0 );
-
+
CV_WRAP virtual void create( const cv::Mat& layerSizes,
int activateFunc=CvANN_MLP::SIGMOID_SYM,
- double fparam1=0, double fparam2=0 );
-
+ double fparam1=0, double fparam2=0 );
+
CV_WRAP virtual int train( const cv::Mat& inputs, const cv::Mat& outputs,
const cv::Mat& sampleWeights, const cv::Mat& sampleIdx=cv::Mat(),
CvANN_MLP_TrainParams params = CvANN_MLP_TrainParams(),
- int flags=0 );
-
+ int flags=0 );
+
CV_WRAP virtual float predict( const cv::Mat& inputs, CV_OUT cv::Mat& outputs ) const;
#endif
-
+
CV_WRAP virtual void clear();
// possible activation functions
virtual ~CvMLData();
// returns:
- // 0 - OK
+ // 0 - OK
// -1 - file can not be opened or is not correct
int read_csv( const char* filename );
const CvMat* get_train_sample_idx() const;
const CvMat* get_test_sample_idx() const;
void mix_train_and_test_idx();
-
+
const CvMat* get_var_idx();
void chahge_var_idx( int vi, bool state ); // misspelled (saved for back compitability),
// use change_var_idx
void set_var_types( const char* str ); // str examples:
// "ord[0-17],cat[18]", "ord[0,2,4,10-12], cat[1,3,5-9,13,14]",
// "cat", "ord" (all vars are categorical/ordered)
- void change_var_type( int var_idx, int type); // type in { CV_VAR_ORDERED, CV_VAR_CATEGORICAL }
-
+ void change_var_type( int var_idx, int type); // type in { CV_VAR_ORDERED, CV_VAR_CATEGORICAL }
+
void set_delimiter( char ch );
char get_delimiter() const;
void set_miss_ch( char ch );
char get_miss_ch() const;
-
+
const std::map<std::string, int>& get_class_labels_map() const;
protected:
void str_to_flt_elem( const char* token, float& flt_elem, int& type);
void free_train_test_idx();
-
+
char delimiter;
char miss_ch;
//char flt_separator;
int train_sample_count;
bool mix;
-
+
int total_class_count;
std::map<std::string, int> class_map;
namespace cv
{
-
+
typedef CvStatModel StatModel;
typedef CvParamGrid ParamGrid;
typedef CvNormalBayesClassifier NormalBayesClassifier;
template<> CV_EXPORTS void Ptr<CvDTreeSplit>::delete_obj();
CV_EXPORTS bool initModule_ml(void);
-
+
}
#endif // __cplusplus
n *= cols;
xf -= n; df -= n;
-
+
for( i = 0; i < n; i++ )
df[i] *= xf[i];
}
xf[j] = (xf[j] + bias[j])*scale;
df[j] = -fabs(xf[j]);
}
-
+
cvExp( _df, _df );
n *= cols;
}
struct rprop_loop {
- rprop_loop(const CvANN_MLP* _point, double**& _weights, int& _count, int& _ivcount, CvVectors* _x0,
+ rprop_loop(const CvANN_MLP* _point, double**& _weights, int& _count, int& _ivcount, CvVectors* _x0,
int& _l_count, CvMat*& _layer_sizes, int& _ovcount, int& _max_count,
- CvVectors* _u, const double*& _sw, double& _inv_count, CvMat*& _dEdw, int& _dcount0, double* _E, int _buf_sz)
+ CvVectors* _u, const double*& _sw, double& _inv_count, CvMat*& _dEdw, int& _dcount0, double* _E, int _buf_sz)
{
point = _point;
weights = _weights;
E = _E;
buf_sz = _buf_sz;
}
-
+
const CvANN_MLP* point;
double** weights;
int count;
double* E;
int buf_sz;
-
+
void operator()( const cv::BlockedRange& range ) const
{
double* buf_ptr;
double** x = 0;
- double **df = 0;
+ double **df = 0;
int total = 0;
-
+
for(int i = 0; i < l_count; i++ )
total += layer_sizes->data.i[i];
CvMat* buf;
for(int si = range.begin(); si < range.end(); si++ )
{
if (si % dcount0 != 0) continue;
- int n1, n2, j, k;
+ int n1, n2, k;
double* w;
CvMat _w, _dEdw, hdr1, hdr2, ghdr1, ghdr2, _df;
CvMat *x1, *x2, *grad1, *grad2, *temp;
// grab and preprocess input data
if( x0->type == CV_32F )
- {
+ {
for(int i = 0; i < dcount; i++ )
{
const float* x0data = x0->data.fl[si+i];
double* xdata = x[0]+i*ivcount;
- for( j = 0; j < ivcount; j++ )
+ for(int j = 0; j < ivcount; j++ )
xdata[j] = x0data[j]*w[j*2] + w[j*2+1];
}
- }
+ }
else
for(int i = 0; i < dcount; i++ )
{
const double* x0data = x0->data.db[si+i];
double* xdata = x[0]+i*ivcount;
- for( j = 0; j < ivcount; j++ )
+ for(int j = 0; j < ivcount; j++ )
xdata[j] = x0data[j]*w[j*2] + w[j*2+1];
- }
+ }
cvInitMatHeader( x1, dcount, ivcount, CV_64F, x[0] );
// forward pass, compute y[i]=w*x[i-1], x[i]=f(y[i]), df[i]=f'(y[i])
double* gdata = grad1->data.db + i*ovcount;
double sweight = sw ? sw[si+i] : inv_count, E1 = 0;
- for( j = 0; j < ovcount; j++ )
+ for(int j = 0; j < ovcount; j++ )
{
double t = udata[j]*w[j*2] + w[j*2+1] - xdata[j];
gdata[j] = t*sweight;
}
*E += sweight*E1;
}
-
+
// backward pass, update dEdw
#ifdef HAVE_TBB
static tbb::spin_mutex mutex;
{
double* dst = _dEdw.data.db + n1*n2;
const double* src = grad1->data.db + k*n2;
- for( j = 0; j < n2; j++ )
+ for(int j = 0; j < n2; j++ )
dst[j] += src[j];
}
-
+
if (i > 1)
cvInitMatHeader( &_w, n1, n2, CV_64F, weights[i] );
#ifdef HAVE_TBB
int CvANN_MLP::train_rprop( CvVectors x0, CvVectors u, const double* sw )
{
- const int max_buf_sz = 1 << 16;
+ const int max_buf_size = 1 << 16;
CvMat* dw = 0;
CvMat* dEdw = 0;
CvMat* prev_dEdw_sign = 0;
cvZero( prev_dEdw_sign );
inv_count = 1./count;
- dcount0 = max_buf_sz/(2*total);
+ dcount0 = max_buf_size/(2*total);
dcount0 = MAX( dcount0, 1 );
dcount0 = MIN( dcount0, count );
buf_sz = dcount0*(total + max_count)*2;
double E = 0;
// first, iterate through all the samples and compute dEdw
- cv::parallel_for(cv::BlockedRange(0, count),
- rprop_loop(this, weights, count, ivcount, &x0, l_count, layer_sizes,
+ cv::parallel_for(cv::BlockedRange(0, count),
+ rprop_loop(this, weights, count, ivcount, &x0, l_count, layer_sizes,
ovcount, max_count, &u, sw, inv_count, dEdw, dcount0, &E, buf_sz)
);
void CvANN_MLP::create( const Mat& _layer_sizes, int _activ_func,
double _f_param1, double _f_param2 )
{
- CvMat layer_sizes = _layer_sizes;
- create( &layer_sizes, _activ_func, _f_param1, _f_param2 );
+ CvMat cvlayer_sizes = _layer_sizes;
+ create( &cvlayer_sizes, _activ_func, _f_param1, _f_param2 );
}
int CvANN_MLP::train( const Mat& _inputs, const Mat& _outputs,
{
CvMat inputs = _inputs, outputs = _outputs, sweights = _sample_weights, sidx = _sample_idx;
return train(&inputs, &outputs, sweights.data.ptr ? &sweights : 0,
- sidx.data.ptr ? &sidx : 0, _params, flags);
+ sidx.data.ptr ? &sidx : 0, _params, flags);
}
float CvANN_MLP::predict( const Mat& _inputs, Mat& _outputs ) const
CV_Assert(layer_sizes != 0);
_outputs.create(_inputs.rows, layer_sizes->data.i[layer_sizes->cols-1], _inputs.type());
CvMat inputs = _inputs, outputs = _outputs;
-
- return predict(&inputs, &outputs);
+
+ return predict(&inputs, &outputs);
}
/* End of file. */
void
-CvBoostTree::scale( double scale )
+CvBoostTree::scale( double _scale )
{
CvDTreeNode* node = root;
CvDTreeNode* parent;
for(;;)
{
- node->value *= scale;
+ node->value *= _scale;
if( !node->left )
break;
node = node->left;
int i, best_i = -1;
double L = 0, R = weights[n];
double best_val = init_quality, lsum = 0, rsum = node->value*R;
-
+
// compensate for missing values
for( i = n1; i < n; i++ )
{
{
R += counts[i];
rsum += sum[i];
- sum[i] = fabs(counts[i]) > DBL_EPSILON ? sum[i]/counts[i] : 0;
+ sum[i] = fabs(counts[i]) > DBL_EPSILON ? sum[i]/counts[i] : 0;
sum_ptr[i] = sum + i;
}
__BEGIN__;
int i;
-
+
set_params( _params );
cvReleaseMat( &active_vars );
if ( (_params.boost_type == LOGIT) || (_params.boost_type == GENTLE) )
data->do_responses_copy();
-
+
update_weights( 0 );
for( i = 0; i < params.weak_count; i++ )
}
bool CvBoost::train( CvMLData* _data,
- CvBoostParams params,
+ CvBoostParams _params,
bool update )
{
bool result = false;
const CvMat* var_idx = _data->get_var_idx();
CV_CALL( result = train( values, CV_ROW_SAMPLE, response, var_idx,
- train_sidx, var_types, missing, params, update ) );
+ train_sidx, var_types, missing, _params, update ) );
__END__;
// invert the subsample mask
cvXorS( subsample_mask, cvScalar(1.), subsample_mask );
data->get_vectors( subsample_mask, values, missing, 0 );
-
+
_sample = cvMat( 1, data->var_count, CV_32F );
_mask = cvMat( 1, data->var_count, CV_8U );
}
-const CvMat*
+const CvMat*
CvBoost::get_active_vars( bool absolute_idx )
{
CvMat* mask = 0;
CvMat* inv_map = 0;
CvMat* result = 0;
-
+
CV_FUNCNAME( "CvBoost::get_active_vars" );
__BEGIN__;
-
+
if( !weak )
CV_ERROR( CV_StsError, "The boosted tree ensemble has not been trained yet" );
int i, j, nactive_vars;
CvBoostTree* wtree;
const CvDTreeNode* node;
-
+
assert(!active_vars && !active_vars_abs);
mask = cvCreateMat( 1, data->var_count, CV_8U );
inv_map = cvCreateMat( 1, data->var_count, CV_32S );
}
nactive_vars = cvCountNonZero(mask);
-
+
//if ( nactive_vars > 0 )
{
active_vars = cvCreateMat( 1, nactive_vars, CV_32S );
j++;
}
}
-
+
// second pass: now compute the condensed indices
cvStartReadSeq( weak, &reader );
"floating-point vector of the same number of components as the length of input slice" );
wstep = CV_IS_MAT_CONT(weak_responses->type) ? 1 : weak_responses->step/sizeof(float);
}
-
+
int var_count = active_vars->cols;
const int* vtype = data->var_type->data.i;
const int* cmap = data->cat_map->data.i;
CvBoostTree* wtree;
const CvDTreeNode* node;
CV_READ_SEQ_ELEM( wtree, reader );
-
+
node = wtree->get_root();
while( node->left )
{
{
const int* avars = active_vars->data.i;
const uchar* m = _missing ? _missing->data.ptr : 0;
-
+
// full-featured version
for( i = 0; i < weak_count; i++ )
{
CvBoostTree* wtree;
const CvDTreeNode* node;
CV_READ_SEQ_ELEM( wtree, reader );
-
+
node = wtree->get_root();
while( node->left )
{
{
CvMat sample, miss;
int si = sidx ? sidx[i] : i;
- cvGetRow( values, &sample, si );
- if( missing )
- cvGetRow( missing, &miss, si );
+ cvGetRow( values, &sample, si );
+ if( missing )
+ cvGetRow( missing, &miss, si );
float r = (float)predict( &sample, missing ? &miss : 0 );
if( pred_resp )
pred_resp[i] = r;
CvMat sample, miss;
int si = sidx ? sidx[i] : i;
cvGetRow( values, &sample, si );
- if( missing )
- cvGetRow( missing, &miss, si );
+ if( missing )
+ cvGetRow( missing, &miss, si );
float r = (float)predict( &sample, missing ? &miss : 0 );
if( pred_resp )
pred_resp[i] = r;
float d = r - response->data.fl[si*r_step];
err += d*d;
}
- err = sample_count ? err / (float)sample_count : -FLT_MAX;
+ err = sample_count ? err / (float)sample_count : -FLT_MAX;
}
return err;
}
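// Illustrative sketch of the regression branch of calc_error above: the error
// is the mean squared difference between predictions and stored responses, or
// -FLT_MAX when no samples are selected.  Assumes the per-sample predictions
// have already been gathered into a plain vector.
#include <cfloat>
#include <cstddef>
#include <vector>

static float mean_squared_error( const std::vector<float>& predictions,
                                 const std::vector<float>& responses )
{
    if( predictions.empty() )
        return -FLT_MAX;
    double err = 0;
    for( std::size_t i = 0; i < predictions.size(); i++ )
    {
        double d = predictions[i] - responses[i];
        err += d*d;
    }
    return (float)(err / predictions.size());
}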
default_model_name = "my_boost_tree";
active_vars = active_vars_abs = orig_response = sum_response = weak_eval =
subsample_mask = weights = subtree_weights = 0;
-
+
train( _train_data, _tflag, _responses, _var_idx, _sample_idx,
_var_type, _missing_mask, _params );
-}
+}
bool
weak_count = weak->total;
slice.start_index = 0;
}
-
+
if( !(weak_responses->data && weak_responses->type() == CV_32FC1 &&
(weak_responses->cols == 1 || weak_responses->rows == 1) &&
weak_responses->cols + weak_responses->rows - 1 == weak_count) )
CV_FUNCNAME( "CvERTreeTrainData::set_data" );
__BEGIN__;
-
+
int sample_all = 0, r_type, cv_n;
int total_c_count = 0;
int tree_block_size, temp_block_size, max_split_size, nv_size, cv_size = 0;
int vi, i, size;
char err[100];
const int *sidx = 0, *vidx = 0;
-
+
if ( _params.use_surrogates )
CV_ERROR(CV_StsBadArg, "CvERTrees do not support surrogate splits");
-
+
if( _update_data && data_root )
{
CV_ERROR(CV_StsBadArg, "CvERTrees do not support data update");
CV_ERROR( CV_StsBadArg, "The array of _responses must be an integer or "
"floating-point vector containing as many elements as "
"the total number of samples in the training data matrix" );
-
+
is_buf_16u = false;
- if ( sample_count < 65536 )
- is_buf_16u = true;
-
+ if ( sample_count < 65536 )
+ is_buf_16u = true;
+
r_type = CV_VAR_CATEGORICAL;
if( _var_type )
CV_CALL( var_type0 = cvPreprocessVarType( _var_type, var_idx, var_count, &r_type ));
CV_CALL( var_type = cvCreateMat( 1, var_count+2, CV_32SC1 ));
-
+
cat_var_count = 0;
ord_var_count = -1;
buf_size = (work_var_count + 1)*sample_count;
shared = _shared;
buf_count = shared ? 2 : 1;
-
+
if ( is_buf_16u )
{
CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_16UC1 ));
{
CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_32SC1 ));
CV_CALL( int_ptr = (int**)cvAlloc( sample_count*sizeof(int_ptr[0]) ));
- }
+ }
size = is_classifier ? cat_var_count+1 : cat_var_count;
size = !size ? 1 : size;
CV_CALL( cat_count = cvCreateMat( 1, size, CV_32SC1 ));
CV_CALL( cat_ofs = cvCreateMat( 1, size, CV_32SC1 ));
-
+
size = is_classifier ? (cat_var_count + 1)*params.max_categories : cat_var_count*params.max_categories;
size = !size ? 1 : size;
CV_CALL( cat_map = cvCreateMat( 1, size, CV_32SC1 ));
{
int c_count, prev_label;
int* c_map;
-
+
if (is_buf_16u)
udst = (unsigned short*)(buf->data.s + ci*sample_count);
else
idst = buf->data.i + ci*sample_count;
-
+
// copy data
for( i = 0; i < sample_count; i++ )
{
_idst[i] = val;
pair16u32s_ptr[i].u = udst + i;
pair16u32s_ptr[i].i = _idst + i;
- }
+ }
else
{
idst[i] = val;
// replace labels for missing values with -1
for( ; i < sample_count; i++ )
*int_ptr[i] = -1;
- }
+ }
}
else if( ci < 0 ) // process ordered variable
{
if( cv_n )
{
- unsigned short* udst = 0;
- int* idst = 0;
+ unsigned short* usdst = 0;
+ int* idst2 = 0;
if (is_buf_16u)
{
- udst = (unsigned short*)(buf->data.s + (get_work_var_count()-1)*sample_count);
+ usdst = (unsigned short*)(buf->data.s + (get_work_var_count()-1)*sample_count);
for( i = vi = 0; i < sample_count; i++ )
{
- udst[i] = (unsigned short)vi++;
+ usdst[i] = (unsigned short)vi++;
vi &= vi < cv_n ? -1 : 0;
}
int a = (*rng)(sample_count);
int b = (*rng)(sample_count);
unsigned short unsh = (unsigned short)vi;
- CV_SWAP( udst[a], udst[b], unsh );
+ CV_SWAP( usdst[a], usdst[b], unsh );
}
}
else
{
- idst = buf->data.i + (get_work_var_count()-1)*sample_count;
+ idst2 = buf->data.i + (get_work_var_count()-1)*sample_count;
for( i = vi = 0; i < sample_count; i++ )
{
- idst[i] = vi++;
+ idst2[i] = vi++;
vi &= vi < cv_n ? -1 : 0;
}
{
int a = (*rng)(sample_count);
int b = (*rng)(sample_count);
- CV_SWAP( idst[a], idst[b], vi );
+ CV_SWAP( idst2[a], idst2[b], vi );
}
}
}
- if ( cat_map )
+ if ( cat_map )
cat_map->cols = MAX( total_c_count, 1 );
max_split_size = cvAlign(sizeof(CvDTreeSplit) +
const float** ord_values, const int** missing, int* sample_indices_buf )
{
int vidx = var_idx ? var_idx->data.i[vi] : vi;
- int node_sample_count = n->sample_count;
+ int node_sample_count = n->sample_count;
// may use missing_buf as buffer for sample indices!
const int* sample_indices = get_sample_indices(n, sample_indices_buf ? sample_indices_buf : missing_buf);
if( !is_buf_16u )
cat_values = buf->data.i + n->buf_idx*buf->cols + ci*sample_count + n->offset;
else {
- const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
+ const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
ci*sample_count + n->offset);
for( int i = 0; i < n->sample_count; i++ )
cat_values_buf[i] = short_values[i];
void CvERTreeTrainData::get_vectors( const CvMat* _subsample_idx,
float* values, uchar* missing,
- float* responses, bool get_class_idx )
+ float* _responses, bool get_class_idx )
{
CvMat* subsample_idx = 0;
CvMat* subsample_co = 0;
}
// copy responses
- if( responses )
+ if( _responses )
{
if( is_classifier )
{
int idx = sidx ? sidx[i] : i;
int val = get_class_idx ? src[idx] :
cat_map->data.i[cat_ofs->data.i[cat_var_count]+src[idx]];
- responses[i] = (float)val;
+ _responses[i] = (float)val;
}
}
- else
+ else
{
float* _values_buf = (float*)(uchar*)inn_buf;
int* sample_idx_buf = (int*)(_values_buf + sample_count);
for( i = 0; i < count; i++ )
{
int idx = sidx ? sidx[i] : i;
- responses[i] = _values[idx];
+ _responses[i] = _values[idx];
}
}
}
CvDTreeNode* CvERTreeTrainData::subsample_data( const CvMat* _subsample_idx )
{
CvDTreeNode* root = 0;
-
+
CV_FUNCNAME( "CvERTreeTrainData::subsample_data" );
__BEGIN__;
const float epsilon = FLT_EPSILON*2;
const float split_delta = (1 + FLT_EPSILON) * FLT_EPSILON;
- int n = node->sample_count, i;
+ int n = node->sample_count;
int m = data->get_num_classes();
cv::AutoBuffer<uchar> inn_buf;
for (; smpi < n; smpi++)
{
float ptemp = values[smpi];
- int m = missing[smpi];
- if (m) continue;
+ int ms = missing[smpi];
+ if (ms) continue;
if ( ptemp < pmin)
pmin = ptemp;
if ( ptemp > pmax)
if (split_val - pmin <= FLT_EPSILON)
split_val = pmin + split_delta;
if (pmax - split_val <= FLT_EPSILON)
- split_val = pmax - split_delta;
+ split_val = pmax - split_delta;
// calculate Gini index
if ( !priors )
cv::AutoBuffer<int> lrc(m*2);
int *lc = lrc, *rc = lc + m;
int L = 0, R = 0;
-
+
// init arrays of class instance counters on both sides of the split
- for( i = 0; i < m; i++ )
+ for(int i = 0; i < m; i++ )
{
lc[i] = 0;
rc[i] = 0;
{
int r = responses[si];
float val = values[si];
- int m = missing[si];
- if (m) continue;
+ int ms = missing[si];
+ if (ms) continue;
if ( val < split_val )
{
lc[r]++;
cv::AutoBuffer<double> lrc(m*2);
double *lc = lrc, *rc = lc + m;
double L = 0, R = 0;
-
+
// init arrays of class instance counters on both sides of the split
- for( i = 0; i < m; i++ )
+ for(int i = 0; i < m; i++ )
{
lc[i] = 0;
rc[i] = 0;
{
int r = responses[si];
float val = values[si];
- int m = missing[si];
+ int ms = missing[si];
double p = priors[r];
- if (m) continue;
+ if (ms) continue;
if ( val < split_val )
{
lc[r] += p;
}
best_val = (lbest_val*R + rbest_val*L) / (L*R);
}
-
+
}
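// Illustrative sketch of the split quality computed above: for a candidate
// threshold, class counts are accumulated on each side (skipping samples with
// a missing value) and the Gini-style score (sum_c lc[c]^2)*R + (sum_c rc[c]^2)*L,
// normalized by L*R, is returned.  Plain arrays stand in for the CvMat buffers;
// priors are omitted for brevity.
#include <cstddef>
#include <vector>

static double gini_split_quality( const std::vector<float>& values,
                                  const std::vector<int>&   responses,  // class ids in [0, m)
                                  const std::vector<int>&   missing,    // nonzero == value missing
                                  float split_val, int m )
{
    std::vector<double> lc(m, 0.0), rc(m, 0.0);
    double L = 0, R = 0;
    for( std::size_t si = 0; si < values.size(); si++ )
    {
        if( missing[si] ) continue;
        if( values[si] < split_val ) { lc[responses[si]] += 1; L += 1; }
        else                         { rc[responses[si]] += 1; R += 1; }
    }
    double lbest_val = 0, rbest_val = 0;
    for( int c = 0; c < m; c++ )
    {
        lbest_val += lc[c]*lc[c];
        rbest_val += rc[c]*rc[c];
    }
    return (L > 0 && R > 0) ? (lbest_val*R + rbest_val*L)/(L*R) : 0.0;
}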
CvDTreeSplit* split = 0;
{
int ci = data->get_var_type(vi);
int n = node->sample_count;
- int cm = data->get_num_classes();
+ int cm = data->get_num_classes();
int vm = data->cat_count->data.i[ci];
double best_val = init_quality;
CvDTreeSplit *split = 0;
const int* labels = data->get_cat_var_data( node, vi, ext_buf );
const int* responses = data->get_class_labels( node, ext_buf + n );
-
- const double* priors = data->have_priors ? data->priors_mult->data.db : 0;
+
+ const double* priors = data->have_priors ? data->priors_mult->data.db : 0;
// create random class mask
cv::AutoBuffer<int> valid_cidx(vm);
if (var_class_mask->data.ptr[mask_class_idx])
{
lc[r]++;
- L++;
+ L++;
split->subset[var_class_idx >> 5] |= 1 << (var_class_idx & 31);
}
else
{
lbest_val += lc[i]*lc[i];
rbest_val += rc[i]*rc[i];
- }
+ }
best_val = (lbest_val*R + rbest_val*L) / ((double)(L*R));
}
else
continue;
double p = priors[si];
int mask_class_idx = valid_cidx[var_class_idx];
-
+
if (var_class_mask->data.ptr[mask_class_idx])
{
lc[r]+=(int)p;
- L+=p;
+ L+=p;
split->subset[var_class_idx >> 5] |= 1 << (var_class_idx & 31);
}
else
split->quality = (float)best_val;
cvReleaseMat(&var_class_mask);
- }
- }
+ }
+ }
return split;
}
if (split_val - pmin <= FLT_EPSILON)
split_val = pmin + split_delta;
if (pmax - split_val <= FLT_EPSILON)
- split_val = pmax - split_delta;
+ split_val = pmax - split_delta;
for (int si = 0; si < n; si++)
{
else
{
rsum += r;
- R++;
+ R++;
}
}
best_val = (lsum*lsum*R + rsum*rsum*L)/((double)L*R);
if (var_class_mask->data.ptr[mask_class_idx])
{
lsum += r;
- L++;
+ L++;
split->subset[var_class_idx >> 5] |= 1 << (var_class_idx & 31);
}
else
split->quality = (float)best_val;
cvReleaseMat(&var_class_mask);
- }
- }
+ }
+ }
return split;
}
{
int ci = data->get_var_type(vi);
if (ci >= 0) continue;
-
+
int n1 = node->get_num_valid(vi), nr1 = 0;
float* values_buf = (float*)(uchar*)inn_buf;
int* missing_buf = (int*)(values_buf + n);
for( i = 0; i < n; i++ )
nr1 += ((!missing[i]) & dir[i]);
left->set_num_valid(vi, n1 - nr1);
- right->set_num_valid(vi, nr1);
+ right->set_num_valid(vi, nr1);
}
// split categorical vars, responses and cv_labels using new_idx relocation table
for( vi = 0; vi < data->get_work_var_count() + data->ord_var_count; vi++ )
if (data->is_buf_16u)
{
- unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols +
+ unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols +
ci*scount + left->offset);
- unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols +
+ unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols +
ci*scount + right->offset);
-
+
for( i = 0; i < n; i++ )
{
int d = dir[i];
}
else
{
- int *ldst = buf->data.i + left->buf_idx*buf->cols +
+ int *ldst = buf->data.i + left->buf_idx*buf->cols +
ci*scount + left->offset;
- int *rdst = buf->data.i + right->buf_idx*buf->cols +
+ int *rdst = buf->data.i + right->buf_idx*buf->cols +
ci*scount + right->offset;
-
+
for( i = 0; i < n; i++ )
{
int d = dir[i];
*ldst = idx;
ldst++;
}
-
+
}
if( vi < data->var_count )
left->set_num_valid(vi, n1 - nr1);
right->set_num_valid(vi, nr1);
}
- }
+ }
}
// split sample indices
temp_buf[i] = sample_idx_src[i];
int pos = data->get_work_var_count();
-
+
if (data->is_buf_16u)
{
- unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
+ unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
pos*scount + left->offset);
- unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
+ unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
pos*scount + right->offset);
-
+
for (i = 0; i < n; i++)
{
int d = dir[i];
}
else
{
- int* ldst = buf->data.i + left->buf_idx*buf->cols +
+ int* ldst = buf->data.i + left->buf_idx*buf->cols +
pos*scount + left->offset;
- int* rdst = buf->data.i + right->buf_idx*buf->cols +
+ int* rdst = buf->data.i + right->buf_idx*buf->cols +
pos*scount + right->offset;
for (i = 0; i < n; i++)
{
}
}
}
-
+
// deallocate the parent node data that is not needed anymore
- data->free_node_data(node);
+ data->free_node_data(node);
}
CvERTrees::CvERTrees()
__END__
return result;
-
+
}
-bool CvERTrees::train( CvMLData* data, CvRTParams params)
+bool CvERTrees::train( CvMLData* _data, CvRTParams params)
{
bool result = false;
__BEGIN__;
- CV_CALL( result = CvRTrees::train( data, params) );
+ CV_CALL( result = CvRTrees::train( _data, params) );
__END__;
const int dims = data->var_count;
float maximal_response = 0;
- CvMat* oob_sample_votes = 0;
+ CvMat* oob_sample_votes = 0;
CvMat* oob_responses = 0;
float* oob_samples_perm_ptr= 0;
 // initialize these variables to avoid warning C4701
CvMat oob_predictions_sum = cvMat( 1, 1, CV_32FC1 );
CvMat oob_num_of_predictions = cvMat( 1, 1, CV_32FC1 );
-
+
nsamples = data->sample_count;
nclasses = data->get_num_classes();
cvGetRow( oob_responses, &oob_predictions_sum, 0 );
cvGetRow( oob_responses, &oob_num_of_predictions, 1 );
}
-
+
CV_CALL(oob_samples_perm_ptr = (float*)cvAlloc( sizeof(float)*nsamples*dims ));
CV_CALL(samples_ptr = (float*)cvAlloc( sizeof(float)*nsamples*dims ));
CV_CALL(missing_ptr = (uchar*)cvAlloc( sizeof(uchar)*nsamples*dims ));
- CV_CALL(true_resp_ptr = (float*)cvAlloc( sizeof(float)*nsamples ));
+ CV_CALL(true_resp_ptr = (float*)cvAlloc( sizeof(float)*nsamples ));
CV_CALL(data->get_vectors( 0, samples_ptr, missing_ptr, true_resp_ptr ));
{
maximal_response = (float)MAX( MAX( fabs(minval), fabs(maxval) ), 0 );
}
}
-
+
trees = (CvForestTree**)cvAlloc( sizeof(trees[0])*max_ntrees );
memset( trees, 0, sizeof(trees[0])*max_ntrees );
sample.data.fl += dims, missing.data.ptr += dims )
{
CvDTreeNode* predicted_node = 0;
-
+
// predict oob samples
if( !predicted_node )
CV_CALL(predicted_node = tree->predict(&sample, &missing, true));
}
result = true;
-
+
cvFree( &oob_samples_perm_ptr );
cvFree( &samples_ptr );
cvFree( &missing_ptr );
cvFree( &true_resp_ptr );
-
+
cvReleaseMat( &sample_idx_for_tree );
cvReleaseMat( &oob_sample_votes );
static CV_IMPLEMENT_QSORT_EX( icvSortFloat, float, CV_CMP_FLOAT, float)\r
\r
//===========================================================================\r
-string ToString(int i)\r
+static string ToString(int i)\r
{\r
stringstream tmp;\r
tmp << i;\r
//----------------------------- CvGBTreesParams -----------------------------\r
//===========================================================================\r
\r
-CvGBTreesParams::CvGBTreesParams() \r
+CvGBTreesParams::CvGBTreesParams()\r
: CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 )\r
{\r
weak_count = 200;\r
\r
//===========================================================================\r
\r
-CvGBTreesParams::CvGBTreesParams( int _loss_function_type, int _weak_count, \r
- float _shrinkage, float _subsample_portion, \r
+CvGBTreesParams::CvGBTreesParams( int _loss_function_type, int _weak_count,\r
+ float _shrinkage, float _subsample_portion,\r
int _max_depth, bool _use_surrogates )\r
: CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 )\r
{\r
class_labels = 0;\r
class_count = 1;\r
delta = 0.0f;\r
- \r
+\r
clear();\r
}\r
\r
//data->shared = false;\r
for (int i=0; i<class_count; ++i)\r
{\r
- int weak_count = cvSliceLength( slice, weak[i] );\r
+ int weak_count = cvSliceLength( slice, weak[i] );\r
if ((weak[i]) && (weak_count))\r
{\r
- cvStartReadSeq( weak[i], &reader ); \r
+ cvStartReadSeq( weak[i], &reader );\r
cvSetSeqReaderPos( &reader, slice.start_index );\r
for (int j=0; j<weak_count; ++j)\r
{\r
if (weak[i]) cvReleaseMemStorage( &(weak[i]->storage) );\r
delete[] weak;\r
}\r
- if (data) \r
+ if (data)\r
{\r
data->shared = false;\r
delete data;\r
\r
//===========================================================================\r
\r
-bool \r
-CvGBTrees::train( CvMLData* data, CvGBTreesParams params, bool update )\r
+bool\r
+CvGBTrees::train( CvMLData* _data, CvGBTreesParams _params, bool update )\r
{\r
bool result;\r
- result = train ( data->get_values(), CV_ROW_SAMPLE,\r
- data->get_responses(), data->get_var_idx(),\r
- data->get_train_sample_idx(), data->get_var_types(),\r
- data->get_missing(), params, update);\r
+ result = train ( _data->get_values(), CV_ROW_SAMPLE,\r
+ _data->get_responses(), _data->get_var_idx(),\r
+ _data->get_train_sample_idx(), _data->get_var_types(),\r
+ _data->get_missing(), _params, update);\r
//update is not supported\r
return result;\r
}\r
}\r
\r
orig_response = cvCreateMat( 1, n, CV_32F );\r
- int step = (_responses->cols > _responses->rows) ? 1 : _responses->step / CV_ELEM_SIZE(_responses->type);\r
+ int step = (_responses->cols > _responses->rows) ? 1 : _responses->step / CV_ELEM_SIZE(_responses->type);\r
switch (CV_MAT_TYPE(_responses->type))\r
{\r
case CV_32FC1:\r
- {\r
- for (int i=0; i<n; ++i)\r
+ {\r
+ for (int i=0; i<n; ++i)\r
orig_response->data.fl[i] = _responses->data.fl[i*step];\r
- }; break;\r
+ }; break;\r
case CV_32SC1:\r
{\r
for (int i=0; i<n; ++i)\r
mask[j] = 1;\r
}\r
delete[] mask;\r
- \r
+\r
class_labels = cvCreateMat(1, class_count, CV_32S);\r
class_labels->data.i[0] = int(orig_response->data.fl[0]);\r
int j = 1;\r
if (_sample_idx)\r
{\r
int sample_idx_len = get_len(_sample_idx);\r
- \r
+\r
switch (CV_MAT_TYPE(_sample_idx->type))\r
{\r
case CV_32SC1:\r
{\r
sample_idx = cvCreateMat( 1, sample_idx_len, CV_32S );\r
for (int i=0; i<sample_idx_len; ++i)\r
- sample_idx->data.i[i] = _sample_idx->data.i[i];\r
+ sample_idx->data.i[i] = _sample_idx->data.i[i];\r
} break;\r
case CV_8S:\r
case CV_8U:\r
for (int i=0; i<sample_idx_len; ++i)\r
if (int( _sample_idx->data.ptr[i] ))\r
sample_idx->data.i[active_samples_count++] = i;\r
- \r
+\r
} break;\r
default: CV_Error(CV_StsUnmatchedFormats, "_sample_idx should be a 32sC1, 8sC1 or 8uC1 vector.");\r
}\r
storage = cvCreateMemStorage();\r
weak[i] = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvDTree*), storage );\r
storage = 0;\r
- } \r
+ }\r
\r
// subsample params and data\r
rng = &cv::theRNG();\r
\r
- int samples_count = get_len(sample_idx);\r
+ int samples_count = get_len(sample_idx);\r
\r
- params.subsample_portion = params.subsample_portion <= FLT_EPSILON || \r
+ params.subsample_portion = params.subsample_portion <= FLT_EPSILON ||\r
1 - params.subsample_portion <= FLT_EPSILON\r
? 1 : params.subsample_portion;\r
int train_sample_count = cvFloor(params.subsample_portion * samples_count);\r
*subsample_test = cvMat( 1, test_sample_count, CV_32SC1,\r
idx_data + train_sample_count );\r
}\r
- \r
+\r
// training procedure\r
\r
for ( int i=0; i < params.weak_count; ++i )\r
{\r
- do_subsample();\r
+ do_subsample();\r
for ( int k=0; k < class_count; ++k )\r
{\r
find_gradient(k);\r
cvGetRow( data->train_data, &x, idx);\r
else\r
cvGetCol( data->train_data, &x, idx);\r
- \r
+\r
if (missing)\r
{\r
if (_tflag == CV_ROW_SAMPLE)\r
cvGetRow( missing, &x_miss, idx);\r
else\r
cvGetCol( missing, &x_miss, idx);\r
- \r
+\r
res = (float)tree->predict(&x, &x_miss)->value;\r
}\r
else\r
{\r
res = (float)tree->predict(&x)->value;\r
}\r
- sum_response_tmp->data.fl[idx + k*n] = \r
+ sum_response_tmp->data.fl[idx + k*n] =\r
sum_response->data.fl[idx + k*n] +\r
params.shrinkage * res;\r
}\r
cvReleaseMat(&new_responses);\r
data->free_train_data();\r
\r
- return true;\r
+ return true;\r
\r
} // CvGBTrees::train(...)\r
\r
//===========================================================================\r
\r
-float Sign(float x)\r
+inline float Sign(float x)\r
{\r
if (x<0.0f) return -1.0f;\r
else if (x>0.0f) return 1.0f;\r
residuals[i] = fabs(resp_data[idx] - current_data[idx]);\r
}\r
icvSortFloat(residuals, n, 0.0f);\r
- \r
+\r
delta = residuals[int(ceil(n*alpha))];\r
\r
for (int i=0; i<n; ++i)\r
int s_step = (sample_idx->cols > sample_idx->rows) ? 1\r
: sample_idx->step/CV_ELEM_SIZE(sample_idx->type);\r
int idx = *(sample_data + subsample_data[i]*s_step);\r
- \r
+\r
for (int j=0; j<class_count; ++j)\r
{\r
double res;\r
exp_sfi += res;\r
}\r
int orig_label = int(resp_data[idx]);\r
- /*\r
+ /*\r
grad_data[idx] = (float)(!(k-class_labels->data.i[orig_label]+1)) -\r
(float)(exp_fk / exp_sfi);\r
- */\r
- int ensemble_label = 0;\r
- while (class_labels->data.i[ensemble_label] - orig_label)\r
- ensemble_label++; \r
- \r
+ */\r
+ int ensemble_label = 0;\r
+ while (class_labels->data.i[ensemble_label] - orig_label)\r
+ ensemble_label++;\r
+\r
grad_data[idx] = (float)(!(k-ensemble_label)) -\r
(float)(exp_fk / exp_sfi);\r
}\r
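// Illustrative sketch of the gradient computed above for the K-class deviance
// loss: the negative gradient at a sample for class k is
// 1{label == k} - softmax_k(F(x)).  Standalone version, assuming the current
// per-class ensemble scores for one sample are already available (for large
// scores one would subtract the maximum before exponentiating).
#include <cmath>
#include <cstddef>
#include <vector>

static std::vector<double> softmax_gradient( const std::vector<double>& scores, // F_k(x), k = 0..K-1
                                             int true_label )                   // label in [0, K)
{
    double denom = 0.0;
    for( std::size_t k = 0; k < scores.size(); k++ )
        denom += std::exp(scores[k]);

    std::vector<double> grad(scores.size());
    for( std::size_t k = 0; k < scores.size(); k++ )
        grad[k] = (int(k) == true_label ? 1.0 : 0.0) - std::exp(scores[k])/denom;
    return grad;
}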
\r
for (int i=0; i<get_len(subsample_train); ++i)\r
{\r
- int idx = *(sample_data + subsample_data[i]*s_step);\r
- if (data->tflag == CV_ROW_SAMPLE)\r
+ int idx = *(sample_data + subsample_data[i]*s_step);\r
+ if (data->tflag == CV_ROW_SAMPLE)\r
cvGetRow( data->train_data, &x, idx);\r
else\r
cvGetCol( data->train_data, &x, idx);\r
- \r
+\r
if (missing)\r
{\r
if (data->tflag == CV_ROW_SAMPLE)\r
cvGetRow( missing, &miss_x, idx);\r
else\r
cvGetCol( missing, &miss_x, idx);\r
- \r
+\r
predictions[i] = tree->predict(&x, &miss_x);\r
}\r
else\r
if (!samples_in_leaf) // It should not be done anyways! but...\r
{\r
leaves[i]->value = 0.0;\r
- continue; \r
+ continue;\r
}\r
\r
CvMat* leaf_idx = cvCreateMat(1, samples_in_leaf, CV_32S);\r
int len = sum_response_tmp->cols;\r
for (int j=0; j<get_len(leaf_idx); ++j)\r
{\r
- int idx = leaf_idx_data[j]; \r
+ int idx = leaf_idx_data[j];\r
sum_response_tmp->data.fl[idx + _k*len] =\r
sum_response->data.fl[idx + _k*len] +\r
params.shrinkage * value;\r
}\r
- leaf_idx_data = 0; \r
+ leaf_idx_data = 0;\r
cvReleaseMat(&leaf_idx);\r
}\r
\r
/*\r
void CvGBTrees::change_values(CvDTree* tree, const int _k)\r
{\r
- \r
+\r
CvDTreeNode** leaves;\r
int leaves_count = 0;\r
- int offset = _k*sum_response_tmp->cols;\r
- CvMat leaf_idx;\r
- leaf_idx.rows = 1;\r
- \r
+ int offset = _k*sum_response_tmp->cols;\r
+ CvMat leaf_idx;\r
+ leaf_idx.rows = 1;\r
+\r
leaves = GetLeaves( tree, leaves_count);\r
\r
for (int i=0; i<leaves_count; ++i)\r
data->get_sample_indices(leaves[i], leaf_idx_data);\r
//CvMat* leaf_idx = new CvMat();\r
//cvInitMatHeader(leaf_idx, n, 1, CV_32S, leaf_idx_data);\r
- leaf_idx.cols = n;\r
- leaf_idx.data.i = leaf_idx_data;\r
+ leaf_idx.cols = n;\r
+ leaf_idx.data.i = leaf_idx_data;\r
\r
float value = find_optimal_value(&leaf_idx);\r
leaves[i]->value = value;\r
- float val = params.shrinkage * value;\r
+ float val = params.shrinkage * value;\r
+\r
\r
- \r
for (int j=0; j<n; ++j)\r
{\r
int idx = leaf_idx_data[j] + offset;\r
}\r
//leaf_idx_data = 0;\r
//cvReleaseMat(&leaf_idx);\r
- leaf_idx.data.i = 0;\r
- //delete leaf_idx;\r
- delete[] leaf_idx_data;\r
+ leaf_idx.data.i = 0;\r
+ //delete leaf_idx;\r
+ delete[] leaf_idx_data;\r
}\r
\r
// releasing the memory\r
for (int i=0; i<n; ++i, ++idx)\r
residuals[i] = (resp_data[*idx] - cur_data[*idx]);\r
icvSortFloat(residuals, n, 0.0f);\r
- if (n % 2) \r
+ if (n % 2)\r
gamma = residuals[n/2];\r
else gamma = (residuals[n/2-1] + residuals[n/2]) / 2.0f;\r
delete[] residuals;\r
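// Illustrative sketch: for the absolute (L1) loss the leaf value above is the
// median of the residuals, taken after a full sort.  A standalone alternative
// using std::nth_element instead of icvSortFloat (assumes a non-empty input;
// the vector is taken by value because it is reordered in place):
#include <algorithm>
#include <cstddef>
#include <vector>

static float residual_median( std::vector<float> residuals )
{
    const std::size_t n = residuals.size();
    std::nth_element( residuals.begin(), residuals.begin() + n/2, residuals.end() );
    float hi = residuals[n/2];
    if( n % 2 )
        return hi;
    // even n: average the two middle elements; the lower one is the largest
    // value in the partitioned left half
    float lo = *std::max_element( residuals.begin(), residuals.begin() + n/2 );
    return (lo + hi) / 2.0f;
}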
tmp1 += tmp;\r
tmp2 += fabs(tmp)*(1-fabs(tmp));\r
};\r
- if (tmp2 == 0) \r
+ if (tmp2 == 0)\r
{\r
tmp2 = 1;\r
}\r
//===========================================================================\r
\r
float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,\r
- CvMat* weak_responses, CvSlice slice, int k) const \r
+ CvMat* weak_responses, CvSlice slice, int k) const\r
{\r
float result = 0.0f;\r
\r
CvSeqReader reader;\r
int weak_count = cvSliceLength( slice, weak[class_count-1] );\r
CvDTree* tree;\r
- \r
+\r
if (weak_responses)\r
{\r
- if (CV_MAT_TYPE(weak_responses->type) != CV_32F)\r
+ if (CV_MAT_TYPE(weak_responses->type) != CV_32F)\r
return 0.0f;\r
if ((k >= 0) && (k<class_count) && (weak_responses->rows != 1))\r
return 0.0f;\r
if (weak_responses->cols != weak_count)\r
return 0.0f;\r
}\r
- \r
+\r
float* sum = new float[class_count];\r
memset(sum, 0, class_count*sizeof(float));\r
\r
{\r
if ((weak[i]) && (weak_count))\r
{\r
- cvStartReadSeq( weak[i], &reader ); \r
+ cvStartReadSeq( weak[i], &reader );\r
cvSetSeqReaderPos( &reader, slice.start_index );\r
for (int j=0; j<weak_count; ++j)\r
{\r
}\r
}\r
}\r
- \r
+\r
for (int i=0; i<class_count; ++i)\r
sum[i] += base_value;\r
\r
\r
delete[] sum;\r
\r
- /*\r
+ /*\r
int orig_class_label = -1;\r
for (int i=0; i<get_len(class_labels); ++i)\r
if (class_labels->data.i[i] == class_label+1)\r
orig_class_label = i;\r
- */\r
- int orig_class_label = class_labels->data.i[class_label];\r
+ */\r
+ int orig_class_label = class_labels->data.i[class_label];\r
\r
return float(orig_class_label);\r
}\r
class Tree_predictor\r
{\r
private:\r
- pCvSeq* weak;\r
- float* sum;\r
- const int k;\r
- const CvMat* sample;\r
- const CvMat* missing;\r
+ pCvSeq* weak;\r
+ float* sum;\r
+ const int k;\r
+ const CvMat* sample;\r
+ const CvMat* missing;\r
const float shrinkage;\r
- \r
+\r
#ifdef HAVE_TBB\r
static tbb::spin_mutex SumMutex;\r
#endif\r
\r
\r
public:\r
- Tree_predictor() : weak(0), sum(0), k(0), sample(0), missing(0), shrinkage(1.0f) {}\r
- Tree_predictor(pCvSeq* _weak, const int _k, const float _shrinkage,\r
- const CvMat* _sample, const CvMat* _missing, float* _sum ) :\r
- weak(_weak), sum(_sum), k(_k), sample(_sample),\r
+ Tree_predictor() : weak(0), sum(0), k(0), sample(0), missing(0), shrinkage(1.0f) {}\r
+ Tree_predictor(pCvSeq* _weak, const int _k, const float _shrinkage,\r
+ const CvMat* _sample, const CvMat* _missing, float* _sum ) :\r
+ weak(_weak), sum(_sum), k(_k), sample(_sample),\r
missing(_missing), shrinkage(_shrinkage)\r
- {}\r
- \r
+ {}\r
+\r
Tree_predictor( const Tree_predictor& p, cv::Split ) :\r
- weak(p.weak), sum(p.sum), k(p.k), sample(p.sample),\r
+ weak(p.weak), sum(p.sum), k(p.k), sample(p.sample),\r
missing(p.missing), shrinkage(p.shrinkage)\r
- {}\r
+ {}\r
+\r
+ Tree_predictor& operator=( const Tree_predictor& )\r
+ { return *this; }\r
\r
- Tree_predictor& operator=( const Tree_predictor& )\r
- { return *this; }\r
- \r
virtual void operator()(const cv::BlockedRange& range) const\r
- {\r
+ {\r
#ifdef HAVE_TBB\r
tbb::spin_mutex::scoped_lock lock;\r
#endif\r
CvSeqReader reader;\r
- int begin = range.begin();\r
- int end = range.end();\r
- \r
- int weak_count = end - begin;\r
- CvDTree* tree;\r
-\r
- for (int i=0; i<k; ++i)\r
- {\r
- float tmp_sum = 0.0f;\r
- if ((weak[i]) && (weak_count))\r
- {\r
- cvStartReadSeq( weak[i], &reader ); \r
- cvSetSeqReaderPos( &reader, begin );\r
- for (int j=0; j<weak_count; ++j)\r
- {\r
- CV_READ_SEQ_ELEM( tree, reader );\r
- tmp_sum += shrinkage*(float)(tree->predict(sample, missing)->value);\r
- }\r
- }\r
+ int begin = range.begin();\r
+ int end = range.end();\r
+\r
+ int weak_count = end - begin;\r
+ CvDTree* tree;\r
+\r
+ for (int i=0; i<k; ++i)\r
+ {\r
+ float tmp_sum = 0.0f;\r
+ if ((weak[i]) && (weak_count))\r
+ {\r
+ cvStartReadSeq( weak[i], &reader );\r
+ cvSetSeqReaderPos( &reader, begin );\r
+ for (int j=0; j<weak_count; ++j)\r
+ {\r
+ CV_READ_SEQ_ELEM( tree, reader );\r
+ tmp_sum += shrinkage*(float)(tree->predict(sample, missing)->value);\r
+ }\r
+ }\r
#ifdef HAVE_TBB\r
lock.acquire(SumMutex);\r
- sum[i] += tmp_sum;\r
+ sum[i] += tmp_sum;\r
lock.release();\r
#else\r
sum[i] += tmp_sum;\r
#endif\r
- }\r
- } // Tree_predictor::operator()\r
- \r
+ }\r
+ } // Tree_predictor::operator()\r
+\r
+ virtual ~Tree_predictor() {}\r
+\r
}; // class Tree_predictor\r
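// Illustrative sketch of what Tree_predictor accumulates per class:
// F_k(x) = base_value + shrinkage * sum_j tree_{k,j}(x).  Serial standalone
// version; TreeFn is a hypothetical callable standing in for
// CvDTree::predict(sample, missing)->value.
#include <cstddef>
#include <functional>
#include <vector>

typedef std::function<double(const std::vector<float>&)> TreeFn;

static std::vector<double> gbt_raw_scores( const std::vector<std::vector<TreeFn> >& weak, // [class][tree]
                                           const std::vector<float>& sample,
                                           double shrinkage, double base_value )
{
    std::vector<double> sum(weak.size(), base_value);
    for( std::size_t k = 0; k < weak.size(); k++ )
        for( std::size_t j = 0; j < weak[k].size(); j++ )
            sum[k] += shrinkage * weak[k][j](sample);
    return sum;                                  // argmax over sum gives the predicted class
}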
\r
\r
\r
\r
float CvGBTrees::predict( const CvMat* _sample, const CvMat* _missing,\r
- CvMat* /*weak_responses*/, CvSlice slice, int k) const \r
+ CvMat* /*weak_responses*/, CvSlice slice, int k) const\r
{\r
float result = 0.0f;\r
- if (!weak) return 0.0f;\r
+ if (!weak) return 0.0f;\r
float* sum = new float[class_count];\r
for (int i=0; i<class_count; ++i)\r
sum[i] = 0.0f;\r
- int begin = slice.start_index;\r
- int end = begin + cvSliceLength( slice, weak[0] );\r
- \r
+ int begin = slice.start_index;\r
+ int end = begin + cvSliceLength( slice, weak[0] );\r
+\r
pCvSeq* weak_seq = weak;\r
- Tree_predictor predictor = Tree_predictor(weak_seq, class_count,\r
+ Tree_predictor predictor = Tree_predictor(weak_seq, class_count,\r
params.shrinkage, _sample, _missing, sum);\r
- \r
+\r
//#ifdef HAVE_TBB\r
-// tbb::parallel_for(cv::BlockedRange(begin, end), predictor,\r
+// tbb::parallel_for(cv::BlockedRange(begin, end), predictor,\r
// tbb::auto_partitioner());\r
//#else\r
cv::parallel_for(cv::BlockedRange(begin, end), predictor);\r
//#endif\r
\r
- for (int i=0; i<class_count; ++i)\r
+ for (int i=0; i<class_count; ++i)\r
sum[i] = sum[i] /** params.shrinkage*/ + base_value;\r
\r
if (class_count == 1)\r
\r
void CvGBTrees::read( CvFileStorage* fs, CvFileNode* node )\r
{\r
- \r
+\r
CV_FUNCNAME( "CvGBTrees::read" );\r
\r
__BEGIN__;\r
\r
\r
for (int j=0; j<class_count; ++j)\r
- { \r
+ {\r
s = "trees_";\r
s += ToString(j);\r
\r
class Sample_predictor\r
{\r
private:\r
- const CvGBTrees* gbt;\r
- float* predictions;\r
- const CvMat* samples;\r
- const CvMat* missing;\r
+ const CvGBTrees* gbt;\r
+ float* predictions;\r
+ const CvMat* samples;\r
+ const CvMat* missing;\r
const CvMat* idx;\r
CvSlice slice;\r
\r
public:\r
- Sample_predictor() : gbt(0), predictions(0), samples(0), missing(0),\r
+ Sample_predictor() : gbt(0), predictions(0), samples(0), missing(0),\r
idx(0), slice(CV_WHOLE_SEQ)\r
{}\r
\r
- Sample_predictor(const CvGBTrees* _gbt, float* _predictions,\r
- const CvMat* _samples, const CvMat* _missing,\r
+ Sample_predictor(const CvGBTrees* _gbt, float* _predictions,\r
+ const CvMat* _samples, const CvMat* _missing,\r
const CvMat* _idx, CvSlice _slice=CV_WHOLE_SEQ) :\r
- gbt(_gbt), predictions(_predictions), samples(_samples),\r
+ gbt(_gbt), predictions(_predictions), samples(_samples),\r
missing(_missing), idx(_idx), slice(_slice)\r
- {}\r
- \r
+ {}\r
+\r
\r
Sample_predictor( const Sample_predictor& p, cv::Split ) :\r
- gbt(p.gbt), predictions(p.predictions),\r
+ gbt(p.gbt), predictions(p.predictions),\r
samples(p.samples), missing(p.missing), idx(p.idx),\r
slice(p.slice)\r
- {}\r
+ {}\r
\r
\r
virtual void operator()(const cv::BlockedRange& range) const\r
- {\r
- int begin = range.begin();\r
- int end = range.end();\r
+ {\r
+ int begin = range.begin();\r
+ int end = range.end();\r
\r
- CvMat x;\r
+ CvMat x;\r
CvMat miss;\r
\r
for (int i=begin; i<end; ++i)\r
predictions[i] = gbt->predict_serial(&x,&miss,0,slice);\r
}\r
}\r
- } // Sample_predictor::operator()\r
+ } // Sample_predictor::operator()\r
+\r
+ virtual ~Sample_predictor() {}\r
\r
}; // class Sample_predictor\r
\r
\r
\r
// type in {CV_TRAIN_ERROR, CV_TEST_ERROR}\r
-float \r
+float\r
CvGBTrees::calc_error( CvMLData* _data, int type, std::vector<float> *resp )\r
{\r
\r
float err = 0.0f;\r
- const CvMat* sample_idx = (type == CV_TRAIN_ERROR) ?\r
+ const CvMat* _sample_idx = (type == CV_TRAIN_ERROR) ?\r
_data->get_train_sample_idx() :\r
_data->get_test_sample_idx();\r
const CvMat* response = _data->get_responses();\r
- \r
- int n = sample_idx ? get_len(sample_idx) : 0;\r
+\r
+ int n = _sample_idx ? get_len(_sample_idx) : 0;\r
n = (type == CV_TRAIN_ERROR && n == 0) ? _data->get_values()->rows : n;\r
- \r
+\r
if (!n)\r
return -FLT_MAX;\r
- \r
- float* pred_resp = 0; \r
+\r
+ float* pred_resp = 0;\r
if (resp)\r
{\r
resp->resize(n);\r
pred_resp = new float[n];\r
\r
Sample_predictor predictor = Sample_predictor(this, pred_resp, _data->get_values(),\r
- _data->get_missing(), sample_idx);\r
- \r
+ _data->get_missing(), _sample_idx);\r
+\r
//#ifdef HAVE_TBB\r
// tbb::parallel_for(cv::BlockedRange(0,n), predictor, tbb::auto_partitioner());\r
//#else\r
cv::parallel_for(cv::BlockedRange(0,n), predictor);\r
//#endif\r
- \r
- int* sidx = sample_idx ? sample_idx->data.i : 0;\r
+\r
+ int* sidx = _sample_idx ? _sample_idx->data.i : 0;\r
int r_step = CV_IS_MAT_CONT(response->type) ?\r
1 : response->step / CV_ELEM_SIZE(response->type);\r
- \r
+\r
\r
if ( !problem_type() )\r
{\r
float d = pred_resp[i] - response->data.fl[si*r_step];\r
err += d*d;\r
}\r
- err = err / (float)n; \r
+ err = err / (float)n;\r
}\r
- \r
+\r
return err;\r
}\r
\r
const cv::Mat& responses, const cv::Mat& varIdx,\r
const cv::Mat& sampleIdx, const cv::Mat& varType,\r
const cv::Mat& missingDataMask,\r
- CvGBTreesParams params )\r
+ CvGBTreesParams _params )\r
{\r
data = 0;\r
weak = 0;\r
class_labels = 0;\r
class_count = 1;\r
delta = 0.0f;\r
- \r
+\r
clear();\r
- \r
- train(trainData, tflag, responses, varIdx, sampleIdx, varType, missingDataMask, params, false);\r
+\r
+ train(trainData, tflag, responses, varIdx, sampleIdx, varType, missingDataMask, _params, false);\r
}\r
\r
bool CvGBTrees::train( const cv::Mat& trainData, int tflag,\r
const cv::Mat& responses, const cv::Mat& varIdx,\r
const cv::Mat& sampleIdx, const cv::Mat& varType,\r
const cv::Mat& missingDataMask,\r
- CvGBTreesParams params,\r
+ CvGBTreesParams _params,\r
bool update )\r
{\r
CvMat _trainData = trainData, _responses = responses;\r
CvMat _varIdx = varIdx, _sampleIdx = sampleIdx, _varType = varType;\r
CvMat _missingDataMask = missingDataMask;\r
- \r
+\r
return train( &_trainData, tflag, &_responses, varIdx.empty() ? 0 : &_varIdx,\r
sampleIdx.empty() ? 0 : &_sampleIdx, varType.empty() ? 0 : &_varType,\r
- missingDataMask.empty() ? 0 : &_missingDataMask, params, update);\r
+ missingDataMask.empty() ? 0 : &_missingDataMask, _params, update);\r
}\r
\r
-float CvGBTrees::predict( const cv::Mat& sample, const cv::Mat& missing,\r
+float CvGBTrees::predict( const cv::Mat& sample, const cv::Mat& _missing,\r
const cv::Range& slice, int k ) const\r
{\r
- CvMat _sample = sample, _missing = missing;\r
- return predict(&_sample, missing.empty() ? 0 : &_missing, 0,\r
+ CvMat _sample = sample, miss = _missing;\r
+ return predict(&_sample, _missing.empty() ? 0 : &miss, 0,\r
slice==cv::Range::all() ? CV_WHOLE_SEQ : cvSlice(slice.start, slice.end), k);\r
}\r
/* Calculates upper triangular matrix S, where A is a symmetrical matrix A=S'*S */
-CV_IMPL void cvChol( CvMat* A, CvMat* S )
+static void cvChol( CvMat* A, CvMat* S )
{
int dim = A->rows;
/* Generates <sample> of <amount> points from a discrete variate xi,
where Pr{xi = k} == probs[k], 0 < k < len - 1. */
-CV_IMPL void cvRandSeries( float probs[], int len, int sample[], int amount )
+static void cvRandSeries( float probs[], int len, int sample[], int amount )
{
CvMat* univals = cvCreateMat(1, amount, CV_32FC1);
float* knots = (float*)cvAlloc( len * sizeof(float) );
#define ICV_RAND_MAX 4294967296 // == 2^32
-CV_IMPL void cvRandRoundUni (CvMat* center,
- float radius_small,
- float radius_large,
- CvMat* desired_matrix,
- CvRNG* rng_state_ptr)
-{
- float rad, norm, coefficient;
- int dim, size, i, j;
- CvMat *cov, sample;
- CvRNG rng_local;
-
- CV_FUNCNAME("cvRandRoundUni");
- __BEGIN__
-
- rng_local = *rng_state_ptr;
-
- CV_ASSERT ((radius_small >= 0) &&
- (radius_large > 0) &&
- (radius_small <= radius_large));
- CV_ASSERT (center && desired_matrix && rng_state_ptr);
- CV_ASSERT (center->rows == 1);
- CV_ASSERT (center->cols == desired_matrix->cols);
-
- dim = desired_matrix->cols;
- size = desired_matrix->rows;
- cov = cvCreateMat (dim, dim, CV_32FC1);
- cvSetIdentity (cov);
- cvRandMVNormal (center, cov, desired_matrix, &rng_local);
-
- for (i = 0; i < size; i++)
- {
- rad = (float)(cvRandReal(&rng_local)*(radius_large - radius_small) + radius_small);
- cvGetRow (desired_matrix, &sample, i);
- norm = (float) cvNorm (&sample, 0, CV_L2);
- coefficient = rad / norm;
- for (j = 0; j < dim; j++)
- CV_MAT_ELEM (sample, float, 0, j) *= coefficient;
- }
-
- __END__
-
-}
+// static void cvRandRoundUni (CvMat* center,
+// float radius_small,
+// float radius_large,
+// CvMat* desired_matrix,
+// CvRNG* rng_state_ptr)
+// {
+// float rad, norm, coefficient;
+// int dim, size, i, j;
+// CvMat *cov, sample;
+// CvRNG rng_local;
+
+// CV_FUNCNAME("cvRandRoundUni");
+// __BEGIN__
+
+// rng_local = *rng_state_ptr;
+
+// CV_ASSERT ((radius_small >= 0) &&
+// (radius_large > 0) &&
+// (radius_small <= radius_large));
+// CV_ASSERT (center && desired_matrix && rng_state_ptr);
+// CV_ASSERT (center->rows == 1);
+// CV_ASSERT (center->cols == desired_matrix->cols);
+
+// dim = desired_matrix->cols;
+// size = desired_matrix->rows;
+// cov = cvCreateMat (dim, dim, CV_32FC1);
+// cvSetIdentity (cov);
+// cvRandMVNormal (center, cov, desired_matrix, &rng_local);
+
+// for (i = 0; i < size; i++)
+// {
+// rad = (float)(cvRandReal(&rng_local)*(radius_large - radius_small) + radius_small);
+// cvGetRow (desired_matrix, &sample, i);
+// norm = (float) cvNorm (&sample, 0, CV_L2);
+// coefficient = rad / norm;
+// for (j = 0; j < dim; j++)
+// CV_MAT_ELEM (sample, float, 0, j) *= coefficient;
+// }
+
+// __END__
+
+// }
// By S. Dilman - end -
}
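// Illustrative sketch of what the commented-out cvRandRoundUni above did: draw
// a normal sample around the center (identity covariance), then rescale the
// whole point so its distance from the origin equals a radius drawn uniformly
// from [radius_small, radius_large], mirroring the coefficient = rad/norm step.
// <random> stands in for cvRandMVNormal/cvRandReal.
#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

static std::vector<double> sample_round_uni( const std::vector<double>& center,
                                             double radius_small, double radius_large,
                                             std::mt19937& rng )
{
    std::normal_distribution<double> gauss(0.0, 1.0);
    std::uniform_real_distribution<double> unif(radius_small, radius_large);

    std::vector<double> p(center.size());
    double norm = 0.0;
    for( std::size_t j = 0; j < p.size(); j++ )
    {
        p[j] = center[j] + gauss(rng);           // sample around the center
        norm += p[j]*p[j];
    }
    norm = std::sqrt(norm);

    const double coefficient = unif(rng) / norm; // rad / ||sample||
    for( std::size_t j = 0; j < p.size(); j++ )
        p[j] *= coefficient;
    return p;
}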
-int icvGetNumberOfCluster( double* prob_vector, int num_of_clusters, float r,
+static int icvGetNumberOfCluster( double* prob_vector, int num_of_clusters, float r,
float outlier_thresh, int normalize_probs )
{
int max_prob_loc = 0;
ok = true;
__END__;
-
+
if( responses && responses->data.ptr != _responses->data.ptr )
cvReleaseMat(&responses);
result = _result;
buf_sz = _buf_sz;
}
-
+
const CvKNearest* pointer;
int k;
const CvMat* _samples;
CvMat* _dist;
float* result;
int buf_sz;
-
+
void operator()( const cv::BlockedRange& range ) const
{
cv::AutoBuffer<float> buf(buf_sz);
int _max_k, bool _update_base )
{
CvMat tdata = _train_data, responses = _responses, sidx = _sample_idx;
-
+
return train(&tdata, &responses, sidx.data.ptr ? &sidx : 0, _is_regression, _max_k, _update_base );
}
Mat* _dist ) const
{
CvMat s = _samples, results, *presults = 0, nresponses, *pnresponses = 0, dist, *pdist = 0;
-
+
if( _results )
{
if(!(_results->data && (_results->type() == CV_32F ||
_results->create(_samples.rows, 1, CV_32F);
presults = &(results = *_results);
}
-
+
if( _neighbor_responses )
{
if(!(_neighbor_responses->data && _neighbor_responses->type() == CV_32F &&
_neighbor_responses->create(_samples.rows, k, CV_32F);
pnresponses = &(nresponses = *_neighbor_responses);
}
-
+
if( _dist )
{
if(!(_dist->data && _dist->type() == CV_32F &&
_dist->create(_samples.rows, k, CV_32F);
pdist = &(dist = *_dist);
}
-
+
return find_nearest(&s, k, presults, _neighbors, pnresponses, pdist );
}
-float CvKNearest::find_nearest( const cv::Mat& samples, int k, CV_OUT cv::Mat& results,
+float CvKNearest::find_nearest( const cv::Mat& _samples, int k, CV_OUT cv::Mat& results,
CV_OUT cv::Mat& neighborResponses, CV_OUT cv::Mat& dists) const
{
- return find_nearest(samples, k, &results, 0, &neighborResponses, &dists);
+ return find_nearest(_samples, k, &results, 0, &neighborResponses, &dists);
}
/* End of file */
double* cov_data = cov->data.db + i*_var_count;
double s1val = sum1[i];
double avg1 = avg_data[i];
- int count = count_data[i];
+ int _count = count_data[i];
for( j = 0; j <= i; j++ )
{
double avg2 = avg2_data[j];
- double cov_val = prod_data[j] - avg1 * sum2[j] - avg2 * s1val + avg1 * avg2 * count;
- cov_val = (count > 1) ? cov_val / (count - 1) : cov_val;
+ double cov_val = prod_data[j] - avg1 * sum2[j] - avg2 * s1val + avg1 * avg2 * _count;
+ cov_val = (_count > 1) ? cov_val / (_count - 1) : cov_val;
cov_data[j] = cov_val;
}
}
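// Illustrative sketch of the covariance entry computed above: with running
// sums S_i = sum x_i, S_j = sum x_j and the product sum P_ij = sum x_i*x_j
// over `count` samples,
//   cov(i,j) = (P_ij - avg_i*S_j - avg_j*S_i + avg_i*avg_j*count) / (count-1),
// which equals sum (x_i - avg_i)(x_j - avg_j) / (count-1).
static double pairwise_covariance( double prod_ij, double sum_i, double sum_j, int count )
{
    double avg_i = sum_i / count, avg_j = sum_j / count;
    double cov = prod_ij - avg_i*sum_j - avg_j*sum_i + avg_i*avg_j*count;
    return count > 1 ? cov / (count - 1) : cov;
}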
value = _value;
var_count1 = _var_count1;
}
-
+
CvMat* c;
CvMat** cov_rotate_mats;
CvMat** inv_eigen_values;
CvMat* results;
float* value;
int var_count1;
-
+
void operator()( const cv::BlockedRange& range ) const
{
int cls = -1;
- int rtype = 0, rstep = 0;
+ int rtype = 0, rstep = 0;
int nclasses = cls_labels->cols;
int _var_count = avg[0]->cols;
-
+
if (results)
{
rtype = CV_MAT_TYPE(results->type);
// allocate memory and initializing headers for calculating
cv::AutoBuffer<double> buffer(nclasses + var_count1);
CvMat diff = cvMat( 1, var_count1, CV_64FC1, &buffer[0] );
-
+
for(int k = range.begin(); k < range.end(); k += 1 )
{
int ival;
cov_rotate_mats = 0;
c = 0;
default_model_name = "my_nb";
-
+
CvMat tdata = _train_data, responses = _responses, vidx = _var_idx, sidx = _sample_idx;
train(&tdata, &responses, vidx.data.ptr ? &vidx : 0,
sidx.data.ptr ? &sidx : 0);
float CvNormalBayesClassifier::predict( const Mat& _samples, Mat* _results ) const
{
CvMat samples = _samples, results, *presults = 0;
-
+
if( _results )
{
if( !(_results->data && _results->type() == CV_32F &&
_results->create(_samples.rows, 1, CV_32F);
presults = &(results = *_results);
}
-
+
return predict(&samples, presults);
}
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4514 4710 4711 4710 )
-#endif
-
-#ifdef HAVE_CVCONFIG_H
+#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
CvMat** out_sample_idx=0 );
void
-cvSortSamplesByClasses( const float** samples, const CvMat* classes,
+cvSortSamplesByClasses( const float** samples, const CvMat* classes,
int* class_ranges, const uchar** mask CV_DEFAULT(0) );
-void
+void
cvCombineResponseMaps (CvMat* _responses,
const CvMat* old_response_map,
CvMat* new_response_map,
void cvCheckTrainData( const CvMat* train_data, int tflag,
- const CvMat* missing_mask,
+ const CvMat* missing_mask,
int* var_all, int* sample_all );
CvMat* cvPreprocessIndexArray( const CvMat* idx_arr, int data_arr_size, bool check_for_duplicates=false );
CvDTree* tree;
CvDTreeNode* node;
};
-
+
struct ForestTreeBestSplitFinder : DTreeBestSplitFinder
{
ForestTreeBestSplitFinder() : DTreeBestSplitFinder() {}
return grow_forest( params.term_crit );
}
-bool CvRTrees::train( CvMLData* data, CvRTParams params )
+bool CvRTrees::train( CvMLData* _data, CvRTParams params )
{
- const CvMat* values = data->get_values();
- const CvMat* response = data->get_responses();
- const CvMat* missing = data->get_missing();
- const CvMat* var_types = data->get_var_types();
- const CvMat* train_sidx = data->get_train_sample_idx();
- const CvMat* var_idx = data->get_var_idx();
+ const CvMat* values = _data->get_values();
+ const CvMat* response = _data->get_responses();
+ const CvMat* missing = _data->get_missing();
+ const CvMat* var_types = _data->get_var_types();
+ const CvMat* train_sidx = _data->get_train_sample_idx();
+ const CvMat* var_idx = _data->get_var_idx();
return train( values, CV_ROW_SAMPLE, response, var_idx,
train_sidx, var_types, missing, params );
const int dims = data->var_count;
float maximal_response = 0;
- CvMat* oob_sample_votes = 0;
+ CvMat* oob_sample_votes = 0;
CvMat* oob_responses = 0;
float* oob_samples_perm_ptr= 0;
 // initialize these variables to avoid warning C4701
CvMat oob_predictions_sum = cvMat( 1, 1, CV_32FC1 );
CvMat oob_num_of_predictions = cvMat( 1, 1, CV_32FC1 );
-
+
nsamples = data->sample_count;
nclasses = data->get_num_classes();
cvGetRow( oob_responses, &oob_predictions_sum, 0 );
cvGetRow( oob_responses, &oob_num_of_predictions, 1 );
}
-
+
oob_samples_perm_ptr = (float*)cvAlloc( sizeof(float)*nsamples*dims );
samples_ptr = (float*)cvAlloc( sizeof(float)*nsamples*dims );
missing_ptr = (uchar*)cvAlloc( sizeof(uchar)*nsamples*dims );
- true_resp_ptr = (float*)cvAlloc( sizeof(float)*nsamples );
+ true_resp_ptr = (float*)cvAlloc( sizeof(float)*nsamples );
data->get_vectors( 0, samples_ptr, missing_ptr, true_resp_ptr );
-
+
double minval, maxval;
CvMat responses = cvMat(1, nsamples, CV_32FC1, true_resp_ptr);
cvMinMaxLoc( &responses, &minval, &maxval );
cvFree( &samples_ptr );
cvFree( &missing_ptr );
cvFree( &true_resp_ptr );
-
+
cvReleaseMat( &sample_idx_mask_for_tree );
cvReleaseMat( &sample_idx_for_tree );
{
CvMat sample, miss;
int si = sidx ? sidx[i] : i;
- cvGetRow( values, &sample, si );
- if( missing )
- cvGetRow( missing, &miss, si );
+ cvGetRow( values, &sample, si );
+ if( missing )
+ cvGetRow( missing, &miss, si );
float r = (float)predict( &sample, missing ? &miss : 0 );
if( pred_resp )
pred_resp[i] = r;
CvMat sample, miss;
int si = sidx ? sidx[i] : i;
cvGetRow( values, &sample, si );
- if( missing )
- cvGetRow( missing, &miss, si );
+ if( missing )
+ cvGetRow( missing, &miss, si );
float r = (float)predict( &sample, missing ? &miss : 0 );
if( pred_resp )
pred_resp[i] = r;
float d = r - response->data.fl[si*r_step];
err += d*d;
}
- err = sample_count ? err / (float)sample_count : -FLT_MAX;
+ err = sample_count ? err / (float)sample_count : -FLT_MAX;
}
return err;
}
float *responses_ptr = (float*)cvAlloc( sizeof(float)*sample_count );
data->get_vectors( 0, values_ptr, missing_ptr, responses_ptr);
-
+
if (data->is_classifier)
{
int err_count = 0;
float *vp = values_ptr;
- uchar *mp = missing_ptr;
+ uchar *mp = missing_ptr;
for (int si = 0; si < sample_count; si++, vp += var_count, mp += var_count)
{
CvMat sample = cvMat( 1, var_count, CV_32FC1, vp );
}
else
CV_Error( CV_StsBadArg, "This method is not supported for regression problems" );
-
+
cvFree( &values_ptr );
cvFree( &missing_ptr );
- cvFree( &responses_ptr );
+ cvFree( &responses_ptr );
return err;
}
float CvRTrees::predict_prob( const CvMat* sample, const CvMat* missing) const
{
- if( nclasses == 2 ) //classification
+ if( nclasses == 2 ) //classification
{
cv::AutoBuffer<int> _votes(nclasses);
int* votes = _votes;
CvDTreeNode* predicted_node = trees[k]->predict( sample, missing );
int class_idx = predicted_node->class_idx;
CV_Assert( 0 <= class_idx && class_idx < nclasses );
-
+
++votes[class_idx];
}
-
- return float(votes[1])/ntrees;
+
+ return float(votes[1])/ntrees;
}
else // regression
- CV_Error(CV_StsBadArg, "This function works for binary classification problems only...");
-
+ CV_Error(CV_StsBadArg, "This function works for binary classification problems only...");
+
return -1;
}
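// Illustrative sketch of predict_prob above for the two-class case: the
// returned probability is the fraction of trees whose predicted class index is
// 1.  Assumes each tree's class index has already been collected into a vector.
#include <cstddef>
#include <vector>

static float vote_fraction_class1( const std::vector<int>& per_tree_class_idx )
{
    if( per_tree_class_idx.empty() )
        return -1.f;                                         // mirrors the error/fallback path
    int votes1 = 0;
    for( std::size_t k = 0; k < per_tree_class_idx.size(); k++ )
        votes1 += (per_tree_class_idx[k] == 1);
    return float(votes1) / float(per_tree_class_idx.size());
}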
{
// initialize active variables mask
CvMat submask1;
- cvGetCols( active_var_mask, &submask1, 0, nactive_vars );
+ cvGetCols( active_var_mask, &submask1, 0, nactive_vars );
cvSet( &submask1, cvScalar(1) );
- if( nactive_vars < var_count )
- {
- CvMat submask2;
- cvGetCols( active_var_mask, &submask2, nactive_vars, var_count );
- cvZero( &submask2 );
- }
+ if( nactive_vars < var_count )
+ {
+ CvMat submask2;
+ cvGetCols( active_var_mask, &submask2, nactive_vars, var_count );
+ cvZero( &submask2 );
+ }
}
}
#include <stdarg.h>
#include <ctype.h>
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4514 ) /* unreferenced inline functions */
-#endif
-
#if 1
typedef float Qfloat;
#define QFLOAT_TYPE CV_32F
CvSVMKernel* _kernel, double* _alpha, CvSVMSolutionInfo& _si )
{
int i;
- double p = _kernel->params->p, C = _kernel->params->C;
+ double p = _kernel->params->p, kernel_param_c = _kernel->params->C;
if( !create( _sample_count, _var_count, _samples, 0,
- _sample_count*2, 0, C, C, _storage, _kernel, &CvSVMSolver::get_row_svr,
+ _sample_count*2, 0, kernel_param_c, kernel_param_c, _storage, _kernel, &CvSVMSolver::get_row_svr,
&CvSVMSolver::select_working_set, &CvSVMSolver::calc_rho ))
return false;
CvSVMKernel* _kernel, double* _alpha, CvSVMSolutionInfo& _si )
{
int i;
- double C = _kernel->params->C, sum;
+ double kernel_param_c = _kernel->params->C, sum;
if( !create( _sample_count, _var_count, _samples, 0,
_sample_count*2, 0, 1., 1., _storage, _kernel, &CvSVMSolver::get_row_svr,
y = (schar*)cvMemStorageAlloc( storage, sample_count*2*sizeof(y[0]) );
alpha = (double*)cvMemStorageAlloc( storage, alpha_count*sizeof(alpha[0]) );
- sum = C * _kernel->params->nu * sample_count * 0.5;
+ sum = kernel_param_c * _kernel->params->nu * sample_count * 0.5;
for( i = 0; i < sample_count; i++ )
{
- alpha[i] = alpha[i + sample_count] = MIN(sum, C);
+ alpha[i] = alpha[i + sample_count] = MIN(sum, kernel_param_c);
sum -= alpha[i];
b[i] = -_y[i];
return ok;
}
-struct indexedratio
+struct indexedratio
{
double val;
int ind;
int svm_type, sample_count, var_count, sample_size;
int block_size = 1 << 16;
double* alpha;
- int i, k;
RNG* rng = &theRNG();
// all steps are logarithmic and must be > 1
double degree_step = 10, g_step = 10, coef_step = 10, C_step = 10, nu_step = 10, p_step = 10;
- double gamma = 0, C = 0, degree = 0, coef = 0, p = 0, nu = 0;
+ double gamma = 0, curr_c = 0, degree = 0, coef = 0, p = 0, nu = 0;
double best_degree = 0, best_gamma = 0, best_coef = 0, best_C = 0, best_nu = 0, best_p = 0;
float min_error = FLT_MAX, error;
cvZero( responses_local );
// randomly permute samples and responses
- for( i = 0; i < sample_count; i++ )
+ for(int i = 0; i < sample_count; i++ )
{
int i1 = (*rng)(sample_count);
int i2 = (*rng)(sample_count);
else
CV_SWAP( responses->data.i[i1], responses->data.i[i2], y );
}
-
+
if (!is_regression && class_labels->cols==2 && balanced)
{
// count class samples
int num_0=0,num_1=0;
- for (i=0; i<sample_count; ++i)
+ for (int i=0; i<sample_count; ++i)
{
if (responses->data.i[i]==class_labels->data.i[0])
++num_0;
else
++num_1;
}
-
+
int label_smallest_class;
int label_biggest_class;
if (num_0 < num_1)
{
label_biggest_class = class_labels->data.i[1];
- label_smallest_class = class_labels->data.i[0];
+ label_smallest_class = class_labels->data.i[0];
}
else
{
}
int* cls_lbls = class_labels ? class_labels->data.i : 0;
- C = C_grid.min_val;
+ curr_c = C_grid.min_val;
do
{
- params.C = C;
+ params.C = curr_c;
gamma = gamma_grid.min_val;
do
{
int train_size = trainset_size;
error = 0;
- for( k = 0; k < k_fold; k++ )
+ for(int k = 0; k < k_fold; k++ )
{
memcpy( samples_local, samples, sizeof(samples[0])*test_size*k );
memcpy( samples_local + test_size*k, test_samples_ptr + test_size,
EXIT;
// Compute test set error on <test_size> samples
- for( i = 0; i < test_size; i++, true_resp += resp_elem_size, test_samples_ptr++ )
+ for(int i = 0; i < test_size; i++, true_resp += resp_elem_size, test_samples_ptr++ )
{
float resp = predict( *test_samples_ptr, var_count );
error += is_regression ? powf( resp - *(float*)true_resp, 2 )
best_degree = degree;
best_gamma = gamma;
best_coef = coef;
- best_C = C;
+ best_C = curr_c;
best_nu = nu;
best_p = p;
}
gamma *= gamma_grid.step;
}
while( gamma < gamma_grid.max_val );
- C *= C_grid.step;
+ curr_c *= C_grid.step;
}
- while( C < C_grid.max_val );
+ while( curr_c < C_grid.max_val );
}
min_error /= (float) sample_count;
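// Illustrative sketch of the parameter sweep above: C and gamma are walked on
// logarithmic grids (value *= step) and the pair with the lowest
// cross-validation error is kept.  The `evaluate` callback is a stand-in for
// the k-fold train/test loop in the patch, and plain for-loops approximate the
// do/while sweep; this is not the CvSVM API.
#include <cfloat>
#include <functional>

struct LogGrid { double min_val, max_val, step; };           // step > 1

static void grid_search_C_gamma( const LogGrid& C_grid, const LogGrid& gamma_grid,
                                 const std::function<double(double,double)>& evaluate,
                                 double& best_C, double& best_gamma )
{
    double min_error = DBL_MAX;
    for( double C = C_grid.min_val; C < C_grid.max_val; C *= C_grid.step )
        for( double gamma = gamma_grid.min_val; gamma < gamma_grid.max_val; gamma *= gamma_grid.step )
        {
            double err = evaluate(C, gamma);                 // k-fold CV error for (C, gamma)
            if( err < min_error )
            {
                min_error = err;
                best_C = C;
                best_gamma = gamma;
            }
        }
}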
int var_count = get_var_count();
assert( row_len == var_count );
- (void)row_len;
+ (void)row_len;
int class_count = class_labels ? class_labels->cols :
params.svm_type == ONE_CLASS ? 1 : 0;
__BEGIN__;
int class_count;
-
+
if( !kernel )
CV_ERROR( CV_StsBadArg, "The SVM should be trained first" );
CV_CALL( cvPreparePredictData( sample, var_all, var_idx,
class_count, 0, &row_sample ));
result = predict( row_sample, get_var_count(), returnDFVal );
-
+
__END__;
if( sample && (!CV_IS_MAT(sample) || sample->data.fl != row_sample) )
samples = _samples;
results = _results;
}
-
+
const CvSVM* pointer;
float* result;
const CvMat* samples;
CvMat* results;
-
+
void operator()( const cv::BlockedRange& range ) const
{
for(int i = range.begin(); i < range.end(); i++ )
results->data.fl[i] = (float)r;
if (i == 0)
*result = (float)r;
- }
+ }
}
};
float CvSVM::predict(const CvMat* samples, CV_OUT CvMat* results) const
{
float result = 0;
- cv::parallel_for(cv::BlockedRange(0, samples->rows),
- predict_body_svm(this, &result, samples, results)
+ cv::parallel_for(cv::BlockedRange(0, samples->rows),
+ predict_body_svm(this, &result, samples, results)
);
return result;
}
kernel = 0;
solver = 0;
default_model_name = "my_svm";
-
+
train( _train_data, _responses, _var_idx, _sample_idx, _params );
}
float CvSVM::predict( const Mat& _sample, bool returnDFVal ) const
{
- CvMat sample = _sample;
+ CvMat sample = _sample;
return predict(&sample, returnDFVal);
}
__BEGIN__;
double degree_step = 7,
- g_step = 15,
- coef_step = 14,
- C_step = 20,
- nu_step = 5,
- p_step = 7; // all steps must be > 1
+ g_step = 15,
+ coef_step = 14,
+ C_step = 20,
+ nu_step = 5,
+ p_step = 7; // all steps must be > 1
double degree_begin = 0.01, degree_end = 2;
double g_begin = 1e-5, g_end = 0.5;
double coef_begin = 0.1, coef_end = 300;
double rate = 0, gamma = 0, C = 0, degree = 0, coef = 0, p = 0, nu = 0;
- double best_rate = 0;
+ double best_rate = 0;
double best_degree = degree_begin;
double best_gamma = g_begin;
double best_coef = coef_begin;
- double best_C = C_begin;
- double best_nu = nu_begin;
+ double best_C = C_begin;
+ double best_nu = nu_begin;
double best_p = p_begin;
CvSVMModelParams svm_params, *psvm_params;
int i;
} CvDI;
-int CV_CDECL
+static int CV_CDECL
icvCmpDI( const void* a, const void* b, void* )
{
const CvDI* e1 = (const CvDI*) a;
CvMat* mean = NULL;
CvMat* cov = NULL;
CvMemStorage* storage = NULL;
-
+
CV_FUNCNAME( "cvCreateTestSet" );
__BEGIN__;
CV_WRITE_SEQ_ELEM( elem, writer );
}
CV_CALL( seq = cvEndWriteSeq( &writer ) );
-
+
/* sort the sequence in a distance ascending order */
CV_CALL( cvSeqSort( seq, icvCmpDI, NULL ) );
{
int last_idx;
double max_dst;
-
+
last_idx = num_samples * (cur_class + 1) / num_classes - 1;
CV_CALL( max_dst = (*((CvDI*) cvGetSeqElem( seq, last_idx ))).d );
max_dst = MAX( max_dst, elem.d );
int vi, i, size;
char err[100];
const int *sidx = 0, *vidx = 0;
-
+
if( _update_data && data_root )
{
data = new CvDTreeTrainData( _train_data, _tflag, _responses, _var_idx,
sample_count = sample_all;
var_count = var_all;
-
+
if( _sample_idx )
{
CV_CALL( sample_indices = cvPreprocessIndexArray( _sample_idx, sample_all ));
var_count = var_idx->rows + var_idx->cols - 1;
}
- is_buf_16u = false;
- if ( sample_count < 65536 )
- is_buf_16u = true;
-
+ is_buf_16u = false;
+ if ( sample_count < 65536 )
+ is_buf_16u = true;
+
if( !CV_IS_MAT(_responses) ||
(CV_MAT_TYPE(_responses->type) != CV_32SC1 &&
CV_MAT_TYPE(_responses->type) != CV_32FC1) ||
CV_ERROR( CV_StsBadArg, "The array of _responses must be an integer or "
"floating-point vector containing as many elements as "
"the total number of samples in the training data matrix" );
-
+
r_type = CV_VAR_CATEGORICAL;
if( _var_type )
CV_CALL( var_type0 = cvPreprocessVarType( _var_type, var_idx, var_count, &r_type ));
CV_CALL( var_type = cvCreateMat( 1, var_count+2, CV_32SC1 ));
-
+
cat_var_count = 0;
ord_var_count = -1;
work_var_count = var_count + (is_classifier ? 1 : 0) // for responses class_labels
+ (have_labels ? 1 : 0); // for cv_labels
-
+
buf_size = (work_var_count + 1 /*for sample_indices*/) * sample_count;
shared = _shared;
buf_count = shared ? 2 : 1;
-
+
if ( is_buf_16u )
{
CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_16UC1 ));
{
CV_CALL( buf = cvCreateMat( buf_count, buf_size, CV_32SC1 ));
CV_CALL( int_ptr = (int**)cvAlloc( sample_count*sizeof(int_ptr[0]) ));
- }
+ }
size = is_classifier ? (cat_var_count+1) : cat_var_count;
size = !size ? 1 : size;
CV_CALL( cat_count = cvCreateMat( 1, size, CV_32SC1 ));
CV_CALL( cat_ofs = cvCreateMat( 1, size, CV_32SC1 ));
-
+
size = is_classifier ? (cat_var_count + 1)*params.max_categories : cat_var_count*params.max_categories;
size = !size ? 1 : size;
CV_CALL( cat_map = cvCreateMat( 1, size, CV_32SC1 ));
{
int c_count, prev_label;
int* c_map;
-
+
if (is_buf_16u)
udst = (unsigned short*)(buf->data.s + vi*sample_count);
else
idst = buf->data.i + vi*sample_count;
-
+
// copy data
for( i = 0; i < sample_count; i++ )
{
_idst[i] = val;
pair16u32s_ptr[i].u = udst + i;
pair16u32s_ptr[i].i = _idst + i;
- }
+ }
else
{
idst[i] = val;
// replace labels for missing values with -1
for( ; i < sample_count; i++ )
*int_ptr[i] = -1;
- }
+ }
}
else if( ci < 0 ) // process ordered variable
{
else
idst[i] = i;
_fdst[i] = val;
-
+
}
if (is_buf_16u)
icvSortUShAux( udst, sample_count, _fdst);
else
icvSortIntAux( idst, sample_count, _fdst );
}
-
+
if( vi < var_count )
data_root->set_num_valid(vi, num_valid);
}
if( cv_n )
{
- unsigned short* udst = 0;
- int* idst = 0;
+ unsigned short* usdst = 0;
+ int* idst2 = 0;
if (is_buf_16u)
{
- udst = (unsigned short*)(buf->data.s + (get_work_var_count()-1)*sample_count);
+ usdst = (unsigned short*)(buf->data.s + (get_work_var_count()-1)*sample_count);
for( i = vi = 0; i < sample_count; i++ )
{
- udst[i] = (unsigned short)vi++;
+ usdst[i] = (unsigned short)vi++;
vi &= vi < cv_n ? -1 : 0;
}
int a = (*rng)(sample_count);
int b = (*rng)(sample_count);
unsigned short unsh = (unsigned short)vi;
- CV_SWAP( udst[a], udst[b], unsh );
+ CV_SWAP( usdst[a], usdst[b], unsh );
}
}
else
{
- idst = buf->data.i + (get_work_var_count()-1)*sample_count;
+ idst2 = buf->data.i + (get_work_var_count()-1)*sample_count;
for( i = vi = 0; i < sample_count; i++ )
{
- idst[i] = vi++;
+ idst2[i] = vi++;
vi &= vi < cv_n ? -1 : 0;
}
{
int a = (*rng)(sample_count);
int b = (*rng)(sample_count);
- CV_SWAP( idst[a], idst[b], vi );
+ CV_SWAP( idst2[a], idst2[b], vi );
}
}
}
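    // the loops above assign cross-validation fold labels 0..cv_n-1 to the samples in
    // round-robin order ("vi &= vi < cv_n ? -1 : 0" resets vi once it reaches cv_n),
    // and the random CV_SWAP calls then shuffle those labels between samples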
- if ( cat_map )
+ if ( cat_map )
cat_map->cols = MAX( total_c_count, 1 );
max_split_size = cvAlign(sizeof(CvDTreeSplit) +
if (is_buf_16u)
{
- unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
+ unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
vi*sample_count + root->offset);
for( i = 0; i < count; i++ )
{
}
else
{
- int* idst = buf->data.i + root->buf_idx*buf->cols +
+ int* idst = buf->data.i + root->buf_idx*buf->cols +
vi*sample_count + root->offset;
for( i = 0; i < count; i++ )
{
if (is_buf_16u)
{
- unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
+ unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
vi*sample_count + data_root->offset);
for( i = 0; i < num_valid; i++ )
{
}
else
{
- int* idst_idx = buf->data.i + root->buf_idx*buf->cols +
+ int* idst_idx = buf->data.i + root->buf_idx*buf->cols +
vi*sample_count + root->offset;
for( i = 0; i < num_valid; i++ )
{
const int* sample_idx_src = get_sample_indices(data_root, (int*)(uchar*)inn_buf);
if (is_buf_16u)
{
- unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
+ unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*buf->cols +
workVarCount*sample_count + root->offset);
for (i = 0; i < count; i++)
sample_idx_dst[i] = (unsigned short)sample_idx_src[sidx[i]];
}
else
{
- int* sample_idx_dst = buf->data.i + root->buf_idx*buf->cols +
+ int* sample_idx_dst = buf->data.i + root->buf_idx*buf->cols +
workVarCount*sample_count + root->offset;
for (i = 0; i < count; i++)
sample_idx_dst[i] = sample_idx_src[sidx[i]];
void CvDTreeTrainData::get_vectors( const CvMat* _subsample_idx,
float* values, uchar* missing,
- float* responses, bool get_class_idx )
+ float* _responses, bool get_class_idx )
{
CvMat* subsample_idx = 0;
CvMat* subsample_co = 0;
}
// copy responses
- if( responses )
+ if( _responses )
{
if( is_classifier )
{
int idx = sidx ? sidx[i] : i;
int val = get_class_idx ? src[idx] :
cat_map->data.i[cat_ofs->data.i[cat_var_count]+src[idx]];
- responses[i] = (float)val;
+ _responses[i] = (float)val;
}
}
else
for( i = 0; i < count; i++ )
{
int idx = sidx ? sidx[i] : i;
- responses[i] = _values[idx];
+ _responses[i] = _values[idx];
}
}
}
cvReleaseMat( &cat_map );
cvReleaseMat( &priors );
cvReleaseMat( &priors_mult );
-
+
node_heap = split_heap = 0;
sample_count = var_all = var_count = max_c_count = ord_var_count = cat_var_count = 0;
buf_count = buf_size = 0;
shared = false;
-
+
data_root = 0;
rng = &cv::theRNG();
const float** ord_values, const int** sorted_indices, int* sample_indices_buf )
{
int vidx = var_idx ? var_idx->data.i[vi] : vi;
- int node_sample_count = n->sample_count;
+ int node_sample_count = n->sample_count;
int td_step = train_data->step/CV_ELEM_SIZE(train_data->type);
const int* sample_indices = get_sample_indices(n, sample_indices_buf);
*sorted_indices = buf->data.i + n->buf_idx*buf->cols +
vi*sample_count + n->offset;
else {
- const unsigned short* short_indices = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
+ const unsigned short* short_indices = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
vi*sample_count + n->offset );
for( int i = 0; i < node_sample_count; i++ )
sorted_indices_buf[i] = short_indices[i];
*sorted_indices = sorted_indices_buf;
}
-
+
if( tflag == CV_ROW_SAMPLE )
{
- for( int i = 0; i < node_sample_count &&
+ for( int i = 0; i < node_sample_count &&
((((*sorted_indices)[i] >= 0) && !is_buf_16u) || (((*sorted_indices)[i] != 65535) && is_buf_16u)); i++ )
{
int idx = (*sorted_indices)[i];
}
}
else
- for( int i = 0; i < node_sample_count &&
+ for( int i = 0; i < node_sample_count &&
((((*sorted_indices)[i] >= 0) && !is_buf_16u) || (((*sorted_indices)[i] != 65535) && is_buf_16u)); i++ )
{
int idx = (*sorted_indices)[i];
idx = sample_indices[idx];
ord_values_buf[i] = *(train_data->data.fl + vidx* td_step + idx);
}
-
+
*ord_values = ord_values_buf;
}
const float* CvDTreeTrainData::get_ord_responses( CvDTreeNode* n, float* values_buf, int*sample_indices_buf )
{
- int sample_count = n->sample_count;
+ int _sample_count = n->sample_count;
int r_step = CV_IS_MAT_CONT(responses->type) ? 1 : responses->step/CV_ELEM_SIZE(responses->type);
const int* indices = get_sample_indices(n, sample_indices_buf);
- for( int i = 0; i < sample_count &&
+ for( int i = 0; i < _sample_count &&
(((indices[i] >= 0) && !is_buf_16u) || ((indices[i] != 65535) && is_buf_16u)); i++ )
{
int idx = indices[i];
values_buf[i] = *(responses->data.fl + idx * r_step);
}
-
+
return values_buf;
}
cat_values = buf->data.i + n->buf_idx*buf->cols +
vi*sample_count + n->offset;
else {
- const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
+ const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*buf->cols +
vi*sample_count + n->offset);
for( int i = 0; i < n->sample_count; i++ )
cat_values_buf[i] = short_values[i];
const Mat& _missing_mask, CvDTreeParams _params )
{
CvMat tdata = _train_data, responses = _responses, vidx=_var_idx,
- sidx=_sample_idx, vtype=_var_type, mmask=_missing_mask;
+ sidx=_sample_idx, vtype=_var_type, mmask=_missing_mask;
return train(&tdata, _tflag, &responses, vidx.data.ptr ? &vidx : 0, sidx.data.ptr ? &sidx : 0,
vtype.data.ptr ? &vtype : 0, mmask.data.ptr ? &mmask : 0, _params);
}
const float* val = 0;
const int* sorted = 0;
data->get_ord_var_data( node, vi, val_buf, sorted_buf, &val, &sorted, sample_idx_buf);
-
+
assert( 0 <= split_point && split_point < n1-1 );
if( !data->have_priors )
{
fastFree(obj);
}
-
+
DTreeBestSplitFinder::DTreeBestSplitFinder( CvDTree* _tree, CvDTreeNode* _node)
{
tree = _tree;
}
CvDTreeSplit* split = 0;
- if( best_subset >= 0 )
+ if( best_subset >= 0 )
{
split = _split ? _split : data->new_split_cat( 0, -1.0f );
split->var_idx = vi;
{
int idx = labels[i];
if( !dir[i] && ( ((idx >= 0)&&(!data->is_buf_16u)) || ((idx != 65535)&&(data->is_buf_16u)) ))
-
+
{
int d = CV_DTREE_CAT_DIR(idx,subset);
dir[i] = (char)((d ^ inversed_mask) - inversed_mask);
{
unsigned short *ldst, *rdst, *ldst0, *rdst0;
//unsigned short tl, tr;
- ldst0 = ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
+ ldst0 = ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
vi*scount + left->offset);
rdst0 = rdst = (unsigned short*)(ldst + nl);
else
{
int *ldst0, *ldst, *rdst0, *rdst;
- ldst0 = ldst = buf->data.i + left->buf_idx*buf->cols +
+ ldst0 = ldst = buf->data.i + left->buf_idx*buf->cols +
vi*scount + left->offset;
- rdst0 = rdst = buf->data.i + right->buf_idx*buf->cols +
+ rdst0 = rdst = buf->data.i + right->buf_idx*buf->cols +
vi*scount + right->offset;
// split sorted
{
int ci = data->get_var_type(vi);
int n1 = node->get_num_valid(vi), nr1 = 0;
-
+
if( ci < 0 || (vi < data->var_count && !split_input_data) )
continue;
if (data->is_buf_16u)
{
- unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols +
+ unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*buf->cols +
vi*scount + left->offset);
- unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols +
+ unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*buf->cols +
vi*scount + right->offset);
-
+
for( i = 0; i < n; i++ )
{
int d = dir[i];
}
else
{
- int *ldst = buf->data.i + left->buf_idx*buf->cols +
+ int *ldst = buf->data.i + left->buf_idx*buf->cols +
vi*scount + left->offset;
- int *rdst = buf->data.i + right->buf_idx*buf->cols +
+ int *rdst = buf->data.i + right->buf_idx*buf->cols +
vi*scount + right->offset;
-
+
for( i = 0; i < n; i++ )
{
int d = dir[i];
*ldst = idx;
ldst++;
}
-
+
}
if( vi < data->var_count )
left->set_num_valid(vi, n1 - nr1);
right->set_num_valid(vi, nr1);
}
- }
+ }
}
int pos = data->get_work_var_count();
if (data->is_buf_16u)
{
- unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
+ unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
pos*scount + left->offset);
- unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
+ unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
pos*scount + right->offset);
for (i = 0; i < n; i++)
{
}
else
{
- int* ldst = buf->data.i + left->buf_idx*buf->cols +
+ int* ldst = buf->data.i + left->buf_idx*buf->cols +
pos*scount + left->offset;
- int* rdst = buf->data.i + right->buf_idx*buf->cols +
+ int* rdst = buf->data.i + right->buf_idx*buf->cols +
pos*scount + right->offset;
for (i = 0; i < n; i++)
{
}
}
}
-
+
// deallocate the parent node data that is not needed anymore
- data->free_node_data(node);
+ data->free_node_data(node);
}
float CvDTree::calc_error( CvMLData* _data, int type, vector<float> *resp )
{
CvMat sample, miss;
int si = sidx ? sidx[i] : i;
- cvGetRow( values, &sample, si );
- if( missing )
- cvGetRow( missing, &miss, si );
+ cvGetRow( values, &sample, si );
+ if( missing )
+ cvGetRow( missing, &miss, si );
float r = (float)predict( &sample, missing ? &miss : 0 )->value;
if( pred_resp )
pred_resp[i] = r;
{
CvMat sample, miss;
int si = sidx ? sidx[i] : i;
- cvGetRow( values, &sample, si );
- if( missing )
- cvGetRow( missing, &miss, si );
+ cvGetRow( values, &sample, si );
+ if( missing )
+ cvGetRow( missing, &miss, si );
float r = (float)predict( &sample, missing ? &miss : 0 )->value;
if( pred_resp )
pred_resp[i] = r;
float d = r - response->data.fl[si*r_step];
err += d*d;
}
- err = sample_count ? err / (float)sample_count : -FLT_MAX;
+ err = sample_count ? err / (float)sample_count : -FLT_MAX;
}
return err;
}
}
-void CvDTree::free_prune_data(bool cut_tree)
+void CvDTree::free_prune_data(bool _cut_tree)
{
CvDTreeNode* node = root;
for( parent = node->parent; parent && parent->right == node;
node = parent, parent = parent->parent )
{
- if( cut_tree && parent->Tn <= pruned_tree_idx )
+ if( _cut_tree && parent->Tn <= pruned_tree_idx )
{
data->free_node( parent->left );
data->free_node( parent->right );
{
int a = c = cofs[ci];
int b = (ci+1 >= data->cat_ofs->cols) ? data->cat_map->cols : cofs[ci+1];
-
+
int ival = cvRound(val);
if( ival != val )
CV_Error( CV_StsBadArg,
"one of input categorical variable is not an integer" );
-
+
int sh = 0;
while( a < b )
{
CV_Assert( _means.rows == (int)sizes.size() && covs.size() == sizes.size() );
CV_Assert( !data.empty() && data.rows == total );
CV_Assert( data.type() == dataType );
-
+
labels.create( data.rows, 1, labelType );
randn( data, Scalar::all(-1.0), Scalar::all(1.0) );
for( int i = bi; i < ei; i++, p++ )
{
Mat r = data.row(i);
- r = r * (*cit) + *mit;
+ r = r * (*cit) + *mit;
if( labelType == CV_32FC1 )
labels.at<float>(p, 0) = (float)l;
else if( labelType == CV_32SC1 )
const int iters = 100;
int sizesArr[] = { 5000, 7000, 8000 };
int pointsCount = sizesArr[0]+ sizesArr[1] + sizesArr[2];
-
+
Mat data( pointsCount, 2, CV_32FC1 ), labels;
vector<int> sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) );
Mat means;
vector<Mat> covs;
defaultDistribs( means, covs );
generateData( data, labels, sizes, means, covs, CV_32FC1, CV_32SC1 );
-
+
int code = cvtest::TS::OK;
float err;
Mat bestLabels;
class EM_Params
{
public:
- EM_Params(int nclusters=10, int covMatType=EM::COV_MAT_DIAGONAL, int startStep=EM::START_AUTO_STEP,
- const cv::TermCriteria& termCrit=cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, FLT_EPSILON),
- const cv::Mat* probs=0, const cv::Mat* weights=0,
- const cv::Mat* means=0, const std::vector<cv::Mat>* covs=0)
- : nclusters(nclusters), covMatType(covMatType), startStep(startStep),
- probs(probs), weights(weights), means(means), covs(covs), termCrit(termCrit)
+ EM_Params(int _nclusters=10, int _covMatType=EM::COV_MAT_DIAGONAL, int _startStep=EM::START_AUTO_STEP,
+ const cv::TermCriteria& _termCrit=cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 100, FLT_EPSILON),
+ const cv::Mat* _probs=0, const cv::Mat* _weights=0,
+ const cv::Mat* _means=0, const std::vector<cv::Mat>* _covs=0)
+ : nclusters(_nclusters), covMatType(_covMatType), startStep(_startStep),
+ probs(_probs), weights(_weights), means(_means), covs(_covs), termCrit(_termCrit)
{}
-
+
int nclusters;
int covMatType;
int startStep;
-
+
// all 4 following matrices should have type CV_32FC1
const cv::Mat* probs;
const cv::Mat* weights;
const cv::Mat* means;
const std::vector<cv::Mat>* covs;
-
+
cv::TermCriteria termCrit;
};
int currCode = runCase(caseIndex++, params, trainData, trainLabels, testData, testLabels, sizes);
code = currCode == cvtest::TS::OK ? code : currCode;
}
-
+
ts->set_failed_test_info( code );
}
{
public:
CV_GBTreesTest();
- ~CV_GBTreesTest();
-
+ ~CV_GBTreesTest();
+
protected:
void run(int);
int TestSaveLoad();
int checkPredictError(int test_num);
- int checkLoadSave();
-
+ int checkLoadSave();
+
string model_file_name1;
string model_file_name2;
string* datasets;
string data_path;
-
+
CvMLData* data;
CvGBTrees* gtb;
-
+
vector<float> test_resps1;
vector<float> test_resps2;
- int64 initSeed;
+ int64 initSeed;
};
CV_GBTreesTest::CV_GBTreesTest()
{
- int64 seeds[] = { CV_BIG_INT(0x00009fff4f9c8d52),
+ int64 seeds[] = { CV_BIG_INT(0x00009fff4f9c8d52),
CV_BIG_INT(0x0000a17166072c7c),
CV_BIG_INT(0x0201b32115cd1f9a),
CV_BIG_INT(0x0513cb37abcd1234),
};
int seedCount = sizeof(seeds)/sizeof(seeds[0]);
- cv::RNG& rng = cv::theRNG();
+ cv::RNG& rng = cv::theRNG();
initSeed = rng.state;
rng.state = seeds[rng(seedCount)];
if (data)
delete data;
delete[] datasets;
- cv::theRNG().state = initSeed;
+ cv::theRNG().state = initSeed;
}
int CV_GBTreesTest::TestTrainPredict(int test_num)
{
int code = cvtest::TS::OK;
-
+
int weak_count = 200;
float shrinkage = 0.1f;
float subsample_portion = 0.5f;
case (2) : loss_function_type = CvGBTrees::ABSOLUTE_LOSS; break;
case (3) : loss_function_type = CvGBTrees::HUBER_LOSS; break;
case (0) : loss_function_type = CvGBTrees::DEVIANCE_LOSS; break;
- default :
+ default :
{
ts->printf( cvtest::TS::LOG, "Bad test_num value in CV_GBTreesTest::TestTrainPredict(..) function." );
return cvtest::TS::FAIL_BAD_ARG_CHECK;
{
data = new CvMLData();
data->set_delimiter(',');
-
+
if (data->read_csv(datasets[dataset_num].c_str()))
{
ts->printf( cvtest::TS::LOG, "File reading error." );
CvTrainTestSplit spl( train_sample_count );
data->set_train_test_split( &spl );
}
-
- data->mix_train_and_test_idx();
-
-
+
+ data->mix_train_and_test_idx();
+
+
if (gtb) delete gtb;
gtb = new CvGBTrees();
bool tmp_code = true;
tmp_code = gtb->train(data, CvGBTreesParams(loss_function_type, weak_count,
shrinkage, subsample_portion,
max_depth, use_surrogates));
-
+
if (!tmp_code)
{
ts->printf( cvtest::TS::LOG, "Model training was failed.");
return cvtest::TS::FAIL_INVALID_OUTPUT;
}
-
+
code = checkPredictError(test_num);
-
+
return code;
}
{
if (!gtb)
return cvtest::TS::FAIL_GENERIC;
-
+
//float mean[] = {5.430247f, 13.5654f, 12.6569f, 13.1661f};
//float sigma[] = {0.4162694f, 3.21161f, 3.43297f, 3.00624f};
- float mean[] = {5.80226f, 12.68689f, 13.49095f, 13.19628f};
+ float mean[] = {5.80226f, 12.68689f, 13.49095f, 13.19628f};
float sigma[] = {0.4764534f, 3.166919f, 3.022405f, 2.868722f};
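    // mean[] and sigma[] are the recorded baseline test errors and spreads for the four
    // dataset/loss-function combinations; the check below fails the test if the current
    // error deviates from the baseline mean by more than 6*sigma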
-
+
float current_error = gtb->calc_error(data, CV_TEST_ERROR);
-
+
if ( abs( current_error - mean[test_num]) > 6*sigma[test_num] )
{
ts->printf( cvtest::TS::LOG, "Test error is out of range:\n"
{
if (!gtb)
return cvtest::TS::FAIL_GENERIC;
-
+
model_file_name1 = cv::tempfile();
model_file_name2 = cv::tempfile();
gtb->load(model_file_name1.c_str());
gtb->calc_error(data, CV_TEST_ERROR, &test_resps2);
gtb->save(model_file_name2.c_str());
-
+
return checkLoadSave();
-
+
}
// 1. compare files
ifstream f1( model_file_name1.c_str() ), f2( model_file_name2.c_str() );
string s1, s2;
- int lineIdx = 0;
+ int lineIdx = 0;
CV_Assert( f1.is_open() && f2.is_open() );
for( ; !f1.eof() && !f2.eof(); lineIdx++ )
{
void CV_GBTreesTest::run(int)
{
- string data_path = string(ts->get_data_path());
+ string dataPath = string(ts->get_data_path());
datasets = new string[2];
- datasets[0] = data_path + string("spambase.data"); /*string("dataset_classification.csv");*/
- datasets[1] = data_path + string("housing_.data"); /*string("dataset_regression.csv");*/
+ datasets[0] = dataPath + string("spambase.data"); /*string("dataset_classification.csv");*/
+ datasets[1] = dataPath + string("housing_.data"); /*string("dataset_regression.csv");*/
int code = cvtest::TS::OK;
for (int i = 0; i < 4; i++)
{
-
+
int temp_code = TestTrainPredict(i);
if (temp_code != cvtest::TS::OK)
{
code = temp_code;
break;
}
-
+
else if (i==0)
{
temp_code = TestSaveLoad();
delete data;
data = 0;
}
-
+
delete gtb;
gtb = 0;
}
delete data;
data = 0;
-
+
ts->set_failed_test_info( code );
}
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/highgui/highgui.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
-#endif
-
-#ifdef HAVE_CVCONFIG_H
+#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
/**********************************************************************************************\
Implementation of SIFT is based on the code from http://blogs.oregonstate.edu/hess/code/sift/
Below is the original copyright.
-
+
// Copyright (c) 2006-2010, Rob Hess <hess@eecs.oregonstate.edu>
// All rights reserved.
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\**********************************************************************************************/
-
+
#include "precomp.hpp"
#include <iostream>
#include <stdarg.h>
// factor used to convert floating-point descriptor to unsigned char
static const float SIFT_INT_DESCR_FCTR = 512.f;
-
+
static const int SIFT_FIXPT_SCALE = 48;
-
-
+
+
static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma )
{
Mat gray, gray_fpt;
else
img.copyTo(gray);
gray.convertTo(gray_fpt, CV_16S, SIFT_FIXPT_SCALE, 0);
-
+
float sig_diff;
-
+
if( doubleImageSize )
{
sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
return gray_fpt;
}
}
-
-
+
+
void SIFT::buildGaussianPyramid( const Mat& base, vector<Mat>& pyr, int nOctaves ) const
{
vector<double> sig(nOctaveLayers + 3);
pyr.resize(nOctaves*(nOctaveLayers + 3));
-
+
// precompute Gaussian sigmas using the following formula:
// \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
sig[0] = sigma;
double sig_total = sig_prev*k;
sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
}
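    // sig[i] is therefore the *incremental* blur: convolving a layer that already has
    // blur sig_prev with a Gaussian of sigma sig[i] yields the desired total blur
    // sig_prev*k, so each layer can be built from the previous one rather than from
    // the base image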
-
+
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 3; i++ )
{
int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
-
+
for( int o = 0; o < nOctaves; o++ )
{
for( int i = 0; i < nOctaveLayers + 2; i++ )
}
}
-
+
// Computes a gradient orientation histogram at a specified pixel
static float calcOrientationHist( const Mat& img, Point pt, int radius,
float sigma, float* hist, int n )
{
int i, j, k, len = (radius*2+1)*(radius*2+1);
-
+
float expf_scale = -1.f/(2.f * sigma * sigma);
AutoBuffer<float> buf(len*4 + n+4);
float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
float* temphist = W + len + 2;
-
+
for( i = 0; i < n; i++ )
temphist[i] = 0.f;
-
+
for( i = -radius, k = 0; i <= radius; i++ )
{
int y = pt.y + i;
int x = pt.x + j;
if( x <= 0 || x >= img.cols - 1 )
continue;
-
+
float dx = (float)(img.at<short>(y, x+1) - img.at<short>(y, x-1));
float dy = (float)(img.at<short>(y-1, x) - img.at<short>(y+1, x));
-
+
X[k] = dx; Y[k] = dy; W[k] = (i*i + j*j)*expf_scale;
k++;
}
}
-
+
len = k;
-
+
// compute gradient values, orientations and the weights over the pixel neighborhood
exp(W, W, len);
fastAtan2(Y, X, Ori, len, true);
magnitude(X, Y, Mag, len);
-
+
for( k = 0; k < len; k++ )
{
int bin = cvRound((n/360.f)*Ori[k]);
bin += n;
temphist[bin] += W[k]*Mag[k];
}
-
+
// smooth the histogram
temphist[-1] = temphist[n-1];
temphist[-2] = temphist[n-2];
(temphist[i-1] + temphist[i+1])*(4.f/16.f) +
temphist[i]*(6.f/16.f);
}
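    // this is a circular smoothing of the raw orientation histogram (the wrap-around
    // padding is provided by temphist[-1], temphist[-2], temphist[n], temphist[n+1]);
    // the 4/16 and 6/16 taps visible here belong to what is, in the full source, a
    // 5-tap binomial kernel (1 4 6 4 1)/16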
-
+
float maxval = hist[0];
for( i = 1; i < n; i++ )
maxval = std::max(maxval, hist[i]);
-
+
return maxval;
}
-
+
//
// Interpolates a scale-space extremum's location and scale to subpixel
// accuracy to form an image feature. Rejects features with low contrast.
-// Based on Section 4 of Lowe's paper.
+// Based on Section 4 of Lowe's paper.
static bool adjustLocalExtrema( const vector<Mat>& dog_pyr, KeyPoint& kpt, int octv,
int& layer, int& r, int& c, int nOctaveLayers,
float contrastThreshold, float edgeThreshold, float sigma )
const float deriv_scale = img_scale*0.5f;
const float second_deriv_scale = img_scale;
const float cross_deriv_scale = img_scale*0.25f;
-
+
float xi=0, xr=0, xc=0, contr;
int i = 0;
-
+
for( ; i < SIFT_MAX_INTERP_STEPS; i++ )
{
int idx = octv*(nOctaveLayers+2) + layer;
const Mat& img = dog_pyr[idx];
const Mat& prev = dog_pyr[idx-1];
const Mat& next = dog_pyr[idx+1];
-
+
Vec3f dD((img.at<short>(r, c+1) - img.at<short>(r, c-1))*deriv_scale,
(img.at<short>(r+1, c) - img.at<short>(r-1, c))*deriv_scale,
(next.at<short>(r, c) - prev.at<short>(r, c))*deriv_scale);
-
+
float v2 = (float)img.at<short>(r, c)*2;
float dxx = (img.at<short>(r, c+1) + img.at<short>(r, c-1) - v2)*second_deriv_scale;
float dyy = (img.at<short>(r+1, c) + img.at<short>(r-1, c) - v2)*second_deriv_scale;
prev.at<short>(r, c+1) + prev.at<short>(r, c-1))*cross_deriv_scale;
float dys = (next.at<short>(r+1, c) - next.at<short>(r-1, c) -
prev.at<short>(r+1, c) + prev.at<short>(r-1, c))*cross_deriv_scale;
-
+
Matx33f H(dxx, dxy, dxs,
dxy, dyy, dys,
dxs, dys, dss);
-
+
Vec3f X = H.solve(dD, DECOMP_LU);
-
+
xi = -X[2];
xr = -X[1];
xc = -X[0];
-
+
if( std::abs( xi ) < 0.5f && std::abs( xr ) < 0.5f && std::abs( xc ) < 0.5f )
break;
-
+
c += cvRound( xc );
r += cvRound( xr );
layer += cvRound( xi );
-
+
if( layer < 1 || layer > nOctaveLayers ||
c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER ||
r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER )
return false;
}
-
+
/* ensure convergence of interpolation */
if( i >= SIFT_MAX_INTERP_STEPS )
return false;
-
+
{
int idx = octv*(nOctaveLayers+2) + layer;
const Mat& img = dog_pyr[idx];
(img.at<short>(r+1, c) - img.at<short>(r-1, c))*deriv_scale,
(next.at<short>(r, c) - prev.at<short>(r, c))*deriv_scale);
float t = dD.dot(Matx31f(xc, xr, xi));
-
+
contr = img.at<short>(r, c)*img_scale + t * 0.5f;
if( std::abs( contr ) * nOctaveLayers < contrastThreshold )
return false;
-
+
/* principal curvatures are computed using the trace and det of Hessian */
float v2 = img.at<short>(r, c)*2.f;
float dxx = (img.at<short>(r, c+1) + img.at<short>(r, c-1) - v2)*second_deriv_scale;
img.at<short>(r-1, c+1) + img.at<short>(r-1, c-1)) * cross_deriv_scale;
float tr = dxx + dyy;
float det = dxx * dyy - dxy * dxy;
-
+
if( det <= 0 || tr*tr*edgeThreshold >= (edgeThreshold + 1)*(edgeThreshold + 1)*det )
return false;
}
-
+
kpt.pt.x = (c + xc) * (1 << octv);
kpt.pt.y = (r + xr) * (1 << octv);
kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
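    // kpt.octave packs three values: the octave index in the low byte, the layer index
    // in the second byte, and the sub-layer offset xi (mapped from [-0.5, 0.5] to 0..255)
    // in the third byte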
kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
-
+
return true;
}
-
-
+
+
//
// Detects features at extrema in DoG scale space. Bad features are discarded
// based on contrast and ratio of principal curvatures.
const int n = SIFT_ORI_HIST_BINS;
float hist[n];
KeyPoint kpt;
-
+
keypoints.clear();
-
+
for( int o = 0; o < nOctaves; o++ )
for( int i = 1; i <= nOctaveLayers; i++ )
{
const Mat& next = dog_pyr[idx+1];
int step = (int)img.step1();
int rows = img.rows, cols = img.cols;
-
+
for( int r = SIFT_IMG_BORDER; r < rows-SIFT_IMG_BORDER; r++)
{
const short* currptr = img.ptr<short>(r);
const short* prevptr = prev.ptr<short>(r);
const short* nextptr = next.ptr<short>(r);
-
+
for( int c = SIFT_IMG_BORDER; c < cols-SIFT_IMG_BORDER; c++)
{
int val = currptr[c];
-
+
// find local extrema with pixel accuracy
if( std::abs(val) > threshold &&
((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
for( int j = 0; j < n; j++ )
{
int l = j > 0 ? j - 1 : n - 1;
- int r = j < n-1 ? j + 1 : 0;
-
- if( hist[j] > hist[l] && hist[j] > hist[r] && hist[j] >= mag_thr )
+ int r2 = j < n-1 ? j + 1 : 0;
+
+ if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr )
{
- float bin = j + 0.5f * (hist[l]-hist[r]) / (hist[l] - 2*hist[j] + hist[r]);
+ float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
kpt.angle = (float)((360.f/n) * bin);
keypoints.push_back(kpt);
}
}
}
-}
-
-
+}
+
+
static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float scl,
int d, int n, float* dst )
{
int radius = cvRound(hist_width * 1.4142135623730951f * (d + 1) * 0.5f);
cos_t /= hist_width;
sin_t /= hist_width;
-
+
int i, j, k, len = (radius*2+1)*(radius*2+1), histlen = (d+2)*(d+2)*(n+2);
int rows = img.rows, cols = img.cols;
-
+
AutoBuffer<float> buf(len*6 + histlen);
float *X = buf, *Y = X + len, *Mag = Y, *Ori = Mag + len, *W = Ori + len;
float *RBin = W + len, *CBin = RBin + len, *hist = CBin + len;
-
+
for( i = 0; i < d+2; i++ )
{
for( j = 0; j < d+2; j++ )
for( k = 0; k < n+2; k++ )
hist[(i*(d+2) + j)*(n+2) + k] = 0.;
}
-
+
for( i = -radius, k = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
float rbin = r_rot + d/2 - 0.5f;
float cbin = c_rot + d/2 - 0.5f;
int r = pt.y + i, c = pt.x + j;
-
+
if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
{
k++;
}
}
-
+
len = k;
fastAtan2(Y, X, Ori, len, true);
magnitude(X, Y, Mag, len);
exp(W, W, len);
-
+
for( k = 0; k < len; k++ )
{
float rbin = RBin[k], cbin = CBin[k];
float obin = (Ori[k] - ori)*bins_per_rad;
float mag = Mag[k]*W[k];
-
+
int r0 = cvFloor( rbin );
int c0 = cvFloor( cbin );
int o0 = cvFloor( obin );
rbin -= r0;
cbin -= c0;
obin -= o0;
-
+
if( o0 < 0 )
o0 += n;
if( o0 >= n )
o0 -= n;
-
+
// histogram update using tri-linear interpolation
float v_r1 = mag*rbin, v_r0 = mag - v_r1;
float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
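        // the sample's Gaussian-weighted magnitude is split across the 8 neighbouring
        // histogram cells (2 rows x 2 columns x 2 orientation bins); the fractional
        // parts rbin/cbin/obin act as linear weights, so v_rco000..v_rco111 sum to mag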
-
+
int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
hist[idx] += v_rco000;
hist[idx+1] += v_rco001;
hist[idx+(d+3)*(n+2)] += v_rco110;
hist[idx+(d+3)*(n+2)+1] += v_rco111;
}
-
+
// finalize histogram, since the orientation histograms are circular
for( i = 0; i < d; i++ )
for( j = 0; j < d; j++ )
dst[k] = saturate_cast<uchar>(dst[k]*nrm2);
}
}
-
+
static void calcDescriptors(const vector<Mat>& gpyr, const vector<KeyPoint>& keypoints,
Mat& descriptors, int nOctaveLayers )
{
int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
-
+
for( size_t i = 0; i < keypoints.size(); i++ )
{
KeyPoint kpt = keypoints[i];
float size=kpt.size*scale;
Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale);
const Mat& img = gpyr[octv*(nOctaveLayers + 3) + layer];
-
+
calcSIFTDescriptor(img, ptf, kpt.angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
}
}
bool useProvidedKeypoints) const
{
Mat image = _image.getMat(), mask = _mask.getMat();
-
+
if( image.empty() || image.depth() != CV_8U )
CV_Error( CV_StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
-
+
if( !mask.empty() && mask.type() != CV_8UC1 )
CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
-
+
Mat base = createInitialImage(image, false, (float)sigma);
vector<Mat> gpyr, dogpyr;
int nOctaves = cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2);
-
+
//double t, tf = getTickFrequency();
//t = (double)getTickCount();
buildGaussianPyramid(base, gpyr, nOctaves);
buildDoGPyramid(gpyr, dogpyr);
-
+
//t = (double)getTickCount() - t;
//printf("pyramid construction time: %g\n", t*1000./tf);
-
+
if( !useProvidedKeypoints )
{
//t = (double)getTickCount();
findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
KeyPointsFilter::removeDuplicated( keypoints );
-
+
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
-
+
if( nfeatures > 0 )
KeyPointsFilter::retainBest(keypoints, nfeatures);
//t = (double)getTickCount() - t;
// filter keypoints by mask
//KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
-
+
if( _descriptors.needed() )
{
//t = (double)getTickCount();
int dsize = descriptorSize();
_descriptors.create((int)keypoints.size(), dsize, CV_32F);
Mat descriptors = _descriptors.getMat();
-
+
calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers);
//t = (double)getTickCount() - t;
//printf("descriptor extraction time: %g\n", t*1000./tf);
void SIFT::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
{
(*this)(image, mask, keypoints, noArray());
-}
-
+}
+
void SIFT::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
- * Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- * The name of Contributor may not be used to endorse or
- * promote products derived from this software without
- * specific prior written permission.
+ * Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ * The name of Contributor may not be used to endorse or
+ * promote products derived from this software without
+ * specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* OF SUCH DAMAGE.
*/
-/*
+/*
The following changes have been made, compared to the original contribution:
1. A lot of small optimizations, less memory allocations, got rid of global buffers
2. Reversed order of cvGetQuadrangleSubPix and cvResize calls; probably less accurate, but much faster
The extraction of the patch of pixels surrounding a keypoint used to build a
descriptor has been simplified.
-KeyPoint descriptor normalisation has been changed from normalising each 4x4
-cell (resulting in a descriptor of magnitude 16) to normalising the entire
+KeyPoint descriptor normalisation has been changed from normalising each 4x4
+cell (resulting in a descriptor of magnitude 16) to normalising the entire
descriptor to magnitude 1.
The default number of octaves has been increased from 3 to 4 to match the
the higher octaves are sampled sparsely.
The default number of layers per octave has been reduced from 3 to 2, to prevent
-redundant calculation of similar sizes in consecutive octaves. This decreases
-computation time. The number of features extracted may be less, however the
+redundant calculation of similar sizes in consecutive octaves. This decreases
+computation time. The number of features extracted may be less, however the
additional features were mostly redundant.
The radius of the circle of gradient samples used to assign an orientation has
-been increased from 4 to 6 to match the description in the SURF paper. This is
+been increased from 4 to 6 to match the description in the SURF paper. This is
now defined by ORI_RADIUS, and could be made into a parameter.
The size of the sliding window used in orientation assignment has been reduced
from 120 to 60 degrees to match the description in the SURF paper. This is now
defined by ORI_WIN, and could be made into a parameter.
-Other options like HAAR_SIZE0, HAAR_SIZE_INC, SAMPLE_STEP0, ORI_SEARCH_INC,
-ORI_SIGMA and DESC_SIGMA have been separated from the code and documented.
+Other options like HAAR_SIZE0, HAAR_SIZE_INC, SAMPLE_STEP0, ORI_SEARCH_INC,
+ORI_SIGMA and DESC_SIGMA have been separated from the code and documented.
These could also be made into parameters.
Modifications by Ian Mahon
// This ensures that when looking for the neighbours of a sample, the layers
// above and below are aligned correctly.
static const int SURF_HAAR_SIZE_INC = 6;
-
-
+
+
struct SurfHF
{
int p0, p1, p2, p3;
float w;
+
+ SurfHF(): p0(0), p1(0), p2(0), p3(0), w(0) {}
};
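// p0..p3 hold the integral-image offsets of a box filter's four corners and w its weight;
// calcHaarPattern() below presumably accumulates w*(origin[p0] - origin[p1] - origin[p2]
// + origin[p3]) over the n boxes, i.e. it evaluates a Haar-like response in constant time
// per box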
inline float calcHaarPattern( const int* origin, const SurfHF* f, int n )
* Maxima location interpolation as described in "Invariant Features from
* Interest Point Groups" by Matthew Brown and David Lowe. This is performed by
* fitting a 3D quadratic to a set of neighbouring samples.
- *
- * The gradient vector and Hessian matrix at the initial keypoint location are
+ *
+ * The gradient vector and Hessian matrix at the initial keypoint location are
* approximated using central differences. The linear system Ax = b is then
- * solved, where A is the Hessian, b is the negative gradient, and x is the
+ * solved, where A is the Hessian, b is the negative gradient, and x is the
* offset of the interpolated maxima coordinates from the initial estimate.
 * This is equivalent to an iteration of Newton's optimisation algorithm.
*
N9[1][3]-2*N9[1][4]+N9[1][5], // 2nd deriv x, x
(N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4, // 2nd deriv x, y
(N9[2][5]-N9[2][3]-N9[0][5]+N9[0][3])/4, // 2nd deriv x, s
- (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4, // 2nd deriv x, y
- N9[1][1]-2*N9[1][4]+N9[1][7], // 2nd deriv y, y
- (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4, // 2nd deriv y, s
+ (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4, // 2nd deriv x, y
+ N9[1][1]-2*N9[1][4]+N9[1][7], // 2nd deriv y, y
+ (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4, // 2nd deriv y, s
(N9[2][5]-N9[2][3]-N9[0][5]+N9[0][3])/4, // 2nd deriv x, s
(N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4, // 2nd deriv y, s
N9[0][4]-2*N9[1][4]+N9[2][4]); // 2nd deriv s, s
Vec3f x = A.solve(b, DECOMP_LU);
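    // x = A^-1 * b is a single Newton step: with A the Hessian and b the negative
    // gradient (see the comment above), x is the sub-pixel/sub-scale offset of the
    // interpolated extremum, accepted below only if every component lies within [-1, 1]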
-
+
bool ok = (x[0] != 0 || x[1] != 0 || x[2] != 0) &&
std::abs(x[0]) <= 1 && std::abs(x[1]) <= 1 && std::abs(x[2]) <= 1;
-
+
if( ok )
{
kpt.pt.x += x[0]*dx;
{
int layer = (*middleIndices)[i];
int octave = i / nOctaveLayers;
- findMaximaInLayer( *sum, *mask_sum, *dets, *traces, *sizes,
+ findMaximaInLayer( *sum, *mask_sum, *dets, *traces, *sizes,
*keypoints, octave, layer, hessianThreshold,
(*sampleSteps)[layer] );
}
}
};
-
+
static void fastHessianDetector( const Mat& sum, const Mat& mask_sum, vector<KeyPoint>& keypoints,
int nOctaves, int nOctaveLayers, float hessianThreshold )
{
// Allocate space and calculate properties of each layer
int index = 0, middleIndex = 0, step = SAMPLE_STEP0;
-
+
for( int octave = 0; octave < nOctaves; octave++ )
{
for( int layer = 0; layer < nOctaveLayers+2; layer++ )
const int dx_s[NX][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
const int dy_s[NY][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
- // Optimisation is better using nOriSampleBound than nOriSamples for
+ // Optimisation is better using nOriSampleBound than nOriSamples for
// array lengths. Maybe because it is a constant known at compile time
const int nOriSampleBound =(2*ORI_RADIUS+1)*(2*ORI_RADIUS+1);
Mat _patch(PATCH_SZ+1, PATCH_SZ+1, CV_8U, PATCH);
int dsize = extended ? 128 : 64;
-
+
int k, k1 = range.begin(), k2 = range.end();
float maxSize = 0;
for( k = k1; k < k2; k++ )
Ptr<CvMat> winbuf = cvCreateMat( 1, imaxSize*imaxSize, CV_8U );
for( k = k1; k < k2; k++ )
{
- int i, j, kk, x, y, nangle;
+ int i, j, kk, nangle;
float* vec;
SurfHF dx_t[NX], dy_t[NY];
KeyPoint& kp = (*keypoints)[k];
float s = size*1.2f/9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
- We ensure the gradient wavelet size is even to ensure the
+ We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
int grad_wav_size = 2*cvRound( 2*s );
if( sum->rows < grad_wav_size || sum->cols < grad_wav_size )
resizeHaarPattern( dy_s, dy_t, NY, 4, grad_wav_size, sum->cols );
for( kk = 0, nangle = 0; kk < nOriSamples; kk++ )
{
- x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );
- y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );
+ int x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );
+ int y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );
if( y < 0 || y >= sum->rows - grad_wav_size ||
x < 0 || x >= sum->cols - grad_wav_size )
continue;
kp.angle = descriptor_dir;
if( !descriptors || !descriptors->data )
continue;
-
+
/* Extract a window of pixels around the keypoint of size 20s */
int win_size = (int)((PATCH_SZ+1)*s);
CV_Assert( winbuf->cols >= win_size*win_size );
if( !upright )
{
- descriptor_dir *= (float)(CV_PI/180);
+ descriptor_dir *= (float)(CV_PI/180);
float sin_dir = std::sin(descriptor_dir);
float cos_dir = std::cos(descriptor_dir);
/* Subpixel interpolation version (slower). Subpixel not required since
the pixels will all get averaged when we scale down to 20 pixels */
- /*
+ /*
float w[] = { cos_dir, sin_dir, center.x,
-sin_dir, cos_dir , center.y };
CvMat W = cvMat(2, 3, CV_32F, w);
else
{
// extract rect - slightly optimized version of the code above
- // TODO: find faster code, as this is simply an extract rect operation,
+ // TODO: find faster code, as this is simply an extract rect operation,
// e.g. by using cvGetSubRect, problem is the border processing
// descriptor_dir == 90 grad
// sin_dir == 1
// cos_dir == 0
-
+
float win_offset = -(float)(win_size-1)/2;
int start_x = cvRound(center.x + win_offset);
int start_y = cvRound(center.y - win_offset);
int pixel_y = start_y;
for( j = 0; j < win_size; j++, pixel_y-- )
{
- x = MAX( pixel_x, 0 );
- y = MAX( pixel_y, 0 );
+ int x = MAX( pixel_x, 0 );
+ int y = MAX( pixel_y, 0 );
x = MIN( x, img->cols-1 );
y = MIN( y, img->rows-1 );
WIN[i*win_size + j] = img->at<uchar>(y, x);
}
- }
+ }
}
// Scale the window to size PATCH_SZ so each pixel's size is s. This
// makes calculating the gradients with wavelets of size 2s easy
for( i = 0; i < 4; i++ )
for( j = 0; j < 4; j++ )
{
- for( y = i*5; y < i*5+5; y++ )
+ for(int y = i*5; y < i*5+5; y++ )
{
- for( x = j*5; x < j*5+5; x++ )
+ for(int x = j*5; x < j*5+5; x++ )
{
float tx = DX[y][x], ty = DY[y][x];
if( ty >= 0 )
for( i = 0; i < 4; i++ )
for( j = 0; j < 4; j++ )
{
- for( y = i*5; y < i*5+5; y++ )
+ for(int y = i*5; y < i*5+5; y++ )
{
- for( x = j*5; x < j*5+5; x++ )
+ for(int x = j*5; x < j*5+5; x++ )
{
float tx = DX[y][x], ty = DY[y][x];
vec[0] += tx; vec[1] += ty;
{
(*this)(imgarg, maskarg, keypoints, noArray(), false);
}
-
+
void SURF::operator()(InputArray _img, InputArray _mask,
CV_OUT vector<KeyPoint>& keypoints,
OutputArray _descriptors,
{
Mat img = _img.getMat(), mask = _mask.getMat(), mask1, sum, msum;
bool doDescriptors = _descriptors.needed();
-
+
CV_Assert(!img.empty() && img.depth() == CV_8U);
if( img.channels() > 1 )
cvtColor(img, img, COLOR_BGR2GRAY);
-
+
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size() == img.size()));
CV_Assert(hessianThreshold >= 0);
CV_Assert(nOctaves > 0);
CV_Assert(nOctaveLayers > 0);
-
+
integral(img, sum, CV_32S);
-
+
// Compute keypoints only if we are not asked to evaluate the descriptors at some given locations:
if( !useProvidedKeypoints )
{
}
fastHessianDetector( sum, msum, keypoints, nOctaves, nOctaveLayers, (float)hessianThreshold );
}
-
+
int i, j, N = (int)keypoints.size();
if( N > 0 )
{
bool _1d = false;
int dcols = extended ? 128 : 64;
size_t dsize = dcols*sizeof(float);
-
+
if( doDescriptors )
{
_1d = _descriptors.kind() == _InputArray::STD_VECTOR && _descriptors.type() == CV_32F;
descriptors = _descriptors.getMat();
}
}
-
+
// we call SURFInvoker in any case, even if we do not need descriptors,
// since it computes orientation of each feature.
parallel_for(BlockedRange(0, N), SURFInvoker(img, sum, keypoints, descriptors, extended, upright) );
-
+
// remove keypoints that were marked for deletion
for( i = j = 0; i < N; i++ )
{
void SURF::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
{
(*this)(image, mask, keypoints, noArray(), false);
-}
+}
void SURF::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
{
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
int label; ///< Quantization
Feature() : x(0), y(0), label(0) {}
- Feature(int x, int y, int label) : x(x), y(y), label(label) {}
+ Feature(int x, int y, int label);
void read(const FileNode& fn);
void write(FileStorage& fs) const;
};
+inline Feature::Feature(int _x, int _y, int _label) : x(_x), y(_y), label(_label) {}
+
struct CV_EXPORTS Template
{
int width;
/// Candidate feature with a score
struct Candidate
{
- Candidate(int x, int y, int label, float score)
- : f(x, y, label), score(score)
- {
- }
+ Candidate(int x, int y, int label, float score);
/// Sort candidates with high score to the front
bool operator<(const Candidate& rhs) const
size_t num_features, float distance);
};
+inline QuantizedPyramid::Candidate::Candidate(int x, int y, int label, float _score) : f(x, y, label), score(_score) {}
+
/**
* \brief Interface for modalities that plug into the LINE template matching representation.
*
{
}
- Match(int x, int y, float similarity, const std::string& class_id, int template_id)
- : x(x), y(y), similarity(similarity), class_id(class_id), template_id(template_id)
- {
- }
+ Match(int x, int y, float similarity, const std::string& class_id, int template_id);
/// Sort matches with high similarity to the front
bool operator<(const Match& rhs) const
int template_id;
};
+inline Match::Match(int _x, int _y, float _similarity, const std::string& _class_id, int _template_id)
+ : x(_x), y(_y), similarity(_similarity), class_id(_class_id), template_id(_template_id)
+ {
+ }
+
/**
* \brief Object detector using the LINE template matching algorithm with any set of
* modalities.
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
namespace cv
{
-
+
// class for grouping object candidates, detected by Cascade Classifier, HOG etc.
// instance of the class is to be passed to cv::partition (see cxoperations.hpp)
class CV_EXPORTS SimilarRects
{
-public:
+public:
SimilarRects(double _eps) : eps(_eps) {}
inline bool operator()(const Rect& r1, const Rect& r2) const
{
std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;
}
double eps;
-};
-
+};
+
void groupRectangles(vector<Rect>& rectList, int groupThreshold, double eps, vector<int>* weights, vector<double>* levelWeights)
{
}
return;
}
-
+
vector<int> labels;
int nclasses = partition(rectList, labels, SimilarRects(eps));
-
+
vector<Rect> rrects(nclasses);
vector<int> rweights(nclasses, 0);
- vector<int> rejectLevels(nclasses, 0);
+ vector<int> rejectLevels(nclasses, 0);
vector<double> rejectWeights(nclasses, DBL_MIN);
int i, j, nlabels = (int)labels.size();
for( i = 0; i < nlabels; i++ )
rweights[cls]++;
}
if ( levelWeights && weights && !weights->empty() && !levelWeights->empty() )
- {
- for( i = 0; i < nlabels; i++ )
- {
- int cls = labels[i];
+ {
+ for( i = 0; i < nlabels; i++ )
+ {
+ int cls = labels[i];
if( (*weights)[i] > rejectLevels[cls] )
{
rejectLevels[cls] = (*weights)[i];
}
else if( ( (*weights)[i] == rejectLevels[cls] ) && ( (*levelWeights)[i] > rejectWeights[cls] ) )
rejectWeights[cls] = (*levelWeights)[i];
- }
- }
-
+ }
+ }
+
for( i = 0; i < nclasses; i++ )
{
Rect r = rrects[i];
saturate_cast<int>(r.width*s),
saturate_cast<int>(r.height*s));
}
-
+
rectList.clear();
if( weights )
weights->clear();
- if( levelWeights )
- levelWeights->clear();
-
+ if( levelWeights )
+ levelWeights->clear();
+
for( i = 0; i < nclasses; i++ )
{
Rect r1 = rrects[i];
int n1 = levelWeights ? rejectLevels[i] : rweights[i];
- double w1 = rejectWeights[i];
+ double w1 = rejectWeights[i];
if( n1 <= groupThreshold )
continue;
// filter out small face rectangles inside large rectangles
for( j = 0; j < nclasses; j++ )
{
int n2 = rweights[j];
-
+
if( j == i || n2 <= groupThreshold )
continue;
Rect r2 = rrects[j];
-
+
int dx = saturate_cast<int>( r2.width * eps );
int dy = saturate_cast<int>( r2.height * eps );
-
+
if( i != j &&
r1.x >= r2.x - dx &&
r1.y >= r2.y - dy &&
(n2 > std::max(3, n1) || n1 < 3) )
break;
}
-
+
if( j == nclasses )
{
rectList.push_back(r1);
if( weights )
weights->push_back(n1);
- if( levelWeights )
- levelWeights->push_back(w1);
+ if( levelWeights )
+ levelWeights->push_back(w1);
}
}
}
class MeanshiftGrouping
{
public:
- MeanshiftGrouping(const Point3d& densKer, const vector<Point3d>& posV,
- const vector<double>& wV, double, int maxIter = 20)
+ MeanshiftGrouping(const Point3d& densKer, const vector<Point3d>& posV,
+ const vector<double>& wV, double, int maxIter = 20)
{
- densityKernel = densKer;
+ densityKernel = densKer;
weightsV = wV;
positionsV = posV;
positionsCount = (int)posV.size();
- meanshiftV.resize(positionsCount);
+ meanshiftV.resize(positionsCount);
distanceV.resize(positionsCount);
- iterMax = maxIter;
-
- for (unsigned i = 0; i<positionsV.size(); i++)
- {
- meanshiftV[i] = getNewValue(positionsV[i]);
- distanceV[i] = moveToMode(meanshiftV[i]);
- meanshiftV[i] -= positionsV[i];
- }
+ iterMax = maxIter;
+
+ for (unsigned i = 0; i<positionsV.size(); i++)
+ {
+ meanshiftV[i] = getNewValue(positionsV[i]);
+ distanceV[i] = moveToMode(meanshiftV[i]);
+ meanshiftV[i] -= positionsV[i];
+ }
}
-
- void getModes(vector<Point3d>& modesV, vector<double>& resWeightsV, const double eps)
+
+ void getModes(vector<Point3d>& modesV, vector<double>& resWeightsV, const double eps)
{
- for (size_t i=0; i <distanceV.size(); i++)
- {
- bool is_found = false;
- for(size_t j=0; j<modesV.size(); j++)
- {
- if ( getDistance(distanceV[i], modesV[j]) < eps)
- {
- is_found=true;
- break;
- }
- }
- if (!is_found)
- {
- modesV.push_back(distanceV[i]);
- }
- }
-
- resWeightsV.resize(modesV.size());
-
- for (size_t i=0; i<modesV.size(); i++)
- {
- resWeightsV[i] = getResultWeight(modesV[i]);
- }
+ for (size_t i=0; i <distanceV.size(); i++)
+ {
+ bool is_found = false;
+ for(size_t j=0; j<modesV.size(); j++)
+ {
+ if ( getDistance(distanceV[i], modesV[j]) < eps)
+ {
+ is_found=true;
+ break;
+ }
+ }
+ if (!is_found)
+ {
+ modesV.push_back(distanceV[i]);
+ }
+ }
+
+ resWeightsV.resize(modesV.size());
+
+ for (size_t i=0; i<modesV.size(); i++)
+ {
+ resWeightsV[i] = getResultWeight(modesV[i]);
+ }
}
protected:
- vector<Point3d> positionsV;
- vector<double> weightsV;
+ vector<Point3d> positionsV;
+ vector<double> weightsV;
- Point3d densityKernel;
- int positionsCount;
+ Point3d densityKernel;
+ int positionsCount;
- vector<Point3d> meanshiftV;
- vector<Point3d> distanceV;
- int iterMax;
- double modeEps;
+ vector<Point3d> meanshiftV;
+ vector<Point3d> distanceV;
+ int iterMax;
+ double modeEps;
- Point3d getNewValue(const Point3d& inPt) const
+ Point3d getNewValue(const Point3d& inPt) const
{
- Point3d resPoint(.0);
- Point3d ratPoint(.0);
- for (size_t i=0; i<positionsV.size(); i++)
- {
- Point3d aPt= positionsV[i];
- Point3d bPt = inPt;
- Point3d sPt = densityKernel;
-
- sPt.x *= exp(aPt.z);
- sPt.y *= exp(aPt.z);
-
- aPt.x /= sPt.x;
- aPt.y /= sPt.y;
- aPt.z /= sPt.z;
-
- bPt.x /= sPt.x;
- bPt.y /= sPt.y;
- bPt.z /= sPt.z;
-
- double w = (weightsV[i])*std::exp(-((aPt-bPt).dot(aPt-bPt))/2)/std::sqrt(sPt.dot(Point3d(1,1,1)));
-
- resPoint += w*aPt;
-
- ratPoint.x += w/sPt.x;
- ratPoint.y += w/sPt.y;
- ratPoint.z += w/sPt.z;
- }
- resPoint.x /= ratPoint.x;
- resPoint.y /= ratPoint.y;
- resPoint.z /= ratPoint.z;
- return resPoint;
+ Point3d resPoint(.0);
+ Point3d ratPoint(.0);
+ for (size_t i=0; i<positionsV.size(); i++)
+ {
+ Point3d aPt= positionsV[i];
+ Point3d bPt = inPt;
+ Point3d sPt = densityKernel;
+
+ sPt.x *= exp(aPt.z);
+ sPt.y *= exp(aPt.z);
+
+ aPt.x /= sPt.x;
+ aPt.y /= sPt.y;
+ aPt.z /= sPt.z;
+
+ bPt.x /= sPt.x;
+ bPt.y /= sPt.y;
+ bPt.z /= sPt.z;
+
+ double w = (weightsV[i])*std::exp(-((aPt-bPt).dot(aPt-bPt))/2)/std::sqrt(sPt.dot(Point3d(1,1,1)));
+
+ resPoint += w*aPt;
+
+ ratPoint.x += w/sPt.x;
+ ratPoint.y += w/sPt.y;
+ ratPoint.z += w/sPt.z;
+ }
+ resPoint.x /= ratPoint.x;
+ resPoint.y /= ratPoint.y;
+ resPoint.z /= ratPoint.z;
+ return resPoint;
}
- double getResultWeight(const Point3d& inPt) const
+ double getResultWeight(const Point3d& inPt) const
{
- double sumW=0;
- for (size_t i=0; i<positionsV.size(); i++)
- {
- Point3d aPt = positionsV[i];
- Point3d sPt = densityKernel;
-
- sPt.x *= exp(aPt.z);
- sPt.y *= exp(aPt.z);
-
- aPt -= inPt;
-
- aPt.x /= sPt.x;
- aPt.y /= sPt.y;
- aPt.z /= sPt.z;
-
- sumW+=(weightsV[i])*std::exp(-(aPt.dot(aPt))/2)/std::sqrt(sPt.dot(Point3d(1,1,1)));
- }
- return sumW;
+ double sumW=0;
+ for (size_t i=0; i<positionsV.size(); i++)
+ {
+ Point3d aPt = positionsV[i];
+ Point3d sPt = densityKernel;
+
+ sPt.x *= exp(aPt.z);
+ sPt.y *= exp(aPt.z);
+
+ aPt -= inPt;
+
+ aPt.x /= sPt.x;
+ aPt.y /= sPt.y;
+ aPt.z /= sPt.z;
+
+ sumW+=(weightsV[i])*std::exp(-(aPt.dot(aPt))/2)/std::sqrt(sPt.dot(Point3d(1,1,1)));
+ }
+ return sumW;
}
-
- Point3d moveToMode(Point3d aPt) const
+
+ Point3d moveToMode(Point3d aPt) const
{
- Point3d bPt;
- for (int i = 0; i<iterMax; i++)
- {
- bPt = aPt;
- aPt = getNewValue(bPt);
- if ( getDistance(aPt, bPt) <= modeEps )
- {
- break;
- }
- }
- return aPt;
+ Point3d bPt;
+ for (int i = 0; i<iterMax; i++)
+ {
+ bPt = aPt;
+ aPt = getNewValue(bPt);
+ if ( getDistance(aPt, bPt) <= modeEps )
+ {
+ break;
+ }
+ }
+ return aPt;
}
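    // getDistance() below returns the squared distance between two points after
    // normalising each coordinate by the kernel bandwidth; the x/y bandwidths are
    // scaled by exp(p2.z) because the z component stores log(scale)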
double getDistance(Point3d p1, Point3d p2) const
{
- Point3d ns = densityKernel;
- ns.x *= exp(p2.z);
- ns.y *= exp(p2.z);
- p2 -= p1;
- p2.x /= ns.x;
- p2.y /= ns.y;
- p2.z /= ns.z;
- return p2.dot(p2);
+ Point3d ns = densityKernel;
+ ns.x *= exp(p2.z);
+ ns.y *= exp(p2.z);
+ p2 -= p1;
+ p2.x /= ns.x;
+ p2.y /= ns.y;
+ p2.z /= ns.z;
+ return p2.dot(p2);
}
};
//new grouping function with using meanshift
-static void groupRectangles_meanshift(vector<Rect>& rectList, double detectThreshold, vector<double>* foundWeights,
- vector<double>& scales, Size winDetSize)
+static void groupRectangles_meanshift(vector<Rect>& rectList, double detectThreshold, vector<double>* foundWeights,
+ vector<double>& scales, Size winDetSize)
{
int detectionCount = (int)rectList.size();
vector<Point3d> hits(detectionCount), resultHits;
vector<double> hitWeights(detectionCount), resultWeights;
Point2d hitCenter;
- for (int i=0; i < detectionCount; i++)
+ for (int i=0; i < detectionCount; i++)
{
hitWeights[i] = (*foundWeights)[i];
hitCenter = (rectList[i].tl() + rectList[i].br())*(0.5); //center of rectangles
msGrouping.getModes(resultHits, resultWeights, 1);
- for (unsigned i=0; i < resultHits.size(); ++i)
+ for (unsigned i=0; i < resultHits.size(); ++i)
{
double scale = exp(resultHits[i].z);
hitCenter.x = resultHits[i].x;
hitCenter.y = resultHits[i].y;
Size s( int(winDetSize.width * scale), int(winDetSize.height * scale) );
- Rect resultRect( int(hitCenter.x-s.width/2), int(hitCenter.y-s.height/2),
- int(s.width), int(s.height) );
+ Rect resultRect( int(hitCenter.x-s.width/2), int(hitCenter.y-s.height/2),
+ int(s.width), int(s.height) );
- if (resultWeights[i] > detectThreshold)
+ if (resultWeights[i] > detectThreshold)
{
rectList.push_back(resultRect);
foundWeights->push_back(resultWeights[i]);
groupRectangles(rectList, groupThreshold, eps, &rejectLevels, &levelWeights);
}
//can be used for HOG detection algorithm only
-void groupRectangles_meanshift(vector<Rect>& rectList, vector<double>& foundWeights,
- vector<double>& foundScales, double detectThreshold, Size winDetSize)
+void groupRectangles_meanshift(vector<Rect>& rectList, vector<double>& foundWeights,
+ vector<double>& foundScales, double detectThreshold, Size winDetSize)
{
- groupRectangles_meanshift(rectList, detectThreshold, &foundWeights, foundScales, winDetSize);
+ groupRectangles_meanshift(rectList, detectThreshold, &foundWeights, foundScales, winDetSize);
}
-
+
FeatureEvaluator::~FeatureEvaluator() {}
bool FeatureEvaluator::read(const FileNode&) {return true;}
{
FileNode rnode = node[CC_RECTS];
FileNodeIterator it = rnode.begin(), it_end = rnode.end();
-
+
int ri;
for( ri = 0; ri < RECT_NUM; ri++ )
{
rect[ri].r = Rect();
rect[ri].weight = 0.f;
}
-
+
for(ri = 0; it != it_end; ++it, ri++)
{
FileNodeIterator it2 = (*it).begin();
it2 >> rect[ri].r.x >> rect[ri].r.y >>
rect[ri].r.width >> rect[ri].r.height >> rect[ri].weight;
}
-
+
tilted = (int)node[CC_TILTED] != 0;
return true;
}
featuresPtr = &(*features)[0];
FileNodeIterator it = node.begin(), it_end = node.end();
hasTiltedFeatures = false;
-
+
for(int i = 0; it != it_end; ++it, i++)
{
if(!featuresPtr[i].read(*it))
}
return true;
}
-
+
Ptr<FeatureEvaluator> HaarEvaluator::clone() const
{
HaarEvaluator* ret = new HaarEvaluator;
memcpy( ret->p, p, 4*sizeof(p[0]) );
memcpy( ret->pq, pq, 4*sizeof(pq[0]) );
ret->offset = offset;
- ret->varianceNormFactor = varianceNormFactor;
+ ret->varianceNormFactor = varianceNormFactor;
return ret;
}
int rn = image.rows+1, cn = image.cols+1;
origWinSize = _origWinSize;
normrect = Rect(1, 1, origWinSize.width-2, origWinSize.height-2);
-
+
if (image.cols < origWinSize.width || image.rows < origWinSize.height)
return false;
-
+
if( sum0.rows < rn || sum0.cols < cn )
{
sum0.create(rn, cn, CV_32S);
const double* sqdata = (const double*)sqsum.data;
size_t sumStep = sum.step/sizeof(sdata[0]);
size_t sqsumStep = sqsum.step/sizeof(sqdata[0]);
-
+
CV_SUM_PTRS( p[0], p[1], p[2], p[3], sdata, normrect, sumStep );
CV_SUM_PTRS( pq[0], pq[1], pq[2], pq[3], sqdata, normrect, sqsumStep );
-
+
size_t fi, nfeatures = features->size();
for( fi = 0; fi < nfeatures; fi++ )
if( image.cols < origWinSize.width || image.rows < origWinSize.height )
return false;
-
+
if( sum0.rows < rn || sum0.cols < cn )
sum0.create(rn, cn, CV_32S);
sum = Mat(rn, cn, CV_32S, sum0.data);
integral(image, sum);
-
+
size_t fi, nfeatures = features->size();
-
+
for( fi = 0; fi < nfeatures; fi++ )
featuresPtr[fi].updatePtrs( sum );
return true;
}
-
+
bool LBPEvaluator::setWindow( Point pt )
{
if( pt.x < 0 || pt.y < 0 ||
return false;
offset = pt.y * ((int)sum.step/sizeof(int)) + pt.x;
return true;
-}
+}
//---------------------------------------------- HOGEvaluator ---------------------------------------
bool HOGEvaluator::Feature :: read( const FileNode& node )
ret->featuresPtr = &(*ret->features)[0];
ret->offset = offset;
ret->hist = hist;
- ret->normSum = normSum;
+ ret->normSum = normSum;
return ret;
}
memset( histBuf, 0, histSize.width * sizeof(histBuf[0]) );
histBuf += histStep + 1;
for( y = 0; y < qangle.rows; y++ )
- {
+ {
histBuf[-1] = 0.f;
float strSum = 0.f;
for( x = 0; x < qangle.cols; x++ )
Ptr<FeatureEvaluator> FeatureEvaluator::create( int featureType )
{
return featureType == HAAR ? Ptr<FeatureEvaluator>(new HaarEvaluator) :
- featureType == LBP ? Ptr<FeatureEvaluator>(new LBPEvaluator) :
+ featureType == LBP ? Ptr<FeatureEvaluator>(new LBPEvaluator) :
featureType == HOG ? Ptr<FeatureEvaluator>(new HOGEvaluator) :
Ptr<FeatureEvaluator>();
}
}
CascadeClassifier::CascadeClassifier(const string& filename)
-{
- load(filename);
+{
+ load(filename);
}
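// Minimal usage sketch (the cascade file name and grayFrame are hypothetical):
//
//   CascadeClassifier face_cascade("haarcascade_frontalface_alt.xml");
//   std::vector<Rect> faces;
//   if( !face_cascade.empty() )
//       face_cascade.detectMultiScale(grayFrame, faces, 1.1, 3, 0, Size(30, 30));
//
// detectMultiScale scans the image over a scale pyramid and then groups the raw
// hits, so `faces` already contains merged rectangles.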
CascadeClassifier::~CascadeClassifier()
{
-}
+}
bool CascadeClassifier::empty() const
{
oldCascade.release();
data = Data();
featureEvaluator.release();
-
+
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
-
+
if( read(fs.getFirstTopLevelNode()) )
return true;
-
+
fs.release();
-
+
oldCascade = Ptr<CvHaarClassifierCascade>((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
return !oldCascade.empty();
}
-
-int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& featureEvaluator, Point pt, double& weight )
+
+int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& evaluator, Point pt, double& weight )
{
CV_Assert( oldCascade.empty() );
-
+
assert( data.featureType == FeatureEvaluator::HAAR ||
data.featureType == FeatureEvaluator::LBP ||
data.featureType == FeatureEvaluator::HOG );
- if( !featureEvaluator->setWindow(pt) )
+ if( !evaluator->setWindow(pt) )
return -1;
if( data.isStumpBased )
{
if( data.featureType == FeatureEvaluator::HAAR )
- return predictOrderedStump<HaarEvaluator>( *this, featureEvaluator, weight );
+ return predictOrderedStump<HaarEvaluator>( *this, evaluator, weight );
else if( data.featureType == FeatureEvaluator::LBP )
- return predictCategoricalStump<LBPEvaluator>( *this, featureEvaluator, weight );
+ return predictCategoricalStump<LBPEvaluator>( *this, evaluator, weight );
else if( data.featureType == FeatureEvaluator::HOG )
- return predictOrderedStump<HOGEvaluator>( *this, featureEvaluator, weight );
+ return predictOrderedStump<HOGEvaluator>( *this, evaluator, weight );
else
return -2;
}
else
{
if( data.featureType == FeatureEvaluator::HAAR )
- return predictOrdered<HaarEvaluator>( *this, featureEvaluator, weight );
+ return predictOrdered<HaarEvaluator>( *this, evaluator, weight );
else if( data.featureType == FeatureEvaluator::LBP )
- return predictCategorical<LBPEvaluator>( *this, featureEvaluator, weight );
+ return predictCategorical<LBPEvaluator>( *this, evaluator, weight );
else if( data.featureType == FeatureEvaluator::HOG )
- return predictOrdered<HOGEvaluator>( *this, featureEvaluator, weight );
+ return predictOrdered<HOGEvaluator>( *this, evaluator, weight );
else
return -2;
}
}
-
-bool CascadeClassifier::setImage( Ptr<FeatureEvaluator>& featureEvaluator, const Mat& image )
+
+bool CascadeClassifier::setImage( Ptr<FeatureEvaluator>& evaluator, const Mat& image )
{
- return empty() ? false : featureEvaluator->setImage(image, data.origWinSize);
+ return empty() ? false : evaluator->setImage(image, data.origWinSize);
}
void CascadeClassifier::setMaskGenerator(Ptr<MaskGenerator> _maskGenerator)
struct CascadeClassifierInvoker
{
- CascadeClassifierInvoker( CascadeClassifier& _cc, Size _sz1, int _stripSize, int _yStep, double _factor,
+ CascadeClassifierInvoker( CascadeClassifier& _cc, Size _sz1, int _stripSize, int _yStep, double _factor,
ConcurrentRectVector& _vec, vector<int>& _levels, vector<double>& _weights, bool outputLevels, const Mat& _mask)
{
classifier = &_cc;
levelWeights = outputLevels ? &_weights : 0;
mask=_mask;
}
-
+
void operator()(const BlockedRange& range) const
{
Ptr<FeatureEvaluator> evaluator = classifier->featureEvaluator->clone();
result = -(int)classifier->data.stages.size();
if( classifier->data.stages.size() + result < 4 )
{
- rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor), winSize.width, winSize.height));
+ rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor), winSize.width, winSize.height));
rejectLevels->push_back(-result);
levelWeights->push_back(gypWeight);
}
- }
+ }
else if( result > 0 )
rectangles->push_back(Rect(cvRound(x*scalingFactor), cvRound(y*scalingFactor),
winSize.width, winSize.height));
}
}
}
-
+
CascadeClassifier* classifier;
ConcurrentRectVector* rectangles;
Size processingRectSize;
vector<double> *levelWeights;
Mat mask;
};
-
+
struct getRect { Rect operator ()(const CvAvgComp& e) const { return e.rect; } };
bool CascadeClassifier::detectSingleScale( const Mat& image, int stripCount, Size processingRectSize,
return featureEvaluator->setImage(image, data.origWinSize);
}
-void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects,
+void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects,
vector<int>& rejectLevels,
vector<double>& levelWeights,
double scaleFactor, int minNeighbors,
- int flags, Size minObjectSize, Size maxObjectSize,
+ int flags, Size minObjectSize, Size maxObjectSize,
bool outputRejectLevels )
{
const double GROUP_EPS = 0.2;
-
+
CV_Assert( scaleFactor > 1 && image.depth() == CV_8U );
-
+
if( empty() )
return;
if( maxObjectSize.height == 0 || maxObjectSize.width == 0 )
maxObjectSize = image.size();
-
+
Mat grayImage = image;
if( grayImage.channels() > 1 )
{
cvtColor(grayImage, temp, CV_BGR2GRAY);
grayImage = temp;
}
-
+
Mat imageBuffer(image.rows + 1, image.cols + 1, CV_8U);
vector<Rect> candidates;
Size windowSize( cvRound(originalWindowSize.width*factor), cvRound(originalWindowSize.height*factor) );
Size scaledImageSize( cvRound( grayImage.cols/factor ), cvRound( grayImage.rows/factor ) );
Size processingRectSize( scaledImageSize.width - originalWindowSize.width + 1, scaledImageSize.height - originalWindowSize.height + 1 );
-
+
if( processingRectSize.width <= 0 || processingRectSize.height <= 0 )
break;
if( windowSize.width > maxObjectSize.width || windowSize.height > maxObjectSize.height )
break;
if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
continue;
-
+
Mat scaledImage( scaledImageSize, CV_8U, imageBuffer.data );
resize( grayImage, scaledImage, scaledImageSize, 0, 0, CV_INTER_LINEAR );
stripSize = processingRectSize.height;
#endif
- if( !detectSingleScale( scaledImage, stripCount, processingRectSize, stripSize, yStep, factor, candidates,
+ if( !detectSingleScale( scaledImage, stripCount, processingRectSize, stripSize, yStep, factor, candidates,
rejectLevels, levelWeights, outputRejectLevels ) )
break;
}
-
+
objects.resize(candidates.size());
std::copy(candidates.begin(), candidates.end(), objects.begin());
{
vector<int> fakeLevels;
vector<double> fakeWeights;
- detectMultiScale( image, objects, fakeLevels, fakeWeights, scaleFactor,
+ detectMultiScale( image, objects, fakeLevels, fakeWeights, scaleFactor,
minNeighbors, flags, minObjectSize, maxObjectSize, false );
-}
+}
bool CascadeClassifier::Data::read(const FileNode &root)
{
static const float THRESHOLD_EPS = 1e-5f;
-
+
// load stage params
string stageTypeStr = (string)root[CC_STAGE_TYPE];
if( stageTypeStr == CC_BOOST )
FileNode fn = root[CC_FEATURES];
if( fn.empty() )
return false;
-
+
return featureEvaluator->read(fn);
}
-
+
template<> void Ptr<CvHaarClassifierCascade>::delete_obj()
-{ cvReleaseHaarClassifierCascade(&obj); }
+{ cvReleaseHaarClassifierCascade(&obj); }
} // namespace cv
{
uchar binary[8] = {0,0,0,0,0,0,0,0};
uchar b = 0;
- int i, sum;
+ int sum;
sum = 0;
- for (i = 0; i < 64; i++)
+ for (int i = 0; i < 64; i++)
sum += sa.getpixel(1 + (i & 7), 1 + (i >> 3));
uchar mean = (uchar)(sum / 64);
- for (i = 0; i < 64; i++) {
+ for (int i = 0; i < 64; i++) {
b = (b << 1) + (sa.getpixel(pickup[i].x, pickup[i].y) <= mean);
if ((i & 7) == 7) {
binary[i >> 3] = b;
uchar c[5] = {0,0,0,0,0};
{
- int i, j;
uchar a[5] = {228, 48, 15, 111, 62};
int k = 5;
- for (i = 0; i < 3; i++) {
+ for (int i = 0; i < 3; i++) {
uchar t = binary[i] ^ c[4];
- for (j = k - 1; j != -1; j--) {
+ for (int j = k - 1; j != -1; j--) {
if (t == 0)
c[j] = 0;
else
deque <CvPoint> candidates;
{
int x, y;
- int r = cxy->rows;
- int c = cxy->cols;
- for (y = 0; y < r; y++) {
+ int rows = cxy->rows;
+ int cols = cxy->cols;
+ for (y = 0; y < rows; y++) {
const short *cd = (const short*)cvPtr2D(cxy, y, 0);
const short *ccd = (const short*)cvPtr2D(ccxy, y, 0);
- for (x = 0; x < c; x += 4, cd += 8, ccd += 8) {
+ for (x = 0; x < cols; x += 4, cd += 8, ccd += 8) {
__m128i v = _mm_loadu_si128((const __m128i*)cd);
__m128 cyxyxA = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(v, v), 16));
__m128 cyxyxB = _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(v, v), 16));
namespace cv
{
-
+
void findDataMatrix(InputArray _image,
vector<string>& codes,
OutputArray _corners,
deque <CvDataMatrixCode> rc = cvFindDataMatrix(&m);
int i, n = (int)rc.size();
Mat corners;
-
+
if( _corners.needed() )
{
_corners.create(n, 4, CV_32SC2);
corners = _corners.getMat();
}
-
+
if( _dmtx.needed() )
_dmtx.create(n, 1, CV_8U);
-
+
codes.resize(n);
-
+
for( i = 0; i < n; i++ )
{
CvDataMatrixCode& rc_i = rc[i];
codes[i] = string(rc_i.msg);
-
+
if( corners.data )
{
const Point* srcpt = (Point*)rc_i.corners->data.ptr;
dstpt[k] = srcpt[k];
}
cvReleaseMat(&rc_i.corners);
-
+
if( _dmtx.needed() )
{
_dmtx.create(rc_i.original->rows, rc_i.original->cols, rc_i.original->type, i);
Mat image = _image.getMat();
Mat corners = _corners.getMat();
int i, n = corners.rows;
-
+
if( n > 0 )
{
CV_Assert( corners.depth() == CV_32S &&
corners.cols*corners.channels() == 8 &&
n == (int)codes.size() );
}
-
+
for( i = 0; i < n; i++ )
{
Scalar c(0, 255, 0);
Scalar c2(255, 0,0);
const Point* pt = (const Point*)corners.ptr(i);
-
+
for( int k = 0; k < 4; k++ )
line(image, pt[k], pt[(k+1)%4], c);
//int baseline = 0;
putText(image, codes[i], pt[0], CV_FONT_HERSHEY_SIMPLEX, 0.8, c2, 1, CV_AA, false);
}
}
-
+
}
//\r
//\r
// API\r
-// int GetPointOfIntersection(const float *f, \r
- const float a, const float b, \r
+// int GetPointOfIntersection(const float *f,\r
+ const float a, const float b,\r
int q1, int q2, float *point);\r
// INPUT\r
// f - function on the regular grid\r
// RESULT\r
// Error status\r
*/\r
-int GetPointOfIntersection(const float *f, \r
- const float a, const float b, \r
+int GetPointOfIntersection(const float *f,\r
+ const float a, const float b,\r
int q1, int q2, float *point)\r
{\r
if (q1 == q2)\r
{\r
return DISTANCE_TRANSFORM_EQUAL_POINTS;\r
- } /* if (q1 == q2) */ \r
- (*point) = ( (f[q2] - a * q2 + b *q2 * q2) - \r
+ } /* if (q1 == q2) */\r
+ (*point) = ( (f[q2] - a * q2 + b *q2 * q2) -\r
(f[q1] - a * q1 + b * q1 * q1) ) / (2 * b * (q2 - q1));\r
return DISTANCE_TRANSFORM_OK;\r
}\r
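/*\r
// Derivation sketch for the expression above (assuming each grid sample q\r
// contributes the parabola P_q(x) = f[q] + a*(x - q) + b*(x - q)^2):\r
// setting P_q1(x) = P_q2(x), the b*x^2 and a*x terms cancel, leaving\r
//   2*b*x*(q2 - q1) = (f[q2] - a*q2 + b*q2*q2) - (f[q1] - a*q1 + b*q1*q1),\r
// which is exactly the value assigned to (*point) when q1 != q2.\r
*/\r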
//\r
// API\r
// int DistanceTransformOneDimensionalProblem(const float *f, const int n,\r
- const float a, const float b, \r
+ const float a, const float b,\r
float *distanceTransform,\r
- int *points); \r
+ int *points);\r
// INPUT\r
// f - function on the regular grid\r
// n - grid dimension\r
// Error status\r
*/\r
int DistanceTransformOneDimensionalProblem(const float *f, const int n,\r
- const float a, const float b, \r
+ const float a, const float b,\r
float *distanceTransform,\r
int *points)\r
{\r
// Allocation memory (must be free in this function)\r
v = (int *)malloc (sizeof(int) * n);\r
z = (float *)malloc (sizeof(float) * (n + 1));\r
- \r
+\r
v[0] = 0;\r
z[0] = (float)F_MIN; // left border of envelope\r
z[1] = (float)F_MAX; // right border of envelope\r
} /* if (tmp != DISTANCE_TRANSFORM_OK) */\r
if (pointIntersection <= z[k])\r
{\r
- // Envelope doesn't contain current parabola \r
+ // Envelope doesn't contain current parabola\r
do\r
{\r
k--;\r
// INPUT\r
// k - index of the previous cycle element\r
// n - number of matrix rows\r
-// q - parameter that equal \r
+// q - parameter that equal\r
(number_of_rows * number_of_columns - 1)\r
// OUTPUT\r
// None\r
// RESULT\r
// Error status\r
*/\r
-void TransposeCycleElements_int(int *a, int *cycle, int cycle_len)\r
+static void TransposeCycleElements_int(int *a, int *cycle, int cycle_len)\r
{\r
int i;\r
int buf;\r
int max_cycle_len;\r
\r
max_cycle_len = n * m;\r
- \r
+\r
// Allocation memory (must be free in this function)\r
cycle = (int *)malloc(sizeof(int) * max_cycle_len);\r
\r
k = GetNextCycleElement(i, n, q);\r
cycle[cycle_len] = i;\r
cycle_len++;\r
- \r
+\r
while (k > i)\r
- { \r
- cycle[cycle_len] = k; \r
+ {\r
+ cycle[cycle_len] = k;\r
cycle_len++;\r
- k = GetNextCycleElement(k, n, q); \r
+ k = GetNextCycleElement(k, n, q);\r
}\r
if (k == i)\r
{\r
// RESULT\r
// None\r
*/\r
-void Transpose_int(int *a, int n, int m)\r
+static void Transpose_int(int *a, int n, int m)\r
{\r
int *cycle;\r
int i, k, q, cycle_len;\r
int max_cycle_len;\r
\r
max_cycle_len = n * m;\r
- \r
+\r
// Allocation memory (must be free in this function)\r
cycle = (int *)malloc(sizeof(int) * max_cycle_len);\r
\r
k = GetNextCycleElement(i, n, q);\r
cycle[cycle_len] = i;\r
cycle_len++;\r
- \r
+\r
while (k > i)\r
- { \r
- cycle[cycle_len] = k; \r
+ {\r
+ cycle[cycle_len] = k;\r
cycle_len++;\r
- k = GetNextCycleElement(k, n, q); \r
+ k = GetNextCycleElement(k, n, q);\r
}\r
if (k == i)\r
{\r
/*\r
// Solution of the two-dimensional generalized distance transform problem\r
// on the regular grid at all points\r
-// min{d2(y' - y) + d4(y' - y)(y' - y) + \r
+// min{d2(y' - y) + d4(y' - y)(y' - y) +\r
min(d1(x' - x) + d3(x' - x)(x' - x) + f(x',y'))} (on x', y')\r
//\r
// API\r
-// int DistanceTransformTwoDimensionalProblem(const float *f, \r
+// int DistanceTransformTwoDimensionalProblem(const float *f,\r
const int n, const int m,\r
- const float coeff[4], \r
+ const float coeff[4],\r
float *distanceTransform,\r
- int *pointsX, int *pointsY); \r
+ int *pointsX, int *pointsY);\r
// INPUT\r
// f - function on the regular grid\r
// n - number of rows\r
// m - number of columns\r
// coeff - coefficients of optimizable function\r
- coeff[0] = d1, coeff[1] = d2, \r
+ coeff[0] = d1, coeff[1] = d2,\r
coeff[2] = d3, coeff[3] = d4\r
// OUTPUT\r
// distanceTransform - values of generalized distance transform\r
// RESULT\r
// Error status\r
*/\r
-int DistanceTransformTwoDimensionalProblem(const float *f, \r
+int DistanceTransformTwoDimensionalProblem(const float *f,\r
const int n, const int m,\r
- const float coeff[4], \r
+ const float coeff[4],\r
float *distanceTransform,\r
int *pointsX, int *pointsY)\r
{\r
for (i = 0; i < n; i++)\r
{\r
resOneDimProblem = DistanceTransformOneDimensionalProblem(\r
- f + i * m, m, \r
- coeff[0], coeff[2], \r
- &internalDistTrans[i * m], \r
- &internalPointsX[i * m]); \r
+ f + i * m, m,\r
+ coeff[0], coeff[2],\r
+ &internalDistTrans[i * m],\r
+ &internalPointsX[i * m]);\r
if (resOneDimProblem != DISTANCE_TRANSFORM_OK)\r
return DISTANCE_TRANSFORM_ERROR;\r
}\r
for (j = 0; j < m; j++)\r
{\r
resOneDimProblem = DistanceTransformOneDimensionalProblem(\r
- &internalDistTrans[j * n], n, \r
- coeff[1], coeff[3], \r
- distanceTransform + j * n, \r
+ &internalDistTrans[j * n], n,\r
+ coeff[1], coeff[3],\r
+ distanceTransform + j * n,\r
pointsY + j * n);\r
if (resOneDimProblem != DISTANCE_TRANSFORM_OK)\r
return DISTANCE_TRANSFORM_ERROR;\r
int height, width, numChannels;\r
int i, j, kk, c, ii, jj, d;\r
float * datadx, * datady;\r
- \r
+\r
// channel number in the loop\r
- int ch; \r
+ int ch;\r
// variables for magnitude computation\r
float magnitude, x, y, tx, ty;\r
- \r
+\r
IplImage * dx, * dy;\r
int *nearest;\r
float *w, a_x, b_x;\r
// even indices - contrast-insensitive image\r
// odd indices - contrast-sensitive image\r
int * alfa;\r
- \r
+\r
// sector boundary vectors\r
float boundary_x[NUM_SECTOR + 1];\r
float boundary_y[NUM_SECTOR + 1];\r
\r
numChannels = image->nChannels;\r
\r
- dx = cvCreateImage(cvSize(image->width, image->height), \r
+ dx = cvCreateImage(cvSize(image->width, image->height),\r
IPL_DEPTH_32F, 3);\r
- dy = cvCreateImage(cvSize(image->width, image->height), \r
+ dy = cvCreateImage(cvSize(image->width, image->height),\r
IPL_DEPTH_32F, 3);\r
\r
sizeX = width / k;\r
\r
cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));\r
cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));\r
- \r
+\r
float arg_vector;\r
for(i = 0; i <= NUM_SECTOR; i++)\r
{\r
y = ty;\r
}\r
}/*for(ch = 1; ch < numChannels; ch++)*/\r
- \r
+\r
max = boundary_x[0] * x + boundary_y[0] * y;\r
maxi = 0;\r
- for (kk = 0; kk < NUM_SECTOR; kk++) \r
+ for (kk = 0; kk < NUM_SECTOR; kk++)\r
{\r
dotProd = boundary_x[kk] * x + boundary_y[kk] * y;\r
- if (dotProd > max) \r
+ if (dotProd > max)\r
{\r
max = dotProd;\r
maxi = kk;\r
}\r
- else \r
+ else\r
{\r
- if (-dotProd > max) \r
+ if (-dotProd > max)\r
{\r
max = -dotProd;\r
maxi = kk + NUM_SECTOR;\r
}\r
}\r
alfa[j * width * 2 + i * 2 ] = maxi % NUM_SECTOR;\r
- alfa[j * width * 2 + i * 2 + 1] = maxi; \r
+ alfa[j * width * 2 + i * 2 + 1] = maxi;\r
}/*for(i = 0; i < width; i++)*/\r
}/*for(j = 0; j < height; j++)*/\r
\r
// computing interpolation weights and offsets\r
nearest = (int *)malloc(sizeof(int ) * k);\r
w = (float*)malloc(sizeof(float) * (k * 2));\r
- \r
+\r
for(i = 0; i < k / 2; i++)\r
{\r
nearest[i] = -1;\r
{\r
b_x = k / 2 + j + 0.5f;\r
a_x = k / 2 - j - 0.5f;\r
- w[j * 2 ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x)); \r
- w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x)); \r
+ w[j * 2 ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));\r
+ w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));\r
}/*for(j = 0; j < k / 2; j++)*/\r
for(j = k / 2; j < k; j++)\r
{\r
a_x = j - k / 2 + 0.5f;\r
b_x =-j + k / 2 - 0.5f + k;\r
- w[j * 2 ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x)); \r
- w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x)); \r
+ w[j * 2 ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));\r
+ w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));\r
}/*for(j = k / 2; j < k; j++)*/\r
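/*\r
// Worked example of the interpolation weights above (illustrative, k = 4):\r
// for j = 0, a_x = 1.5 and b_x = 2.5, so w[0] = (1/1.5)*(1.5*2.5/4) = 0.625\r
// and w[1] = (1/2.5)*(1.5*2.5/4) = 0.375.  The pair always sums to 1, so each\r
// pixel splits its gradient magnitude between its own cell and the\r
// neighbouring cell selected by nearest[].\r
*/\r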
\r
\r
{\r
for(jj = 0; jj < k; jj++)\r
{\r
- if ((i * k + ii > 0) && \r
- (i * k + ii < height - 1) && \r
- (j * k + jj > 0) && \r
+ if ((i * k + ii > 0) &&\r
+ (i * k + ii < height - 1) &&\r
+ (j * k + jj > 0) &&\r
(j * k + jj < width - 1))\r
{\r
d = (k * i + ii) * width + (j * k + jj);\r
- (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 ]] += \r
+ (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 ]] +=\r
r[d] * w[ii * 2] * w[jj * 2];\r
- (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += \r
+ (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=\r
r[d] * w[ii * 2] * w[jj * 2];\r
- if ((i + nearest[ii] >= 0) && \r
+ if ((i + nearest[ii] >= 0) &&\r
(i + nearest[ii] <= sizeY - 1))\r
{\r
- (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 ] ] += \r
+ (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 ] ] +=\r
r[d] * w[ii * 2 + 1] * w[jj * 2 ];\r
- (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += \r
+ (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=\r
r[d] * w[ii * 2 + 1] * w[jj * 2 ];\r
}\r
- if ((j + nearest[jj] >= 0) && \r
+ if ((j + nearest[jj] >= 0) &&\r
(j + nearest[jj] <= sizeX - 1))\r
{\r
- (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 ] ] += \r
+ (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 ] ] +=\r
r[d] * w[ii * 2] * w[jj * 2 + 1];\r
- (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += \r
+ (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=\r
r[d] * w[ii * 2] * w[jj * 2 + 1];\r
}\r
- if ((i + nearest[ii] >= 0) && \r
- (i + nearest[ii] <= sizeY - 1) && \r
- (j + nearest[jj] >= 0) && \r
+ if ((i + nearest[ii] >= 0) &&\r
+ (i + nearest[ii] <= sizeY - 1) &&\r
+ (j + nearest[jj] >= 0) &&\r
(j + nearest[jj] <= sizeX - 1))\r
{\r
- (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 ] ] += \r
+ (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 ] ] +=\r
r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];\r
- (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += \r
+ (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=\r
r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];\r
}\r
}\r
}/*for(ii = 0; ii < k; ii++)*/\r
}/*for(j = 1; j < sizeX - 1; j++)*/\r
}/*for(i = 1; i < sizeY - 1; i++)*/\r
- \r
+\r
cvReleaseImage(&dx);\r
cvReleaseImage(&dy);\r
\r
\r
free(w);\r
free(nearest);\r
- \r
+\r
free(r);\r
free(alfa);\r
\r
}\r
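/*\r
// Indexing sketch for the cell histograms written above (assuming\r
// stringSize == sizeX * numFeatures, i.e. one string per row of cells):\r
//   map[(i * sizeX + j) * numFeatures + bin]\r
// is the vote of cell (i, j) for orientation bin `bin`, where bins\r
// [0, NUM_SECTOR) are contrast-insensitive and the remaining bins hold the\r
// contrast-sensitive copies offset by NUM_SECTOR.\r
*/\r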
\r
/*\r
-// Feature map Normalization and Truncation \r
+// Feature map Normalization and Truncation\r
//\r
// API\r
// int normalizeAndTruncate(featureMap *map, const float alfa);\r
}/*for(j = 0; j < p; j++)*/\r
partOfNorm[i] = valOfNorm;\r
}/*for(i = 0; i < sizeX * sizeY; i++)*/\r
- \r
+\r
sizeX -= 2;\r
sizeY -= 2;\r
\r
// Error status\r
*/\r
int PCAFeatureMaps(CvLSVMFeatureMap *map)\r
-{ \r
+{\r
int i,j, ii, jj, k;\r
int sizeX, sizeY, p, pp, xp, yp, pos1, pos2;\r
float * newData;\r
float val;\r
float nx, ny;\r
- \r
+\r
sizeX = map->sizeX;\r
sizeY = map->sizeY;\r
p = map->numFeatures;\r
}/*for(jj = 0; jj < xp; jj++)*/\r
newData[pos2 + k] = val * nx;\r
k++;\r
- } /*for(ii = 0; ii < yp; ii++)*/ \r
+ } /*for(ii = 0; ii < yp; ii++)*/\r
}/*for(j = 0; j < sizeX; j++)*/\r
}/*for(i = 0; i < sizeY; i++)*/\r
//swap data\r
}\r
\r
\r
-int getPathOfFeaturePyramid(IplImage * image, \r
+static int getPathOfFeaturePyramid(IplImage * image,\r
float step, int numStep, int startIndex,\r
int sideLength, CvLSVMFeaturePyramid **maps)\r
{\r
CvLSVMFeatureMap *map;\r
IplImage *scaleTmp;\r
float scale;\r
- int i, err;\r
- \r
+ int i;\r
+\r
for(i = 0; i < numStep; i++)\r
{\r
scale = 1.0f / powf(step, (float)i);\r
scaleTmp = resize_opencv (image, scale);\r
- err = getFeatureMaps(scaleTmp, sideLength, &map);\r
- err = normalizeAndTruncate(map, VAL_OF_TRUNCATE);\r
- err = PCAFeatureMaps(map);\r
+ getFeatureMaps(scaleTmp, sideLength, &map);\r
+ normalizeAndTruncate(map, VAL_OF_TRUNCATE);\r
+ PCAFeatureMaps(map);\r
(*maps)->pyramid[startIndex + i] = map;\r
cvReleaseImage(&scaleTmp);\r
}/*for(i = 0; i < numStep; i++)*/\r
}\r
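/*\r
// Scale layout sketch (assuming the caller sets step = 2^(1/LAMBDA)): path\r
// level i is built at scale 1/step^i, so LAMBDA levels span one octave.  The\r
// first LAMBDA levels use half the cell side (SIDE_LENGTH / 2), which plays\r
// the role of a doubled-resolution octave at the bottom of the pyramid.\r
*/\r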
\r
/*\r
-// Getting feature pyramid \r
+// Getting feature pyramid\r
//\r
// API\r
-// int getFeaturePyramid(IplImage * image, const filterObject **all_F, \r
+// int getFeaturePyramid(IplImage * image, const filterObject **all_F,\r
const int n_f,\r
- const int lambda, const int k, \r
- const int startX, const int startY, \r
+ const int lambda, const int k,\r
+ const int startX, const int startY,\r
const int W, const int H, featurePyramid **maps);\r
// INPUT\r
// image - image\r
int numStep;\r
int maxNumCells;\r
int W, H;\r
- \r
+\r
if(image->depth == IPL_DEPTH_32F)\r
{\r
imgResize = image;\r
{\r
imgResize = cvCreateImage(cvSize(image->width , image->height) ,\r
IPL_DEPTH_32F , 3);\r
- cvConvert(image, imgResize); \r
+ cvConvert(image, imgResize);\r
}\r
- \r
+\r
W = imgResize->width;\r
H = imgResize->height;\r
\r
maxNumCells = H / SIDE_LENGTH;\r
}\r
numStep = (int)(logf((float) maxNumCells / (5.0f)) / logf( step )) + 1;\r
- \r
+\r
allocFeaturePyramidObject(maps, numStep + LAMBDA);\r
\r
- getPathOfFeaturePyramid(imgResize, step , LAMBDA, 0, \r
+ getPathOfFeaturePyramid(imgResize, step , LAMBDA, 0,\r
SIDE_LENGTH / 2, maps);\r
- getPathOfFeaturePyramid(imgResize, step, numStep, LAMBDA, \r
+ getPathOfFeaturePyramid(imgResize, step, numStep, LAMBDA,\r
SIDE_LENGTH , maps);\r
- \r
+\r
if(image->depth != IPL_DEPTH_32F)\r
{\r
cvReleaseImage(&imgResize);\r
#include "precomp.hpp"\r
#include "_lsvm_fft.h"\r
\r
-int getEntireRes(int number, int divisor, int *entire, int *res)\r
-{\r
- *entire = number / divisor;\r
- *res = number % divisor;\r
- return FFT_OK;\r
-}\r
+// static int getEntireRes(int number, int divisor, int *entire, int *res)\r
+// {\r
+// *entire = number / divisor;\r
+// *res = number % divisor;\r
+// return FFT_OK;\r
+// }\r
\r
-int getMultipliers(int n, int *n1, int *n2)\r
+static int getMultipliers(int n, int *n1, int *n2)\r
{\r
int multiplier, i;\r
if (n == 1)\r
// 1-dimensional FFT\r
//\r
// API\r
-// int fft(float *x_in, float *x_out, int n, int shift); \r
+// int fft(float *x_in, float *x_out, int n, int shift);\r
// INPUT\r
// x_in - input signal\r
// n - number of elements for searching Fourier image\r
// shift - shift between input elements\r
// OUTPUT\r
-// x_out - output signal (contains 2n elements in order \r
+// x_out - output signal (contains 2n elements in order\r
Re(x_in[0]), Im(x_in[0]), Re(x_in[1]), Im(x_in[1]) and etc.)\r
// RESULT\r
// Error status\r
// API\r
// int fftInverse(float *x_in, float *x_out, int n, int shift);\r
// INPUT\r
-// x_in - Fourier image of 1d input signal(contains 2n elements \r
- in order Re(x_in[0]), Im(x_in[0]), \r
+// x_in - Fourier image of 1d input signal(contains 2n elements\r
+ in order Re(x_in[0]), Im(x_in[0]),\r
Re(x_in[1]), Im(x_in[1]) and etc.)\r
// n - number of elements for computing the inverse FFT\r
// shift - shift between input elements\r
// numColls - number of columns\r
// OUTPUT\r
// x_out - output signal (contains (2 * numRows * numColls) elements\r
- in order Re(x_in[0][0]), Im(x_in[0][0]), \r
+ in order Re(x_in[0][0]), Im(x_in[0][0]),\r
Re(x_in[0][1]), Im(x_in[0][1]) and etc.)\r
// RESULT\r
// Error status\r
x_outTmp = (float *)malloc(sizeof(float) * (2 * size));\r
for (i = 0; i < numRows; i++)\r
{\r
- fft(x_in + i * 2 * numColls, \r
+ fft(x_in + i * 2 * numColls,\r
x_outTmp + i * 2 * numColls,\r
numColls, 2);\r
}\r
for (i = 0; i < numColls; i++)\r
{\r
- fft(x_outTmp + 2 * i, \r
- x_out + 2 * i, \r
+ fft(x_outTmp + 2 * i,\r
+ x_out + 2 * i,\r
numRows, 2 * numColls);\r
}\r
free(x_outTmp);\r
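/*\r
// Sketch of the separable decomposition used above: a 1-D FFT is applied to\r
// every row (stride 2 over interleaved Re/Im pairs) and then to every column\r
// (stride 2 * numColls, i.e. one complex value per row).  With this layout,\r
// element (r, c) is stored at x[2 * (r * numColls + c)] (real part) and\r
// x[2 * (r * numColls + c) + 1] (imaginary part).\r
*/\r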
// API\r
// int fftInverse2d(float *x_in, float *x_out, int numRows, int numColls);\r
// INPUT\r
-// x_in - Fourier image of matrix (contains (2 * numRows * numColls) \r
- elements in order Re(x_in[0][0]), Im(x_in[0][0]), \r
+// x_in - Fourier image of matrix (contains (2 * numRows * numColls)\r
+ elements in order Re(x_in[0][0]), Im(x_in[0][0]),\r
Re(x_in[0][1]), Im(x_in[0][1]) and etc.)\r
// numRows - number of rows\r
// numColls - number of columns\r
}\r
for (i = 0; i < numColls; i++)\r
{\r
- fftInverse(x_outTmp + 2 * i, \r
- x_out + 2 * i, \r
+ fftInverse(x_outTmp + 2 * i,\r
+ x_out + 2 * i,\r
numRows, 2 * numColls);\r
}\r
free(x_outTmp);\r
}
-CV_IMPL int
+static int
cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
CvPoint pt, double& stage_sum, int start_stage )
{
sum += calc_sum(node->feature.rect[1],p_offset) * node->feature.rect[1].weight;
if( node->feature.rect[2].p0 )
sum += calc_sum(node->feature.rect[2],p_offset) * node->feature.rect[2].weight;
-
+
stage_sum += classifier->alpha[sum >= t];
#else
// ayasin - NHM perf optim. Avoid use of costly flaky jcc
if( node->feature.rect[2].p0 )
_sum += calc_sum(node->feature.rect[2],p_offset) * node->feature.rect[2].weight;
__m128d sum = _mm_set_sd(_sum);
-
+
t = _mm_cmpgt_sd(t, sum);
stage_sum = _mm_add_sd(stage_sum, _mm_blendv_pd(b, a, t));
#endif
HaarDetectObjects_ScaleImage_Invoker( const CvHaarClassifierCascade* _cascade,
int _stripSize, double _factor,
const Mat& _sum1, const Mat& _sqsum1, Mat* _norm1,
- Mat* _mask1, Rect _equRect, ConcurrentRectVector& _vec,
+ Mat* _mask1, Rect _equRect, ConcurrentRectVector& _vec,
std::vector<int>& _levels, std::vector<double>& _weights,
bool _outputLevels )
{
rejectLevels = _outputLevels ? &_levels : 0;
levelWeights = _outputLevels ? &_weights : 0;
}
-
+
void operator()( const BlockedRange& range ) const
{
Size winSize0 = cascade->orig_window_size;
Size winSize(cvRound(winSize0.width*factor), cvRound(winSize0.height*factor));
int y1 = range.begin()*stripSize, y2 = min(range.end()*stripSize, sum1.rows - 1 - winSize0.height);
-
+
if (y2 <= y1 || sum1.cols <= 1 + winSize0.width)
return;
-
+
Size ssz(sum1.cols - 1 - winSize0.width, y2 - y1);
int x, y, ystep = factor > 2 ? 1 : 2;
-
+
#ifdef HAVE_IPP
if( cascade->hid_cascade->ipp_stages )
{
sqsum1.ptr<double>(y1), sqsum1.step,
norm1->ptr<float>(y1), norm1->step,
ippiSize(ssz.width, ssz.height), iequRect );
-
+
int positive = (ssz.width/ystep)*((ssz.height + ystep-1)/ystep);
if( ystep == 1 )
{
uchar* mask1row = mask1->ptr(y);
memset( mask1row, 0, ssz.width );
-
+
if( y % ystep == 0 )
for( x = 0; x < ssz.width; x += ystep )
mask1row[x] = (uchar)1;
}
-
+
for( int j = 0; j < cascade->count; j++ )
{
if( ippiApplyHaarClassifier_32f_C1R(
if( positive <= 0 )
break;
}
-
+
if( positive > 0 )
for( y = y1; y < y2; y += ystep )
{
{
if( result > 0 )
vec->push_back(Rect(cvRound(x*factor), cvRound(y*factor),
- winSize.width, winSize.height));
+ winSize.width, winSize.height));
}
}
}
-
+
const CvHaarClassifierCascade* cascade;
int stripSize;
double factor;
std::vector<int>* rejectLevels;
std::vector<double>* levelWeights;
};
-
+
struct HaarDetectObjects_ScaleCascade_Invoker
{
p = _p; pq = _pq;
vec = &_vec;
}
-
+
void operator()( const BlockedRange& range ) const
{
int iy, startY = range.begin(), endY = range.end();
const int *pq0 = pq[0], *pq1 = pq[1], *pq2 = pq[2], *pq3 = pq[3];
bool doCannyPruning = p0 != 0;
int sstep = (int)(sumstep/sizeof(p0[0]));
-
+
for( iy = startY; iy < endY; iy++ )
{
int ix, y = cvRound(iy*ystep), ixstep = 1;
for( ix = xrange.start; ix < xrange.end; ix += ixstep )
{
int x = cvRound(ix*ystep); // it should really be ystep, not ixstep
-
+
if( doCannyPruning )
{
int offset = y*sstep + x;
continue;
}
}
-
+
int result = cvRunHaarClassifierCascade( cascade, cvPoint(x, y), 0 );
if( result > 0 )
vec->push_back(Rect(x, y, winsize.width, winsize.height));
}
}
}
-
+
const CvHaarClassifierCascade* cascade;
double ystep;
size_t sumstep;
const int** pq;
ConcurrentRectVector* vec;
};
-
-
+
+
}
-
+
CvSeq*
-cvHaarDetectObjectsForROC( const CvArr* _img,
+cvHaarDetectObjectsForROC( const CvArr* _img,
CvHaarClassifierCascade* cascade, CvMemStorage* storage,
std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
- double scaleFactor, int minNeighbors, int flags,
+ double scaleFactor, int minNeighbors, int flags,
CvSize minSize, CvSize maxSize, bool outputRejectLevels )
{
const double GROUP_EPS = 0.2;
if( CV_MAT_DEPTH(img->type) != CV_8U )
CV_Error( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );
-
+
if( scaleFactor <= 1 )
CV_Error( CV_StsOutOfRange, "scale factor must be > 1" );
if( findBiggestObject )
flags &= ~CV_HAAR_SCALE_IMAGE;
-
+
if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->rows;
#else
const int stripCount = 1;
#endif
-
+
#ifdef HAVE_IPP
if( use_ipp )
{
}
else
#endif
- cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );
-
+ cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );
+
cv::Mat _norm1(&norm1), _mask1(&mask1);
cv::parallel_for(cv::BlockedRange(0, stripCount),
cv::HaarDetectObjects_ScaleImage_Invoker(cascade,
{
rectList.resize(allCandidates.size());
std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());
-
+
groupRectangles(rectList, std::max(minNeighbors, 1), GROUP_EPS);
-
+
if( !rectList.empty() )
{
size_t i, sz = rectList.size();
cv::Rect maxRect;
-
+
for( i = 0; i < sz; i++ )
{
if( rectList[i].area() > maxRect.area() )
maxRect = rectList[i];
}
-
+
allCandidates.push_back(maxRect);
-
+
scanROI = maxRect;
int dx = cvRound(maxRect.width*GROUP_EPS);
int dy = cvRound(maxRect.height*GROUP_EPS);
scanROI.y = std::max(scanROI.y - dy, 0);
scanROI.width = std::min(scanROI.width + dx*2, img->cols-1-scanROI.x);
scanROI.height = std::min(scanROI.height + dy*2, img->rows-1-scanROI.y);
-
+
double minScale = roughSearch ? 0.6 : 0.4;
minSize.width = cvRound(maxRect.width*minScale);
minSize.height = cvRound(maxRect.height*minScale);
rectList.resize(allCandidates.size());
if(!allCandidates.empty())
std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());
-
+
if( minNeighbors != 0 || findBiggestObject )
{
if( outputRejectLevels )
}
else
rweights.resize(rectList.size(),0);
-
+
if( findBiggestObject && rectList.size() )
{
CvAvgComp result_comp = {{0,0,0,0},0};
-
+
for( size_t i = 0; i < rectList.size(); i++ )
{
cv::Rect r = rectList[i];
}
CV_IMPL CvSeq*
-cvHaarDetectObjects( const CvArr* _img,
+cvHaarDetectObjects( const CvArr* _img,
CvHaarClassifierCascade* cascade, CvMemStorage* storage,
double scaleFactor,
int minNeighbors, int flags, CvSize minSize, CvSize maxSize )
{
std::vector<int> fakeLevels;
std::vector<double> fakeWeights;
- return cvHaarDetectObjectsForROC( _img, cascade, storage, fakeLevels, fakeWeights,
+ return cvHaarDetectObjectsForROC( _img, cascade, storage, fakeLevels, fakeWeights,
scaleFactor, minNeighbors, flags, minSize, maxSize, false );
}
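// Minimal usage sketch for the legacy C entry point (the cascade pointer and
// input image are assumed to be loaded elsewhere):
//
//   CvMemStorage* storage = cvCreateMemStorage(0);
//   CvSeq* faces = cvHaarDetectObjects(img, cascade, storage,
//                                      1.1, 3, CV_HAAR_SCALE_IMAGE,
//                                      cvSize(30, 30), cvSize(0, 0));
//   // each element of `faces` is a CvAvgComp whose `rect` field is the box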
HaarClassifierCascade::HaarClassifierCascade() {}
HaarClassifierCascade::HaarClassifierCascade(const String& filename)
{ load(filename); }
-
+
bool HaarClassifierCascade::load(const String& filename)
{
cascade = Ptr<CvHaarClassifierCascade>((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
Size blockSize = descriptor->blockSize;
Size blockStride = descriptor->blockStride;
Size cellSize = descriptor->cellSize;
- Size winSize = descriptor->winSize;
int i, j, nbins = descriptor->nbins;
int rawBlockSize = blockSize.width*blockSize.height;
(winSize.height/cacheStride.height)+1);
blockCache.create(cacheSize.height, cacheSize.width*blockHistogramSize);
blockCacheFlags.create(cacheSize);
- size_t i, cacheRows = blockCache.rows;
+ size_t cacheRows = blockCache.rows;
ymaxCached.resize(cacheRows);
- for( i = 0; i < cacheRows; i++ )
- ymaxCached[i] = -1;
+ for(size_t ii = 0; ii < cacheRows; ii++ )
+ ymaxCached[ii] = -1;
}
Mat_<float> weights(blockSize);
#include "_lsvm_matching.h"\r
\r
/*\r
-// Transformation filter displacement from the block space \r
+// Transformation filter displacement from the block space\r
// to the space of pixels at the initial image\r
//\r
// API\r
-// int convertPoints(int countLevel, CvPoint *points, int *levels, \r
+// int convertPoints(int countLevel, CvPoint *points, int *levels,\r
CvPoint **partsDisplacement, int kPoints, int n);\r
// INPUT\r
// countLevel - the number of levels in the feature pyramid\r
// RESULT\r
// Error status\r
*/\r
-int convertPoints(int /*countLevel*/, int lambda, \r
+int convertPoints(int /*countLevel*/, int lambda,\r
int initialImageLevel,\r
- CvPoint *points, int *levels, \r
- CvPoint **partsDisplacement, int kPoints, int n, \r
+ CvPoint *points, int *levels,\r
+ CvPoint **partsDisplacement, int kPoints, int n,\r
int maxXBorder,\r
int maxYBorder)\r
{\r
step = powf( 2.0f, 1.0f / ((float)lambda) );\r
\r
computeBorderSize(maxXBorder, maxYBorder, &bx, &by);\r
- \r
+\r
for (i = 0; i < kPoints; i++)\r
{\r
// scaling factor for root filter\r
// scaling factor for part filters\r
scale = SIDE_LENGTH * powf(step, (float)(levels[i] - lambda - initialImageLevel));\r
for (j = 0; j < n; j++)\r
- { \r
- partsDisplacement[i][j].x = (int)((partsDisplacement[i][j].x - \r
+ {\r
+ partsDisplacement[i][j].x = (int)((partsDisplacement[i][j].x -\r
2 * bx + 1) * scale);\r
- partsDisplacement[i][j].y = (int)((partsDisplacement[i][j].y - \r
+ partsDisplacement[i][j].y = (int)((partsDisplacement[i][j].y -\r
2 * by + 1) * scale);\r
}\r
}\r
// Elimination of boxes that are outside the image boundaries\r
//\r
// API\r
-// int clippingBoxes(int width, int height, \r
+// int clippingBoxes(int width, int height,\r
CvPoint *points, int kPoints);\r
// INPUT\r
// width - image width\r
// kPoints - number of points\r
// OUTPUT\r
// points - updated points (if coordinates less than zero then\r
- set zero coordinate, if coordinates more than image \r
+ set zero coordinate, if coordinates more than image\r
size then set coordinates equal image size)\r
// RESULT\r
// Error status\r
*/\r
-int clippingBoxes(int width, int height, \r
+int clippingBoxes(int width, int height,\r
CvPoint *points, int kPoints)\r
{\r
int i;\r
int maxXBorder, int maxYBorder);\r
\r
// INPUT\r
-// image - initial image \r
+// image - initial image\r
// maxXBorder - the largest root filter size (X-direction)\r
// maxYBorder - the largest root filter size (Y-direction)\r
// OUTPUT\r
// Computation of the root filter displacement and values of score function\r
//\r
// API\r
-// int searchObject(const featurePyramid *H, const filterObject **all_F, int n, \r
- float b, \r
+// int searchObject(const featurePyramid *H, const filterObject **all_F, int n,\r
+ float b,\r
int maxXBorder,\r
- int maxYBorder, \r
+ int maxYBorder,\r
CvPoint **points, int **levels, int *kPoints, float *score,\r
CvPoint ***partsDisplacement);\r
// INPUT\r
// image - initial image for searching the object\r
-// all_F - the set of filters (the first element is root filter, \r
+// all_F - the set of filters (the first element is root filter,\r
other elements - part filters)\r
// n - the number of part filters\r
// b - linear term of the score function\r
// maxXBorder - the largest root filter size (X-direction)\r
// maxYBorder - the largest root filter size (Y-direction)\r
// OUTPUT\r
-// points - positions (x, y) of the upper-left corner \r
+// points - positions (x, y) of the upper-left corner\r
of root filter frame\r
// levels - levels that correspond to each position\r
// kPoints - number of positions\r
// score - value of the score function\r
-// partsDisplacement - part filters displacement for each position \r
+// partsDisplacement - part filters displacement for each position\r
of the root filter\r
// RESULT\r
// Error status\r
*/\r
-int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F, \r
- int n, float b, \r
+int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,\r
+ int n, float b,\r
int maxXBorder,\r
- int maxYBorder, \r
+ int maxYBorder,\r
CvPoint **points, int **levels, int *kPoints, float *score,\r
CvPoint ***partsDisplacement)\r
{\r
int opResult;\r
\r
// Matching\r
- opResult = maxFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder, \r
- score, points, levels, \r
+ opResult = maxFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,\r
+ score, points, levels,\r
kPoints, partsDisplacement);\r
if (opResult != LATENT_SVM_OK)\r
{\r
return LATENT_SVM_SEARCH_OBJECT_FAILED;\r
}\r
- \r
- // Transformation filter displacement from the block space \r
+\r
+ // Transformation filter displacement from the block space\r
// to the space of pixels at the initial image\r
// that settles at the level number LAMBDA\r
- convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points), \r
- (*levels), (*partsDisplacement), (*kPoints), n, \r
+ convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),\r
+ (*levels), (*partsDisplacement), (*kPoints), n,\r
maxXBorder, maxYBorder);\r
\r
return LATENT_SVM_OK;\r
// Computation of the bottom-right corner coordinates of the bounding boxes\r
//\r
// API\r
-// int estimateBoxes(CvPoint *points, int *levels, int kPoints, \r
+// int estimateBoxes(CvPoint *points, int *levels, int kPoints,\r
int sizeX, int sizeY, CvPoint **oppositePoints);\r
// INPUT\r
// points - left top corners coordinates of bounding boxes\r
// RESULT\r
// Error status\r
*/\r
-int estimateBoxes(CvPoint *points, int *levels, int kPoints, \r
+static int estimateBoxes(CvPoint *points, int *levels, int kPoints,\r
int sizeX, int sizeY, CvPoint **oppositePoints)\r
{\r
int i;\r
// Computation of the root filter displacement and values of score function\r
//\r
// API\r
-// int searchObjectThreshold(const featurePyramid *H, \r
+// int searchObjectThreshold(const featurePyramid *H,\r
const filterObject **all_F, int n,\r
- float b, \r
- int maxXBorder, int maxYBorder, \r
+ float b,\r
+ int maxXBorder, int maxYBorder,\r
float scoreThreshold,\r
- CvPoint **points, int **levels, int *kPoints, \r
+ CvPoint **points, int **levels, int *kPoints,\r
float **score, CvPoint ***partsDisplacement);\r
// INPUT\r
// H - feature pyramid\r
-// all_F - the set of filters (the first element is root filter, \r
+// all_F - the set of filters (the first element is root filter,\r
other elements - part filters)\r
// n - the number of part filters\r
// b - linear term of the score function\r
// maxYBorder - the largest root filter size (Y-direction)\r
// scoreThreshold - score threshold\r
// OUTPUT\r
-// points - positions (x, y) of the upper-left corner \r
+// points - positions (x, y) of the upper-left corner\r
of root filter frame\r
// levels - levels that correspond to each position\r
// kPoints - number of positions\r
// score - values of the score function\r
-// partsDisplacement - part filters displacement for each position \r
+// partsDisplacement - part filters displacement for each position\r
of the root filter\r
// RESULT\r
// Error status\r
*/\r
-int searchObjectThreshold(const CvLSVMFeaturePyramid *H, \r
+int searchObjectThreshold(const CvLSVMFeaturePyramid *H,\r
const CvLSVMFilterObject **all_F, int n,\r
- float b, \r
- int maxXBorder, int maxYBorder, \r
+ float b,\r
+ int maxXBorder, int maxYBorder,\r
float scoreThreshold,\r
- CvPoint **points, int **levels, int *kPoints, \r
+ CvPoint **points, int **levels, int *kPoints,\r
float **score, CvPoint ***partsDisplacement,\r
int numThreads)\r
{\r
return opResult;\r
}\r
opResult = tbbThresholdFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,\r
- scoreThreshold, numThreads, score, \r
- points, levels, kPoints, \r
+ scoreThreshold, numThreads, score,\r
+ points, levels, kPoints,\r
partsDisplacement);\r
#else\r
- opResult = thresholdFunctionalScore(all_F, n, H, b, \r
- maxXBorder, maxYBorder, \r
- scoreThreshold, \r
- score, points, levels, \r
+ opResult = thresholdFunctionalScore(all_F, n, H, b,\r
+ maxXBorder, maxYBorder,\r
+ scoreThreshold,\r
+ score, points, levels,\r
kPoints, partsDisplacement);\r
\r
- (void)numThreads;\r
+ (void)numThreads;\r
#endif\r
if (opResult != LATENT_SVM_OK)\r
{\r
return LATENT_SVM_SEARCH_OBJECT_FAILED;\r
- } \r
- \r
- // Transformation filter displacement from the block space \r
+ }\r
+\r
+ // Transformation filter displacement from the block space\r
// to the space of pixels at the initial image\r
// that settles at the level number LAMBDA\r
- convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points), \r
- (*levels), (*partsDisplacement), (*kPoints), n, \r
+ convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),\r
+ (*levels), (*partsDisplacement), (*kPoints), n,\r
maxXBorder, maxYBorder);\r
\r
return LATENT_SVM_OK;\r
//\r
// API\r
// int showRootFilterBoxes(const IplImage *image,\r
- const filterObject *filter, \r
+ const filterObject *filter,\r
CvPoint *points, int *levels, int kPoints,\r
- CvScalar color, int thickness, \r
+ CvScalar color, int thickness,\r
int line_type, int shift);\r
// INPUT\r
// image - initial image\r
// Error status\r
*/\r
int showRootFilterBoxes(IplImage *image,\r
- const CvLSVMFilterObject *filter, \r
+ const CvLSVMFilterObject *filter,\r
CvPoint *points, int *levels, int kPoints,\r
- CvScalar color, int thickness, \r
+ CvScalar color, int thickness,\r
int line_type, int shift)\r
-{ \r
+{\r
int i;\r
float step;\r
CvPoint oppositePoint;\r
step = powf( 2.0f, 1.0f / ((float)LAMBDA));\r
- \r
+\r
for (i = 0; i < kPoints; i++)\r
{\r
// Drawing rectangle for filter\r
- getOppositePoint(points[i], filter->sizeX, filter->sizeY, \r
+ getOppositePoint(points[i], filter->sizeX, filter->sizeY,\r
step, levels[i] - LAMBDA, &oppositePoint);\r
- cvRectangle(image, points[i], oppositePoint, \r
+ cvRectangle(image, points[i], oppositePoint,\r
color, thickness, line_type, shift);\r
}\r
#ifdef HAVE_OPENCV_HIGHGUI\r
//\r
// API\r
// int showPartFilterBoxes(const IplImage *image,\r
- const filterObject *filter, \r
+ const filterObject *filter,\r
CvPoint *points, int *levels, int kPoints,\r
- CvScalar color, int thickness, \r
+ CvScalar color, int thickness,\r
int line_type, int shift);\r
// INPUT\r
// image - initial image\r
*/\r
int showPartFilterBoxes(IplImage *image,\r
const CvLSVMFilterObject **filters,\r
- int n, CvPoint **partsDisplacement, \r
+ int n, CvPoint **partsDisplacement,\r
int *levels, int kPoints,\r
- CvScalar color, int thickness, \r
+ CvScalar color, int thickness,\r
int line_type, int shift)\r
{\r
int i, j;\r
for (j = 0; j < n; j++)\r
{\r
// Drawing rectangles for part filters\r
- getOppositePoint(partsDisplacement[i][j], \r
- filters[j + 1]->sizeX, filters[j + 1]->sizeY, \r
+ getOppositePoint(partsDisplacement[i][j],\r
+ filters[j + 1]->sizeX, filters[j + 1]->sizeY,\r
step, levels[i] - 2 * LAMBDA, &oppositePoint);\r
- cvRectangle(image, partsDisplacement[i][j], oppositePoint, \r
+ cvRectangle(image, partsDisplacement[i][j], oppositePoint,\r
color, thickness, line_type, shift);\r
}\r
}\r
// Drawing boxes\r
//\r
// API\r
-// int showBoxes(const IplImage *img, \r
- const CvPoint *points, const CvPoint *oppositePoints, int kPoints, \r
+// int showBoxes(const IplImage *img,\r
+ const CvPoint *points, const CvPoint *oppositePoints, int kPoints,\r
CvScalar color, int thickness, int line_type, int shift);\r
// INPUT\r
// img - initial image\r
// RESULT\r
// Error status\r
*/\r
-int showBoxes(IplImage *img, \r
- const CvPoint *points, const CvPoint *oppositePoints, int kPoints, \r
+int showBoxes(IplImage *img,\r
+ const CvPoint *points, const CvPoint *oppositePoints, int kPoints,\r
CvScalar color, int thickness, int line_type, int shift)\r
{\r
int i;\r
for (i = 0; i < kPoints; i++)\r
{\r
- cvRectangle(img, points[i], oppositePoints[i], \r
+ cvRectangle(img, points[i], oppositePoints[i],\r
color, thickness, line_type, shift);\r
}\r
#ifdef HAVE_OPENCV_HIGHGUI\r
//\r
// API\r
// int getMaxFilterDims(const filterObject **filters, int kComponents,\r
- const int *kPartFilters, \r
+ const int *kPartFilters,\r
unsigned int *maxXBorder, unsigned int *maxYBorder);\r
// INPUT\r
-// filters - a set of filters (at first root filter, then part filters \r
+// filters - a set of filters (at first root filter, then part filters\r
and etc. for all components)\r
// kComponents - number of components\r
// kPartFilters - number of part filters for each component\r
// Error status\r
*/\r
int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,\r
- const int *kPartFilters, \r
+ const int *kPartFilters,\r
unsigned int *maxXBorder, unsigned int *maxYBorder)\r
{\r
- int i, componentIndex; \r
+ int i, componentIndex;\r
*maxXBorder = filters[0]->sizeX;\r
*maxYBorder = filters[0]->sizeY;\r
componentIndex = kPartFilters[0] + 1;\r
//\r
// API\r
// int searchObjectThresholdSomeComponents(const featurePyramid *H,\r
- const filterObject **filters, \r
+ const filterObject **filters,\r
int kComponents, const int *kPartFilters,\r
const float *b, float scoreThreshold,\r
CvPoint **points, CvPoint **oppPoints,\r
// Error status\r
*/\r
int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,\r
- const CvLSVMFilterObject **filters, \r
+ const CvLSVMFilterObject **filters,\r
int kComponents, const int *kPartFilters,\r
const float *b, float scoreThreshold,\r
CvPoint **points, CvPoint **oppPoints,\r
float **score, int *kPoints,\r
int numThreads)\r
{\r
- int error = 0;\r
+ //int error = 0;\r
int i, j, s, f, componentIndex;\r
unsigned int maxXBorder, maxYBorder;\r
CvPoint **pointsArr, **oppPointsArr, ***partsDisplacementArr;\r
float **scoreArr;\r
int *kPointsArr, **levelsArr;\r
- \r
+\r
// Allocation memory\r
pointsArr = (CvPoint **)malloc(sizeof(CvPoint *) * kComponents);\r
oppPointsArr = (CvPoint **)malloc(sizeof(CvPoint *) * kComponents);\r
kPointsArr = (int *)malloc(sizeof(int) * kComponents);\r
levelsArr = (int **)malloc(sizeof(int *) * kComponents);\r
partsDisplacementArr = (CvPoint ***)malloc(sizeof(CvPoint **) * kComponents);\r
- \r
+\r
// Getting maximum filter dimensions\r
- error = getMaxFilterDims(filters, kComponents, kPartFilters, &maxXBorder, &maxYBorder);\r
+ /*error = */getMaxFilterDims(filters, kComponents, kPartFilters, &maxXBorder, &maxYBorder);\r
componentIndex = 0;\r
*kPoints = 0;\r
// For each component perform searching\r
for (i = 0; i < kComponents; i++)\r
{\r
#ifdef HAVE_TBB\r
- error = searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],\r
+ int error = searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],\r
b[i], maxXBorder, maxYBorder, scoreThreshold,\r
- &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]), \r
+ &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),\r
&(scoreArr[i]), &(partsDisplacementArr[i]), numThreads);\r
if (error != LATENT_SVM_OK)\r
{\r
return LATENT_SVM_SEARCH_OBJECT_FAILED;\r
}\r
#else\r
- (void)numThreads;\r
+ (void)numThreads;\r
searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],\r
- b[i], maxXBorder, maxYBorder, scoreThreshold, \r
- &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]), \r
+ b[i], maxXBorder, maxYBorder, scoreThreshold,\r
+ &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),\r
&(scoreArr[i]), &(partsDisplacementArr[i]));\r
#endif\r
- estimateBoxes(pointsArr[i], levelsArr[i], kPointsArr[i], \r
- filters[componentIndex]->sizeX, filters[componentIndex]->sizeY, &(oppPointsArr[i])); \r
+ estimateBoxes(pointsArr[i], levelsArr[i], kPointsArr[i],\r
+ filters[componentIndex]->sizeX, filters[componentIndex]->sizeY, &(oppPointsArr[i]));\r
componentIndex += (kPartFilters[i] + 1);\r
*kPoints += kPointsArr[i];\r
- } \r
+ }\r
\r
*points = (CvPoint *)malloc(sizeof(CvPoint) * (*kPoints));\r
*oppPoints = (CvPoint *)malloc(sizeof(CvPoint) * (*kPoints));\r
return classNames.size();\r
}\r
\r
-string extractModelName( const string& filename )\r
+static string extractModelName( const string& filename )\r
{\r
size_t startPos = filename.rfind('/');\r
if( startPos == string::npos )\r
*
* \return The bounding box of all the templates in original image coordinates.
*/
-Rect cropTemplates(std::vector<Template>& templates)
+static Rect cropTemplates(std::vector<Template>& templates)
{
int min_x = std::numeric_limits<int>::max();
int min_y = std::numeric_limits<int>::max();
max_y = std::max(max_y, y);
}
}
-
+
/// @todo Why require even min_x, min_y?
if (min_x % 2 == 1) --min_x;
if (min_y % 2 == 1) --min_y;
templ.height = (max_y - min_y) >> templ.pyramid_level;
int offset_x = min_x >> templ.pyramid_level;
int offset_y = min_y >> templ.pyramid_level;
-
+
for (int j = 0; j < (int)templ.features.size(); ++j)
{
templ.features[j].x -= offset_x;
* \param threshold Magnitude threshold. Keep only gradients whose norms are
* larger than this.
*/
-void quantizedOrientations(const Mat& src, Mat& magnitude,
+static void quantizedOrientations(const Mat& src, Mat& magnitude,
Mat& angle, float threshold)
{
magnitude.create(src.size(), CV_32F);
{
if (mag_r[c] > threshold)
{
- // Compute histogram of quantized bins in 3x3 patch around pixel
+ // Compute histogram of quantized bins in 3x3 patch around pixel
int histogram[8] = {0, 0, 0, 0, 0, 0, 0, 0};
uchar* patch3x3_row = &quantized_unfiltered(r-1, c-1);
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;
- patch3x3_row += quantized_unfiltered.step1();
+ patch3x3_row += quantized_unfiltered.step1();
histogram[patch3x3_row[0]]++;
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;
- patch3x3_row += quantized_unfiltered.step1();
+ patch3x3_row += quantized_unfiltered.step1();
histogram[patch3x3_row[0]]++;
histogram[patch3x3_row[1]]++;
histogram[patch3x3_row[2]]++;
- // Find bin with the most votes from the patch
+ // Find bin with the most votes from the patch
int max_votes = 0;
int index = -1;
for (int i = 0; i < 8; ++i)
}
}
- // Only accept the quantization if majority of pixels in the patch agree
- static const int NEIGHBOR_THRESHOLD = 5;
+ // Only accept the quantization if majority of pixels in the patch agree
+ static const int NEIGHBOR_THRESHOLD = 5;
if (max_votes >= NEIGHBOR_THRESHOLD)
- quantized_angle.at<uchar>(r, c) = 1 << index;
+ quantized_angle.at<uchar>(r, c) = uchar(1 << index);
}
}
}
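// Sketch of the label encoding produced above (hypothetical helper, assuming
// 8 orientation bins): every accepted pixel stores a one-hot byte, 1 << bin,
// so the later spread/OR stages can merge neighbouring labels with a single
// bitwise OR per byte.
static inline uchar orientationLabel(int bin)
{
  return static_cast<uchar>(1 << (bin & 7)); // bin expected in [0, 8)
}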
float strong_threshold;
};
-ColorGradientPyramid::ColorGradientPyramid(const Mat& src, const Mat& mask,
- float weak_threshold, size_t num_features,
- float strong_threshold)
- : src(src),
- mask(mask),
+ColorGradientPyramid::ColorGradientPyramid(const Mat& _src, const Mat& _mask,
+ float _weak_threshold, size_t _num_features,
+ float _strong_threshold)
+ : src(_src),
+ mask(_mask),
pyramid_level(0),
- weak_threshold(weak_threshold),
- num_features(num_features),
- strong_threshold(strong_threshold)
+ weak_threshold(_weak_threshold),
+ num_features(_num_features),
+ strong_threshold(_strong_threshold)
{
update();
}
{
}
-ColorGradient::ColorGradient(float weak_threshold, size_t num_features, float strong_threshold)
- : weak_threshold(weak_threshold),
- num_features(num_features),
- strong_threshold(strong_threshold)
+ColorGradient::ColorGradient(float _weak_threshold, size_t _num_features, float _strong_threshold)
+ : weak_threshold(_weak_threshold),
+ num_features(_num_features),
+ strong_threshold(_strong_threshold)
{
}
*
* \todo Should also need camera model, or at least focal lengths? Replace distance_threshold with mask?
*/
-void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
+static void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
int difference_threshold)
{
dst = Mat::zeros(src.size(), CV_8U);
int extract_threshold;
};
-DepthNormalPyramid::DepthNormalPyramid(const Mat& src, const Mat& mask,
- int distance_threshold, int difference_threshold, size_t num_features,
- int extract_threshold)
- : mask(mask),
+DepthNormalPyramid::DepthNormalPyramid(const Mat& src, const Mat& _mask,
+ int distance_threshold, int difference_threshold, size_t _num_features,
+ int _extract_threshold)
+ : mask(_mask),
pyramid_level(0),
- num_features(num_features),
- extract_threshold(extract_threshold)
+ num_features(_num_features),
+ extract_threshold(_extract_threshold)
{
quantizedNormals(src, normal, distance_threshold, difference_threshold);
}
{
}
-DepthNormal::DepthNormal(int distance_threshold, int difference_threshold, size_t num_features,
- int extract_threshold)
- : distance_threshold(distance_threshold),
- difference_threshold(difference_threshold),
- num_features(num_features),
- extract_threshold(extract_threshold)
+DepthNormal::DepthNormal(int _distance_threshold, int _difference_threshold, size_t _num_features,
+ int _extract_threshold)
+ : distance_threshold(_distance_threshold),
+ difference_threshold(_difference_threshold),
+ num_features(_num_features),
+ extract_threshold(_extract_threshold)
{
}
* Response maps *
\****************************************************************************************/
-void orUnaligned8u(const uchar * src, const int src_stride,
+static void orUnaligned8u(const uchar * src, const int src_stride,
uchar * dst, const int dst_stride,
const int width, const int height)
{
__m128i* dst_ptr = reinterpret_cast<__m128i*>(dst + c);
*dst_ptr = _mm_or_si128(*dst_ptr, val);
}
- }
+ }
#endif
for ( ; c < width; ++c)
dst[c] |= src[c];
* \param[out] dst Destination 8-bit spread image.
* \param T Sampling step. Spread labels T/2 pixels in each direction.
*/
-void spread(const Mat& src, Mat& dst, int T)
+static void spread(const Mat& src, Mat& dst, int T)
{
// Allocate and zero-initialize spread (OR'ed) image
dst = Mat::zeros(src.size(), CV_8U);
* \param[in] src The source 8-bit spread quantized image.
* \param[out] response_maps Vector of 8 response maps, one for each bit label.
*/
-void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
+static void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
{
CV_Assert((src.rows * src.cols) % 16 == 0);
response_maps.resize(8);
for (int i = 0; i < 8; ++i)
response_maps[i].create(src.size(), CV_8U);
-
+
Mat lsb4(src.size(), CV_8U);
Mat msb4(src.size(), CV_8U);
-
+
for (int r = 0; r < src.rows; ++r)
{
const uchar* src_r = src.ptr(r);
uchar* lsb4_r = lsb4.ptr(r);
uchar* msb4_r = msb4.ptr(r);
-
+
for (int c = 0; c < src.cols; ++c)
{
// Least significant 4 bits of spread image pixel
* each of which is a linear memory of length (W/T)*(H/T).
* \param T Sampling step.
*/
-void linearize(const Mat& response_map, Mat& linearized, int T)
+static void linearize(const Mat& response_map, Mat& linearized, int T)
{
CV_Assert(response_map.rows % T == 0);
CV_Assert(response_map.cols % T == 0);
int mem_width = response_map.cols / T;
int mem_height = response_map.rows / T;
linearized.create(T*T, mem_width * mem_height, CV_8U);
-
+
// Outer two for loops iterate over top-left T^2 starting pixels
int index = 0;
for (int r_start = 0; r_start < T; ++r_start)
{
uchar* memory = linearized.ptr(index);
++index;
-
+
// Inner two loops copy every T-th pixel into the linear memory
for (int r = r_start; r < response_map.rows; r += T)
{
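Equivalently, the linear-memory layout described in the comment above can be written as direct index arithmetic. A standalone sketch consistent with that description (one linear memory of length (W/T)*(H/T) per T×T offset; names illustrative): pixel (r, c) lands in row (r % T)*T + (c % T) at offset (r / T)*(W / T) + (c / T).

#include <opencv2/core/core.hpp>

static void linearizeScalar(const cv::Mat& response_map, cv::Mat& linearized, int T)
{
    CV_Assert(response_map.rows % T == 0 && response_map.cols % T == 0);
    const int mem_width  = response_map.cols / T;
    const int mem_height = response_map.rows / T;
    linearized.create(T * T, mem_width * mem_height, CV_8U);

    for (int r = 0; r < response_map.rows; ++r)
        for (int c = 0; c < response_map.cols; ++c)
            linearized.at<uchar>((r % T) * T + (c % T),
                                 (r / T) * mem_width + (c / T)) = response_map.at<uchar>(r, c);
}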
* Linearized similarities *
\****************************************************************************************/
-const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
- const Feature& f, int T, int W)
+static const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
+ const Feature& f, int T, int W)
{
// Retrieve the TxT grid of linear memories associated with the feature label
const Mat& memory_grid = linear_memories[f.label];
* \param size Size (W, H) of the original input image.
* \param T Sampling step.
*/
-void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
+static void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
Mat& dst, Size size, int T)
{
// 63 features or less is a special case because the max similarity per-feature is 4.
}
#endif
for ( ; j < template_positions; ++j)
- dst_ptr[j] += lm_ptr[j];
+ dst_ptr[j] = uchar(dst_ptr[j] + lm_ptr[j]);
}
}
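The explicit uchar cast above only silences a conversion warning; it is safe because of the bound behind the "63 features or less" special case, spelled out:

// Each feature contributes a response of at most 4, so with up to 63 features the
// 8-bit accumulator peaks at 63 * 4 = 252 < 256 and the additions cannot overflow.
enum { kMaxResponse = 4, kMaxFeaturesFor8Bit = 255 / kMaxResponse };  // == 63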
* \param T Sampling step.
* \param center Center of the local region.
*/
-void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
+static void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
Mat& dst, Size size, int T, Point center)
{
// Similar to whole-image similarity() above. This version takes a position 'center'
for (int row = 0; row < 16; ++row)
{
for (int col = 0; col < 16; ++col)
- dst_ptr[col] += lm_ptr[col];
+ dst_ptr[col] = uchar(dst_ptr[col] + lm_ptr[col]);
dst_ptr += 16;
lm_ptr += W;
}
}
}
-void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int length)
+static void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int length)
{
const uchar * end = src1 + length;
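For completeness, a scalar sketch of the widening add declared above (the library version may also carry an SSE path, as elsewhere in this file): summing per-modality 8-bit similarity maps into 16 bits avoids saturation.

static void addUnaligned8u16uScalar(const unsigned char* src1, const unsigned char* src2,
                                    unsigned short* res, int length)
{
    for (int i = 0; i < length; ++i)
        res[i] = (unsigned short)(src1[i] + src2[i]);  // widen before storing
}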
* \param[in] similarities Source 8-bit similarity images.
* \param[out] dst Destination 16-bit similarity image.
*/
-void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
+static void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
{
if (similarities.size() == 1)
{
{
}
-Detector::Detector(const std::vector< Ptr<Modality> >& modalities,
+Detector::Detector(const std::vector< Ptr<Modality> >& _modalities,
const std::vector<int>& T_pyramid)
- : modalities(modalities),
+ : modalities(_modalities),
pyramid_levels(static_cast<int>(T_pyramid.size())),
T_at_level(T_pyramid)
{
// Used to filter out weak matches
struct MatchPredicate
{
- MatchPredicate(float threshold) : threshold(threshold) {}
+ MatchPredicate(float _threshold) : threshold(_threshold) {}
bool operator() (const Match& m) { return m.similarity < threshold; }
float threshold;
};
int max_x = size.width - tp[start].width - border;
int max_y = size.height - tp[start].height - border;
- std::vector<Mat> similarities(modalities.size());
- Mat total_similarity;
+ std::vector<Mat> similarities2(modalities.size());
+ Mat total_similarity2;
for (int m = 0; m < (int)candidates.size(); ++m)
{
- Match& match = candidates[m];
- int x = match.x * 2 + 1; /// @todo Support other pyramid distance
- int y = match.y * 2 + 1;
+ Match& match2 = candidates[m];
+ int x = match2.x * 2 + 1; /// @todo Support other pyramid distance
+ int y = match2.y * 2 + 1;
// Require 8 (reduced) row/cols to the up/left
x = std::max(x, border);
y = std::min(y, max_y);
// Compute local similarity maps for each modality
- int num_features = 0;
+ int numFeatures = 0;
for (int i = 0; i < (int)modalities.size(); ++i)
{
const Template& templ = tp[start + i];
- num_features += static_cast<int>(templ.features.size());
- similarityLocal(lms[i], templ, similarities[i], size, T, Point(x, y));
+ numFeatures += static_cast<int>(templ.features.size());
+ similarityLocal(lms[i], templ, similarities2[i], size, T, Point(x, y));
}
- addSimilarities(similarities, total_similarity);
+ addSimilarities(similarities2, total_similarity2);
// Find best local adjustment
int best_score = 0;
int best_r = -1, best_c = -1;
- for (int r = 0; r < total_similarity.rows; ++r)
+ for (int r = 0; r < total_similarity2.rows; ++r)
{
- ushort* row = total_similarity.ptr<ushort>(r);
- for (int c = 0; c < total_similarity.cols; ++c)
+ ushort* row = total_similarity2.ptr<ushort>(r);
+ for (int c = 0; c < total_similarity2.cols; ++c)
{
int score = row[c];
if (score > best_score)
}
}
// Update current match
- match.x = (x / T - 8 + best_c) * T + offset;
- match.y = (y / T - 8 + best_r) * T + offset;
- match.similarity = (best_score * 100.f) / (4 * num_features);
+ match2.x = (x / T - 8 + best_c) * T + offset;
+ match2.y = (y / T - 8 + best_r) * T + offset;
+ match2.similarity = (best_score * 100.f) / (4 * numFeatures);
}
// Filter out any matches that drop below the similarity threshold
tps[template_id].resize(templates_fn.size());
FileNodeIterator templ_it = templates_fn.begin(), templ_it_end = templates_fn.end();
- int i = 0;
+ int idx = 0;
for ( ; templ_it != templ_it_end; ++templ_it)
{
- tps[template_id][i++].read(*templ_it);
+ tps[template_id][idx++].read(*templ_it);
}
}
#include "_lsvmparser.h"\r
#include "_lsvm_error.h"\r
\r
+namespace\r
+{\r
int isMODEL (char *str){\r
char stag [] = "<Model>";\r
char etag [] = "</Model>";\r
if(ch == '>'){\r
tagBuf[j ] = ch;\r
tagBuf[j + 1] = '\0';\r
- \r
+\r
tagVal = getTeg(tagBuf);\r
- \r
+\r
if(tagVal == ERFILTER){\r
//printf("</RootFilter>\n");\r
return;\r
}\r
\r
tag = 0;\r
- i = 0; \r
+ i = 0;\r
}else{\r
if((tag == 0)&& (st == 1)){\r
buf[i] = ch; i++;\r
tagBuf[j] = ch; j++;\r
}\r
}\r
- } \r
+ }\r
}\r
}\r
\r
if(ch == '>'){\r
tagBuf[j ] = ch;\r
tagBuf[j + 1] = '\0';\r
- \r
+\r
tagVal = getTeg(tagBuf);\r
- \r
+\r
if(tagVal == ETAGV){\r
//printf(" </V>\n");\r
return;\r
//printf(" <Vy>%d</Vy>\n", model->V.y);\r
}\r
tag = 0;\r
- i = 0; \r
+ i = 0;\r
}else{\r
if((tag == 0)&& (st == 1)){\r
buf[i] = ch; i++;\r
tagBuf[j] = ch; j++;\r
}\r
}\r
- } \r
+ }\r
}\r
}\r
void parserD (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){\r
if(ch == '>'){\r
tagBuf[j ] = ch;\r
tagBuf[j + 1] = '\0';\r
- \r
+\r
tagVal = getTeg(tagBuf);\r
- \r
+\r
if(tagVal == ETAGD){\r
//printf(" </D>\n");\r
return;\r
if(tagVal == EDx){\r
st = 0;\r
buf[i] = '\0';\r
- \r
+\r
model->fineFunction[0] = (float)atof(buf);\r
//printf(" <Dx>%f</Dx>\n", model->fineFunction[0]);\r
}\r
if(tagVal == EDy){\r
st = 0;\r
buf[i] = '\0';\r
- \r
+\r
model->fineFunction[1] = (float)atof(buf);\r
//printf(" <Dy>%f</Dy>\n", model->fineFunction[1]);\r
}\r
if(tagVal == EDxx){\r
st = 0;\r
buf[i] = '\0';\r
- \r
+\r
model->fineFunction[2] = (float)atof(buf);\r
//printf(" <Dxx>%f</Dxx>\n", model->fineFunction[2]);\r
}\r
if(tagVal == EDyy){\r
st = 0;\r
buf[i] = '\0';\r
- \r
+\r
model->fineFunction[3] = (float)atof(buf);\r
//printf(" <Dyy>%f</Dyy>\n", model->fineFunction[3]);\r
}\r
\r
tag = 0;\r
- i = 0; \r
+ i = 0;\r
}else{\r
if((tag == 0)&& (st == 1)){\r
buf[i] = ch; i++;\r
tagBuf[j] = ch; j++;\r
}\r
}\r
- } \r
+ }\r
}\r
}\r
\r
if(ch == '>'){\r
tagBuf[j ] = ch;\r
tagBuf[j + 1] = '\0';\r
- \r
+\r
tagVal = getTeg(tagBuf);\r
- \r
+\r
if(tagVal == EPFILTER){\r
//printf("</PathFilter>\n");\r
return;\r
//printf("WEIGHTS OK\n");\r
}\r
tag = 0;\r
- i = 0; \r
+ i = 0;\r
}else{\r
if((tag == 0)&& (st == 1)){\r
buf[i] = ch; i++;\r
tagBuf[j] = ch; j++;\r
}\r
}\r
- } \r
+ }\r
}\r
}\r
void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last, int *max){\r
int tag;\r
int tagVal;\r
char ch;\r
- int i,j;\r
- char buf[1024];\r
+ int /*i,*/j;\r
+ //char buf[1024];\r
char tagBuf[1024];\r
//printf("<PartFilters>\n");\r
\r
- i = 0;\r
+ //i = 0;\r
j = 0;\r
st = 0;\r
tag = 0;\r
if(ch == '>'){\r
tagBuf[j ] = ch;\r
tagBuf[j + 1] = '\0';\r
- \r
+\r
tagVal = getTeg(tagBuf);\r
- \r
+\r
if(tagVal == EPFILTERs){\r
//printf("</PartFilters>\n");\r
return;\r
N_path++;\r
}\r
tag = 0;\r
- i = 0; \r
+ //i = 0;\r
}else{\r
if((tag == 0)&& (st == 1)){\r
- buf[i] = ch; i++;\r
+ //buf[i] = ch; i++;\r
}else{\r
tagBuf[j] = ch; j++;\r
}\r
}\r
- } \r
+ }\r
}\r
}\r
void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model, float *b, int *last, int *max){\r
int tag;\r
int tagVal;\r
char ch;\r
- int i,j;\r
- char buf[1024];\r
+ int /*i,*/j;\r
+ //char buf[1024];\r
char tagBuf[1024];\r
//printf("<Component> %d\n", *N_comp);\r
\r
- i = 0;\r
+ //i = 0;\r
j = 0;\r
st = 0;\r
tag = 0;\r
if(ch == '>'){\r
tagBuf[j ] = ch;\r
tagBuf[j + 1] = '\0';\r
- \r
+\r
tagVal = getTeg(tagBuf);\r
- \r
+\r
if(tagVal == ECOMP){\r
(*N_comp) ++;\r
return;\r
parserPFilterS (xmlf, p, model, last, max);\r
}\r
tag = 0;\r
- i = 0; \r
+ //i = 0;\r
}else{\r
if((tag == 0)&& (st == 1)){\r
- buf[i] = ch; i++;\r
+ //buf[i] = ch; i++;\r
}else{\r
tagBuf[j] = ch; j++;\r
}\r
}\r
- } \r
+ }\r
}\r
}\r
void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){\r
int i,j, ii = 0;\r
char buf[1024];\r
char tagBuf[1024];\r
- \r
+\r
//printf("<Model>\n");\r
- \r
+\r
i = 0;\r
j = 0;\r
st = 0;\r
if(ch == '>'){\r
tagBuf[j ] = ch;\r
tagBuf[j + 1] = '\0';\r
- \r
+\r
tagVal = getTeg(tagBuf);\r
- \r
+\r
if(tagVal == EMODEL){\r
//printf("</Model>\n");\r
for(ii = 0; ii <= *last; ii++){\r
bb = (float *)malloc(sizeof(float));\r
* comp = cmp;\r
* b = bb;\r
- * count = N_comp + 1; \r
+ * count = N_comp + 1;\r
} else {\r
cmp = (int *)malloc(sizeof(int) * (N_comp + 1));\r
bb = (float *)malloc(sizeof(float) * (N_comp + 1));\r
free(* b );\r
* comp = cmp;\r
* b = bb;\r
- * count = N_comp + 1; \r
+ * count = N_comp + 1;\r
}\r
parserComp(xmlf, p, &N_comp, model, &((*b)[N_comp]), last, max);\r
cmp[N_comp - 1] = *last;\r
//printf("<ScoreThreshold>%f</ScoreThreshold>\n", score);\r
}\r
tag = 0;\r
- i = 0; \r
+ i = 0;\r
}else{\r
if((tag == 0)&& (st == 1)){\r
buf[i] = ch; i++;\r
tagBuf[j] = ch; j++;\r
}\r
}\r
- } \r
+ }\r
}\r
}\r
\r
+}//namespace\r
+\r
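The unnamed namespace closed above gives the parser helpers internal linkage, so they need no prototypes and cannot collide with symbols from other translation units; a minimal illustration with hypothetical names:

namespace
{
    int helper(int x) { return x + 1; }   // internal linkage, much like 'static'
}

int publicEntryPoint(int x);              // only this symbol is visible outside the file
int publicEntryPoint(int x) { return helper(x); }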
int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){\r
- int st = 0;\r
+ //int st = 0;\r
int tag;\r
char ch;\r
- int i,j;\r
+ int /*i,*/j;\r
FILE *xmlf;\r
- char buf[1024];\r
+ //char buf[1024];\r
char tagBuf[1024];\r
\r
(*max) = 10;\r
xmlf = fopen(filename, "rb");\r
if(xmlf == NULL)\r
return LSVM_PARSER_FILE_NOT_FOUND;\r
- \r
- i = 0;\r
+\r
+ //i = 0;\r
j = 0;\r
- st = 0;\r
+ //st = 0;\r
tag = 0;\r
while(!feof(xmlf)){\r
ch = (char)fgetc( xmlf );\r
}else {\r
if(ch == '>'){\r
tag = 0;\r
- i = 0;\r
+ //i = 0;\r
tagBuf[j ] = ch;\r
tagBuf[j + 1] = '\0';\r
if(getTeg(tagBuf) == MODEL){\r
}\r
}else{\r
if(tag == 0){\r
- buf[i] = ch; i++;\r
+ //buf[i] = ch; i++;\r
}else{\r
tagBuf[j] = ch; j++;\r
}\r
}\r
- } \r
+ }\r
}\r
- \r
+\r
fclose(xmlf);\r
return LATENT_SVM_OK;\r
}\r
int loadModel(\r
const char *modelPath,\r
CvLSVMFilterObject ***filters,\r
- int *kFilters, \r
- int *kComponents, \r
- int **kPartFilters, \r
- float **b, \r
- float *scoreThreshold){ \r
+ int *kFilters,\r
+ int *kComponents,\r
+ int **kPartFilters,\r
+ float **b,\r
+ float *scoreThreshold){\r
int last;\r
int max;\r
int *comp;\r
int count;\r
int i;\r
- int err;\r
+ int err;\r
float score;\r
//printf("start_parse\n\n");\r
\r
err = LSVMparser(modelPath, filters, &last, &max, &comp, b, &count, &score);\r
- if(err != LATENT_SVM_OK){\r
- return err;\r
- }\r
+ if(err != LATENT_SVM_OK){\r
+ return err;\r
+ }\r
(*kFilters) = last + 1;\r
(*kComponents) = count;\r
(*scoreThreshold) = (float) score;\r
*/\r
int convolution(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap *map, float *f)\r
{\r
- int n1, m1, n2, m2, p, size, diff1, diff2;\r
+ int n1, m1, n2, m2, p, /*size,*/ diff1, diff2;\r
int i1, i2, j1, j2, k;\r
float tmp_f1, tmp_f2, tmp_f3, tmp_f4;\r
float *pMap = NULL;\r
\r
diff1 = n1 - n2 + 1;\r
diff2 = m1 - m2 + 1;\r
- size = diff1 * diff2;\r
+ //size = diff1 * diff2;\r
for (j1 = diff2 - 1; j1 >= 0; j1--)\r
{\r
\r
float **scoreFi,\r
int **pointsX, int **pointsY)\r
{\r
- int n1, m1, n2, m2, p, size, diff1, diff2;\r
+ int n1, m1, n2, m2, /*p,*/ size, diff1, diff2;\r
float *f;\r
int i1, j1;\r
int res;\r
m1 = pyramid->sizeX;\r
n2 = Fi->sizeY;\r
m2 = Fi->sizeX;\r
- p = pyramid->numFeatures;\r
+ //p = pyramid->numFeatures;\r
(*scoreFi) = NULL;\r
(*pointsX) = NULL;\r
(*pointsY) = NULL;\r
float **scoreFi,\r
int **pointsX, int **pointsY)\r
{\r
- int n1, m1, n2, m2, p, size, diff1, diff2;\r
+ int n1, m1, n2, m2, /*p,*/ size, diff1, diff2;\r
float *f;\r
int i1, j1;\r
int res;\r
m1 = featMapImage->dimX;\r
n2 = Fi->sizeY;\r
m2 = Fi->sizeX;\r
- p = featMapImage->numFeatures;\r
+ //p = featMapImage->numFeatures;\r
(*scoreFi) = NULL;\r
(*pointsX) = NULL;\r
(*pointsY) = NULL;\r
return LATENT_SVM_OK;\r
}\r
\r
-CvLSVMFeatureMap* featureMapBorderPartFilter(CvLSVMFeatureMap *map,\r
+static CvLSVMFeatureMap* featureMapBorderPartFilter(CvLSVMFeatureMap *map,\r
int maxXBorder, int maxYBorder)\r
{\r
int bx, by;\r
float *score, CvPoint **points,\r
int *kPoints, CvPoint ***partsDisplacement)\r
{\r
- int i, j, k, dimX, dimY, nF0, mF0, p;\r
+ int i, j, k, dimX, dimY, nF0, mF0/*, p*/;\r
int diff1, diff2, index, last, partsLevel;\r
CvLSVMFilterDisposition **disposition;\r
float *f;\r
dimY = H->pyramid[level]->sizeY;\r
\r
// Number of features\r
- p = H->pyramid[level]->numFeatures;\r
+ //p = H->pyramid[level]->numFeatures;\r
\r
// Getting dimension of root filter\r
nF0 = all_F[0]->sizeY;\r
float **score, CvPoint **points, int *kPoints,\r
CvPoint ***partsDisplacement)\r
{\r
- int i, j, k, dimX, dimY, nF0, mF0, p;\r
+ int i, j, k, dimX, dimY, nF0, mF0/*, p*/;\r
int diff1, diff2, index, last, partsLevel;\r
CvLSVMFilterDisposition **disposition;\r
float *f;\r
dimY = H->pyramid[level]->sizeY;\r
\r
// Number of features\r
- p = H->pyramid[level]->numFeatures;\r
+ //p = H->pyramid[level]->numFeatures;\r
\r
// Getting dimension of root filter\r
nF0 = all_F[0]->sizeY;\r
return LATENT_SVM_OK;\r
}\r
\r
+#ifdef HAVE_TBB\r
/*\r
// Creating schedule of pyramid levels processing\r
//\r
// RESULT\r
// Error status\r
*/\r
-int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,\r
+static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,\r
const int n, const int bx, const int by,\r
const int threadsNum, int *kLevels, int **processingLevels)\r
{\r
int rootFilterDim, sumPartFiltersDim, i, numLevels, dbx, dby, numDotProducts;\r
- int averNumDotProd, j, minValue, argMin, lambda, maxValue, k;\r
+ int j, minValue, argMin, lambda, maxValue, k;\r
int *dotProd, *weights, *disp;\r
if (H == NULL || all_F == NULL)\r
{\r
(H->pyramid[i]->sizeY + dby) * sumPartFiltersDim;\r
numDotProducts += dotProd[i];\r
}\r
- // Average number of dot products that would be performed at the best\r
- averNumDotProd = numDotProducts / threadsNum;\r
// Allocation memory for saving dot product number performed by each thread\r
weights = (int *)malloc(sizeof(int) * threadsNum);\r
// Allocation memory for saving dispertion\r
return LATENT_SVM_OK;\r
}\r
\r
-#ifdef HAVE_TBB\r
/*\r
// int tbbThresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,\r
const CvLSVMFeaturePyramid *H,\r
}\r
#endif\r
\r
-void sort(int n, const float* x, int* indices)\r
+static void sort(int n, const float* x, int* indices)\r
{\r
int i, j;\r
for (i = 0; i < n; i++)\r
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4710 4711 4514 4996 )
-#endif
-
-#ifdef HAVE_CVCONFIG_H
+#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
//#define TOTAL_NO_PAIR_E "totalNoPairE"
#define DETECTOR_NAMES "detector_names"
-#define DETECTORS "detectors"
+#define DETECTORS "detectors"
#define IMAGE_FILENAMES "image_filenames"
#define VALIDATION "validation"
-#define FILENAME "fn"
+#define FILENAME "fn"
-#define C_SCALE_CASCADE "scale_cascade"
+#define C_SCALE_CASCADE "scale_cascade"
class CV_DetectorTest : public cvtest::BaseTest
{
virtual int prepareData( FileStorage& fs );
virtual void run( int startFrom );
virtual string& getValidationFilename();
-
- virtual void readDetector( const FileNode& fn ) = 0;
- virtual void writeDetector( FileStorage& fs, int di ) = 0;
+
+ virtual void readDetector( const FileNode& fn ) = 0;
+ virtual void writeDetector( FileStorage& fs, int di ) = 0;
int runTestCase( int detectorIdx, vector<vector<Rect> >& objects );
virtual int detectMultiScale( int di, const Mat& img, vector<Rect>& objects ) = 0;
int validate( int detectorIdx, vector<vector<Rect> >& objects );
FileNodeIterator it = fn[DETECTOR_NAMES].begin();
for( ; it != fn[DETECTOR_NAMES].end(); )
{
- string name;
- it >> name;
- detectorNames.push_back(name);
- readDetector(fn[DETECTORS][name]);
+ string _name;
+ it >> _name;
+ detectorNames.push_back(_name);
+ readDetector(fn[DETECTORS][_name]);
}
}
test_case_count = (int)detectorNames.size();
}
validationFS << "]"; // DETECTOR_NAMES
- // write detectors
- validationFS << DETECTORS << "{";
- assert( detectorNames.size() == detectorFilenames.size() );
- nit = detectorNames.begin();
- for( int di = 0; di < detectorNames.size(), nit != detectorNames.end(); ++nit, di++ )
- {
- validationFS << *nit << "{";
- writeDetector( validationFS, di );
- validationFS << "}";
- }
- validationFS << "}";
-
+ // write detectors
+ validationFS << DETECTORS << "{";
+ assert( detectorNames.size() == detectorFilenames.size() );
+ nit = detectorNames.begin();
+    for( int di = 0; di < (int)detectorNames.size() && nit != detectorNames.end(); ++nit, di++ )
+ {
+ validationFS << *nit << "{";
+ writeDetector( validationFS, di );
+ validationFS << "}";
+ }
+ validationFS << "}";
+
// write image filenames
validationFS << IMAGE_FILENAMES << "[";
vector<string>::const_iterator it = imageFilenames.begin();
return cvtest::TS::FAIL_INVALID_TEST_DATA;
}
int code = detectMultiScale( detectorIdx, image, imgObjects );
- if( code != cvtest::TS::OK )
- return code;
+ if( code != cvtest::TS::OK )
+ return code;
objects.push_back( imgObjects );
vector<Rect> valRects;
if( node.node->data.seq != 0 )
{
- for( FileNodeIterator it = node.begin(); it != node.end(); )
+ for( FileNodeIterator it2 = node.begin(); it2 != node.end(); )
{
Rect r;
- it >> r.x >> r.y >> r.width >> r.height;
+ it2 >> r.x >> r.y >> r.width >> r.height;
valRects.push_back(r);
}
}
totalValRectCount += (int)valRects.size();
-
+
// compare rectangles
- vector<uchar> map(valRects.size(), 0);
+ vector<uchar> map(valRects.size(), 0);
for( vector<Rect>::const_iterator cr = it->begin();
cr != it->end(); ++cr )
{
{
Rect vr = valRects[minIdx];
if( map[minIdx] != 0 || (minDist > dist) || (abs(cr->width - vr.width) > wDiff) ||
- (abs(cr->height - vr.height) > hDiff) )
+ (abs(cr->height - vr.height) > hDiff) )
noPair++;
- else
- map[minIdx] = 1;
+ else
+ map[minIdx] = 1;
}
}
noPair += (int)count_if( map.begin(), map.end(), isZero );
public:
CV_CascadeDetectorTest();
protected:
- virtual void readDetector( const FileNode& fn );
- virtual void writeDetector( FileStorage& fs, int di );
+ virtual void readDetector( const FileNode& fn );
+ virtual void writeDetector( FileStorage& fs, int di );
virtual int detectMultiScale( int di, const Mat& img, vector<Rect>& objects );
- vector<int> flags;
+ vector<int> flags;
};
CV_CascadeDetectorTest::CV_CascadeDetectorTest()
void CV_CascadeDetectorTest::readDetector( const FileNode& fn )
{
- string filename;
- int flag;
- fn[FILENAME] >> filename;
- detectorFilenames.push_back(filename);
- fn[C_SCALE_CASCADE] >> flag;
- if( flag )
- flags.push_back( 0 );
- else
- flags.push_back( CV_HAAR_SCALE_IMAGE );
+ string filename;
+ int flag;
+ fn[FILENAME] >> filename;
+ detectorFilenames.push_back(filename);
+ fn[C_SCALE_CASCADE] >> flag;
+ if( flag )
+ flags.push_back( 0 );
+ else
+ flags.push_back( CV_HAAR_SCALE_IMAGE );
}
void CV_CascadeDetectorTest::writeDetector( FileStorage& fs, int di )
{
- int sc = flags[di] & CV_HAAR_SCALE_IMAGE ? 0 : 1;
- fs << FILENAME << detectorFilenames[di];
- fs << C_SCALE_CASCADE << sc;
+ int sc = flags[di] & CV_HAAR_SCALE_IMAGE ? 0 : 1;
+ fs << FILENAME << detectorFilenames[di];
+ fs << C_SCALE_CASCADE << sc;
}
int CV_CascadeDetectorTest::detectMultiScale( int di, const Mat& img,
vector<Rect>& objects)
{
- string dataPath = ts->get_data_path(), filename;
- filename = dataPath + detectorFilenames[di];
+ string dataPath = ts->get_data_path(), filename;
+ filename = dataPath + detectorFilenames[di];
CascadeClassifier cascade( filename );
- if( cascade.empty() )
- {
- ts->printf( cvtest::TS::LOG, "cascade %s can not be opened");
- return cvtest::TS::FAIL_INVALID_TEST_DATA;
- }
+ if( cascade.empty() )
+ {
+        ts->printf( cvtest::TS::LOG, "cascade %s can not be opened", filename.c_str());
+ return cvtest::TS::FAIL_INVALID_TEST_DATA;
+ }
Mat grayImg;
cvtColor( img, grayImg, CV_BGR2GRAY );
equalizeHist( grayImg, grayImg );
cascade.detectMultiScale( grayImg, objects, 1.1, 3, flags[di] );
- return cvtest::TS::OK;
+ return cvtest::TS::OK;
}
//----------------------------------------------- HOGDetectorTest -----------------------------------
public:
CV_HOGDetectorTest();
protected:
- virtual void readDetector( const FileNode& fn );
- virtual void writeDetector( FileStorage& fs, int di );
+ virtual void readDetector( const FileNode& fn );
+ virtual void writeDetector( FileStorage& fs, int di );
virtual int detectMultiScale( int di, const Mat& img, vector<Rect>& objects );
};
void CV_HOGDetectorTest::readDetector( const FileNode& fn )
{
- string filename;
- if( fn[FILENAME].node->data.seq != 0 )
- fn[FILENAME] >> filename;
- detectorFilenames.push_back( filename);
+ string filename;
+ if( fn[FILENAME].node->data.seq != 0 )
+ fn[FILENAME] >> filename;
+ detectorFilenames.push_back( filename);
}
void CV_HOGDetectorTest::writeDetector( FileStorage& fs, int di )
{
- fs << FILENAME << detectorFilenames[di];
+ fs << FILENAME << detectorFilenames[di];
}
int CV_HOGDetectorTest::detectMultiScale( int di, const Mat& img,
else
assert(0);
hog.detectMultiScale(img, objects);
- return cvtest::TS::OK;
+ return cvtest::TS::OK;
}
TEST(Objdetect_CascadeDetector, regression) { CV_CascadeDetectorTest test; test.safe_run(); }
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/photo/photo.hpp"
#include "opencv2/highgui/highgui.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
//HEAP::iterator Heap_Iterator;
//HEAP Heap;
-float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMat* t)
+static float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMat* t)
{
double sol, a11, a22, m12;
a11=CV_MAT_ELEM(*t,float,i1,j1);
a22=CV_MAT_ELEM(*t,float,i2,j2);
m12=MIN(a11,a22);
-
+
if( CV_MAT_ELEM(*f,uchar,i1,j1) != INSIDE )
if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE )
if( fabs(a11-a22) >= 1.0 )
sol = 1+a22;
else
sol = 1+m12;
-
+
return (float)sol;
}
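The branches above implement the fast-marching time update for two neighbouring arrival times a11 and a22: when both are known and differ by less than one grid step, the quadratic (t - a11)^2 + (t - a22)^2 = 1 is solved; otherwise the update degenerates to 1 plus the smaller (or the only) known time. A self-contained sketch of that scalar update, without the INSIDE flag checks (illustrative only):

#include <cmath>
#include <algorithm>

static double eikonalUpdate(double a11, double a22)
{
    if (std::fabs(a11 - a22) >= 1.0)
        return 1.0 + std::min(a11, a22);
    // positive root of 2t^2 - 2t(a11 + a22) + a11^2 + a22^2 - 1 = 0
    return 0.5 * (a11 + a22 + std::sqrt(2.0 - (a11 - a22) * (a11 - a22)));
}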
cv::Ptr<CvMat> mask, band, f, t, out;
cv::Ptr<CvPriorityQueueFloat> Heap, Out;
cv::Ptr<IplConvKernel> el_cross, el_range;
-
+
CvMat input_hdr, mask_hdr, output_hdr;
CvMat* input_img, *inpaint_mask, *output_img;
- int range=cvRound(inpaintRange);
+ int range=cvRound(inpaintRange);
int erows, ecols;
input_img = cvGetMat( _input_img, &input_hdr );
inpaint_mask = cvGetMat( _inpaint_mask, &mask_hdr );
output_img = cvGetMat( _output_img, &output_hdr );
-
+
if( !CV_ARE_SIZES_EQ(input_img,output_img) || !CV_ARE_SIZES_EQ(input_img,inpaint_mask))
CV_Error( CV_StsUnmatchedSizes, "All the input and output images must have the same size" );
-
+
if( (CV_MAT_TYPE(input_img->type) != CV_8UC1 &&
CV_MAT_TYPE(input_img->type) != CV_8UC3) ||
!CV_ARE_TYPES_EQ(input_img,output_img) )
band = cvCreateMat(erows, ecols, CV_8UC1);
mask = cvCreateMat(erows, ecols, CV_8UC1);
el_cross = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_CROSS,NULL);
-
+
cvCopy( input_img, output_img );
cvSet(mask,cvScalar(KNOWN,0,0,0));
COPY_MASK_BORDER1_C1(inpaint_mask,mask,uchar);
cvSet(f,cvScalar(BAND,0,0,0),band);
cvSet(f,cvScalar(INSIDE,0,0,0),mask);
cvSet(t,cvScalar(0,0,0,0),band);
-
+
if( flags == CV_INPAINT_TELEA )
{
out = cvCreateMat(erows, ecols, CV_8UC1);
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
-#endif
-
#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
static int failmsg(const char *fmt, ...)
{
char str[1000];
-
+
va_list ap;
va_start(ap, fmt);
vsnprintf(str, sizeof(str), fmt, ap);
va_end(ap);
-
+
PyErr_SetString(PyExc_TypeError, str);
return 0;
}
{
public:
PyAllowThreads() : _state(PyEval_SaveThread()) {}
- ~PyAllowThreads()
+ ~PyAllowThreads()
{
PyEval_RestoreThread(_state);
}
{
public:
PyEnsureGIL() : _state(PyGILState_Ensure()) {}
- ~PyEnsureGIL()
+ ~PyEnsureGIL()
{
PyGILState_Release(_state);
}
public:
NumpyAllocator() {}
~NumpyAllocator() {}
-
+
void allocate(int dims, const int* sizes, int type, int*& refcount,
uchar*& datastart, uchar*& data, size_t* step)
{
step[i] = (size_t)_strides[i];
datastart = data = (uchar*)PyArray_DATA(o);
}
-
+
void deallocate(int* refcount, uchar* datastart, uchar* data)
{
PyEnsureGIL gil;
};
NumpyAllocator g_numpyAllocator;
-
+
enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
static int pyopencv_to(const PyObject* o, Mat& m, const char* name = "<unknown>", bool allowND=true)
m.allocator = &g_numpyAllocator;
return true;
}
-
+
if( !PyArray_Check(o) )
{
failmsg("%s is not a numpy array", name);
return false;
}
-
+
int typenum = PyArray_TYPE(o);
int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
- typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S :
+ typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S :
typenum == NPY_INT || typenum == NPY_LONG ? CV_32S :
typenum == NPY_FLOAT ? CV_32F :
typenum == NPY_DOUBLE ? CV_64F : -1;
-
+
if( type < 0 )
{
failmsg("%s data type = %d is not supported", name, typenum);
return false;
}
-
+
int ndims = PyArray_NDIM(o);
if(ndims >= CV_MAX_DIM)
{
failmsg("%s dimensionality (=%d) is too high", name, ndims);
return false;
}
-
+
int size[CV_MAX_DIM+1];
size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
const npy_intp* _sizes = PyArray_DIMS(o);
const npy_intp* _strides = PyArray_STRIDES(o);
bool transposed = false;
-
+
for(int i = 0; i < ndims; i++)
{
size[i] = (int)_sizes[i];
step[i] = (size_t)_strides[i];
}
-
+
if( ndims == 0 || step[ndims-1] > elemsize ) {
size[ndims] = 1;
step[ndims] = elemsize;
ndims++;
}
-
+
if( ndims >= 2 && step[0] < step[1] )
{
std::swap(size[0], size[1]);
std::swap(step[0], step[1]);
transposed = true;
}
-
+
if( ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize*size[2] )
{
ndims--;
type |= CV_MAKETYPE(0, size[2]);
}
-
+
if( ndims > 2 && !allowND )
{
failmsg("%s has more than 2 dimensions", name);
return false;
}
-
+
m = Mat(ndims, size, type, PyArray_DATA(o), step);
-
+
if( m.data )
{
m.refcount = refcountFromPyObject(o);
// (since Mat destructor will decrement the reference counter)
};
m.allocator = &g_numpyAllocator;
-
+
if( transposed )
{
Mat tmp;
}
return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0;
}
-
+
static inline PyObject* pyopencv_from(const CvSlice& r)
{
return Py_BuildValue("(ii)", r.start_index, r.end_index);
-}
-
+}
+
static inline bool pyopencv_to(PyObject* obj, Point& p, const char* name = "<unknown>")
{
if(!obj || obj == Py_None)
return false;
int i, j, n = (int)PySequence_Fast_GET_SIZE(seq);
value.resize(n);
-
+
int type = DataType<_Tp>::type;
int depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);
PyObject** items = PySequence_Fast_ITEMS(seq);
-
+
for( i = 0; i < n; i++ )
{
PyObject* item = items[i];
PyObject* seq_i = 0;
PyObject** items_i = &item;
_Cp* data = (_Cp*)&value[i];
-
+
if( channels == 2 && PyComplex_CheckExact(item) )
{
Py_complex c = PyComplex_AsCComplex(obj);
break;
continue;
}
-
+
seq_i = PySequence_Fast(item, name);
if( !seq_i || (int)PySequence_Fast_GET_SIZE(seq_i) != channels )
{
}
items_i = PySequence_Fast_ITEMS(seq_i);
}
-
+
for( j = 0; j < channels; j++ )
{
PyObject* item_ij = items_i[j];
Py_DECREF(seq);
return i == n;
}
-
+
static PyObject* from(const vector<_Tp>& value)
{
if(value.empty())
return false;
int i, n = (int)PySequence_Fast_GET_SIZE(seq);
value.resize(n);
-
+
PyObject** items = PySequence_Fast_ITEMS(seq);
-
+
for( i = 0; i < n; i++ )
{
PyObject* item = items[i];
int i, n = (int)value.size();
PyObject* seq = PyList_New(n);
for( i = 0; i < n; i++ )
- {
+ {
PyObject* item = pyopencv_from(value[i]);
if(!item)
break;
{
return pyopencv_to_generic_vec(obj, value, name);
}
-
+
static PyObject* from(const vector<vector<_Tp> >& value)
{
return pyopencv_from_generic_vec(value);
{
return pyopencv_to_generic_vec(obj, value, name);
}
-
+
static PyObject* from(const vector<Mat>& value)
{
return pyopencv_from_generic_vec(value);
{
return pyopencv_to_generic_vec(obj, value, name);
}
-
+
static PyObject* from(const vector<KeyPoint>& value)
{
return pyopencv_from_generic_vec(value);
{
return pyopencv_to_generic_vec(obj, value, name);
}
-
+
static PyObject* from(const vector<DMatch>& value)
{
return pyopencv_from_generic_vec(value);
{
return pyopencv_to_generic_vec(obj, value, name);
}
-
+
static PyObject* from(const vector<string>& value)
{
return pyopencv_from_generic_vec(value);
bool ok = false;
PyObject* keys = PyObject_CallMethod(o,(char*)"keys",0);
PyObject* values = PyObject_CallMethod(o,(char*)"values",0);
-
+
if( keys && values )
{
int i, n = (int)PyList_GET_SIZE(keys);
}
ok = i == n && !PyErr_Occurred();
}
-
+
Py_XDECREF(keys);
Py_XDECREF(values);
return ok;
{
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
-
+
PyObject *o = (PyObject*)param;
PyObject *args = Py_BuildValue("iiiiO", event, x, y, flags, PyTuple_GetItem(o, 1));
-
+
PyObject *r = PyObject_Call(PyTuple_GetItem(o, 0), args, NULL);
if (r == NULL)
PyErr_Print();
char* name;
PyObject *on_mouse;
PyObject *param = NULL;
-
+
    if (!PyArg_ParseTupleAndKeywords(args, kw, "sO|O", (char**)keywords, &name, &on_mouse, &param))
return NULL;
if (!PyCallable_Check(on_mouse)) {
Py_RETURN_NONE;
}
-void OnChange(int pos, void *param)
+static void OnChange(int pos, void *param)
{
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
-
+
PyObject *o = (PyObject*)param;
PyObject *args = Py_BuildValue("(i)", pos);
PyObject *r = PyObject_Call(PyTuple_GetItem(o, 0), args, NULL);
char* window_name;
int *value = new int;
int count;
-
+
if (!PyArg_ParseTuple(args, "ssiiO", &trackbar_name, &window_name, value, &count, &on_change))
return NULL;
if (!PyCallable_Check(on_change)) {
#if defined WIN32 || defined _WIN32
__declspec(dllexport)
#endif
+void initcv2();
void initcv2()
{
#if PYTHON_USE_NUMPY
import_array();
#endif
-
+
#if PYTHON_USE_NUMPY
#include "pyopencv_generated_type_reg.h"
#endif
opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL);
PyDict_SetItemString(d, "error", opencv_error);
-
+
PyObject* cv_m = init_cv();
- PyDict_SetItemString(d, "cv", cv_m);
+ PyDict_SetItemString(d, "cv", cv_m);
#define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I))
#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
/************************************************************************/
-CvMat *PyCvMat_AsCvMat(PyObject *o)
+static CvMat *PyCvMat_AsCvMat(PyObject *o)
{
assert(0); // not yet implemented: reference counting for CvMat in Kalman is unclear...
return NULL;
}
#define cvReleaseIplConvKernel(x) cvReleaseStructuringElement(x)
+
+#if defined _MSC_VER && _MSC_VER >= 1200
+ #pragma warning( push )
+ #pragma warning( disable : 4244 )
+#endif
+
#include "generated3.i"
+#if defined _MSC_VER && _MSC_VER >= 1200
+ #pragma warning( pop )
+#endif
+
/* iplimage */
static void iplimage_dealloc(PyObject *self)
bps = CV_MAT_CN(m->type) * 8;
break;
default:
- return failmsg("Unrecognised depth %d", CV_MAT_DEPTH(m->type)), (PyObject*)0;
+ failmsg("Unrecognized depth %d", CV_MAT_DEPTH(m->type));
+ return (PyObject*)0;
}
int bpl = m->cols * bps; // bytes per line
sizeof(memtrack_t), /*basicsize*/
};
-Py_ssize_t memtrack_getreadbuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
+static Py_ssize_t memtrack_getreadbuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
{
*ptrptr = &((memtrack_t*)self)->ptr;
return ((memtrack_t*)self)->size;
}
-Py_ssize_t memtrack_getwritebuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
+static Py_ssize_t memtrack_getwritebuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
{
*ptrptr = ((memtrack_t*)self)->ptr;
return ((memtrack_t*)self)->size;
}
-Py_ssize_t memtrack_getsegcount(PyObject *self, Py_ssize_t *lenp)
+static Py_ssize_t memtrack_getsegcount(PyObject *self, Py_ssize_t *lenp)
{
return (Py_ssize_t)1;
}
#define CVPY_VALIDATE_DrawChessboardCorners() do { \
if ((patternSize.width * patternSize.height) != corners.count) \
- return (PyObject*)failmsg("Size is %dx%d, but corner list is length %d", patternSize.width, patternSize.height, corners.count); \
+    { failmsg("Size is %dx%d, but corner list is length %d", patternSize.width, patternSize.height, corners.count); return (PyObject*)0; } \
} while (0)
#define cvGetRotationMatrix2D cv2DRotationMatrix
#define cvKMeans2(samples, nclusters, labels, termcrit, attempts, flags, centers) \
cppKMeans(samples, nclusters, labels, termcrit, attempts, flags, centers)
+#if defined _MSC_VER && _MSC_VER >= 1200
+ #pragma warning( push )
+ #pragma warning( disable : 4244 )
+#endif
+
#include "generated0.i"
+#if defined _MSC_VER && _MSC_VER >= 1200
+ #pragma warning( pop )
+#endif
+
static PyMethodDef old_methods[] = {
#if PYTHON_USE_NUMPY
/************************************************************************/
/* Module init */
-PyObject* init_cv()
+static PyObject* init_cv()
{
PyObject *m, *d;
cvSetErrMode(CV_ErrModeParent);
enum { NO, FEATHER, MULTI_BAND };\r
static Ptr<Blender> createDefault(int type, bool try_gpu = false);\r
\r
- void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes); \r
+ void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);\r
virtual void prepare(Rect dst_roi);\r
virtual void feed(const Mat &img, const Mat &mask, Point tl);\r
virtual void blend(Mat &dst, Mat &dst_mask);\r
class CV_EXPORTS FeatherBlender : public Blender\r
{\r
public:\r
- FeatherBlender(float sharpness = 0.02f) { setSharpness(sharpness); }\r
+ FeatherBlender(float sharpness = 0.02f);\r
\r
float sharpness() const { return sharpness_; }\r
void setSharpness(float val) { sharpness_ = val; }\r
Mat dst_weight_map_;\r
};\r
\r
+inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); }\r
+\r
\r
class CV_EXPORTS MultiBandBlender : public Blender\r
{\r
public:\r
virtual ~Estimator() {}\r
\r
- void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
+ void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,\r
std::vector<CameraParams> &cameras)\r
{ estimate(features, pairwise_matches, cameras); }\r
\r
protected:\r
- virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
+ virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,\r
std::vector<CameraParams> &cameras) = 0;\r
};\r
\r
HomographyBasedEstimator(bool is_focals_estimated = false)\r
: is_focals_estimated_(is_focals_estimated) {}\r
\r
-private: \r
- void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
+private:\r
+ void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,\r
std::vector<CameraParams> &cameras);\r
\r
bool is_focals_estimated_;\r
{\r
public:\r
const Mat refinementMask() const { return refinement_mask_.clone(); }\r
- void setRefinementMask(const Mat &mask) \r
- { \r
+ void setRefinementMask(const Mat &mask)\r
+ {\r
CV_Assert(mask.type() == CV_8U && mask.size() == Size(3, 3));\r
- refinement_mask_ = mask.clone(); \r
+ refinement_mask_ = mask.clone();\r
}\r
\r
double confThresh() const { return conf_thresh_; }\r
void setTermCriteria(const CvTermCriteria& term_criteria) { term_criteria_ = term_criteria; }\r
\r
protected:\r
- BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement) \r
- : num_params_per_cam_(num_params_per_cam), \r
- num_errs_per_measurement_(num_errs_per_measurement) \r
- { \r
+ BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)\r
+ : num_params_per_cam_(num_params_per_cam),\r
+ num_errs_per_measurement_(num_errs_per_measurement)\r
+ {\r
setRefinementMask(Mat::ones(3, 3, CV_8U));\r
- setConfThresh(1.); \r
+ setConfThresh(1.);\r
setTermCriteria(cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON));\r
}\r
\r
// Runs bundle adjustment\r
- virtual void estimate(const std::vector<ImageFeatures> &features, \r
+ virtual void estimate(const std::vector<ImageFeatures> &features,\r
const std::vector<MatchesInfo> &pairwise_matches,\r
std::vector<CameraParams> &cameras);\r
\r
\r
\r
// Minimizes reprojection error.\r
-// It can estimate focal length, aspect ratio, principal point. \r
+// It can estimate focal length, aspect ratio, principal point.\r
// You can affect only on them via the refinement mask.\r
class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase\r
{\r
};\r
\r
\r
-enum CV_EXPORTS WaveCorrectKind\r
+enum WaveCorrectKind\r
{\r
WAVE_CORRECT_HORIZ,\r
WAVE_CORRECT_VERT\r
std::string CV_EXPORTS matchesGraphAsString(std::vector<std::string> &pathes, std::vector<MatchesInfo> &pairwise_matches,\r
float conf_threshold);\r
\r
-std::vector<int> CV_EXPORTS leaveBiggestComponent(std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches, \r
+std::vector<int> CV_EXPORTS leaveBiggestComponent(std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,\r
float conf_threshold);\r
\r
-void CV_EXPORTS findMaxSpanningTree(int num_images, const std::vector<MatchesInfo> &pairwise_matches, \r
+void CV_EXPORTS findMaxSpanningTree(int num_images, const std::vector<MatchesInfo> &pairwise_matches,\r
Graph &span_tree, std::vector<int> ¢ers);\r
\r
} // namespace detail\r
\r
// TODO remove LOG macros, add logging class\r
#if ENABLE_LOG\r
-#if ANDROID\r
+#ifdef ANDROID\r
#include <iostream>\r
#include <sstream>\r
#include <android/log.h>\r
{ \\r
LOG_STITCHING_MSG(_msg); \\r
} \\r
- break; \\r
- } \r
+ break; \\r
+ }\r
\r
\r
#define LOG(msg) LOG_(1, msg)\r
\r
struct CV_EXPORTS GraphEdge\r
{\r
- GraphEdge(int from, int to, float weight) \r
- : from(from), to(to), weight(weight) {}\r
+ GraphEdge(int from, int to, float weight);\r
bool operator <(const GraphEdge& other) const { return weight < other.weight; }\r
bool operator >(const GraphEdge& other) const { return weight > other.weight; }\r
\r
float weight;\r
};\r
\r
+inline GraphEdge::GraphEdge(int _from, int _to, float _weight) : from(_from), to(_to), weight(_weight) {}\r
+\r
\r
class CV_EXPORTS Graph\r
{\r
void addEdge(int from, int to, float weight);\r
template <typename B> B forEach(B body) const;\r
template <typename B> B walkBreadthFirst(int from, B body) const;\r
- \r
+\r
private:\r
std::vector< std::list<GraphEdge> > edges_;\r
};\r
const cv::Mat& matchingMask() const { return matching_mask_; }
void setMatchingMask(const cv::Mat &mask)
- {
+ {
CV_Assert(mask.type() == CV_8U && mask.cols == mask.rows);
- matching_mask_ = mask.clone();
+ matching_mask_ = mask.clone();
}
Ptr<detail::BundleAdjusterBase> bundleAdjuster() { return bundle_adjuster_; }
Ptr<WarperCreator> warper() { return warper_; }
const Ptr<WarperCreator> warper() const { return warper_; }
- void setWarper(Ptr<WarperCreator> warper) { warper_ = warper; }
+ void setWarper(Ptr<WarperCreator> creator) { warper_ = creator; }
Ptr<detail::ExposureCompensator> exposureCompensator() { return exposure_comp_; }
const Ptr<detail::ExposureCompensator> exposureCompensator() const { return exposure_comp_; }
Ptr<detail::Blender> blender() { return blender_; }
const Ptr<detail::Blender> blender() const { return blender_; }
- void setBlender(Ptr<detail::Blender> blender) { blender_ = blender; }
+ void setBlender(Ptr<detail::Blender> b) { blender_ = b; }
private:
Stitcher() {}
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/stitching/stitcher.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
#include "perf_precomp.hpp"\r
\r
#include "opencv2/highgui/highgui.hpp"\r
+#include "opencv2/core/internal.hpp"\r
#include "opencv2/flann/flann.hpp"\r
#include "opencv2/opencv_modules.hpp"\r
\r
typedef TestBaseWithParam<String> stitch;\r
typedef TestBaseWithParam<String> match;\r
\r
-#if HAVE_OPENCV_NONFREE\r
+#ifdef HAVE_OPENCV_NONFREE\r
#define TEST_DETECTORS testing::Values("surf", "orb")\r
#else\r
#define TEST_DETECTORS testing::Values<String>("orb")\r
PERF_TEST_P(stitch, a123, TEST_DETECTORS)\r
{\r
Mat pano;\r
- \r
+\r
vector<Mat> imgs;\r
imgs.push_back( imread( getDataPath("stitching/a1.jpg") ) );\r
imgs.push_back( imread( getDataPath("stitching/a2.jpg") ) );\r
imgs.push_back( imread( getDataPath("stitching/a3.jpg") ) );\r
\r
- Stitcher::Status status;\r
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"\r
? (detail::FeaturesFinder*)new detail::OrbFeaturesFinder()\r
: (detail::FeaturesFinder*)new detail::SurfFeaturesFinder();\r
stitcher.setRegistrationResol(WORK_MEGAPIX);\r
\r
startTimer();\r
- status = stitcher.stitch(imgs, pano);\r
+ stitcher.stitch(imgs, pano);\r
stopTimer();\r
}\r
}\r
PERF_TEST_P(stitch, b12, TEST_DETECTORS)\r
{\r
Mat pano;\r
- \r
+\r
vector<Mat> imgs;\r
imgs.push_back( imread( getDataPath("stitching/b1.jpg") ) );\r
imgs.push_back( imread( getDataPath("stitching/b2.jpg") ) );\r
\r
- Stitcher::Status status;\r
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"\r
? (detail::FeaturesFinder*)new detail::OrbFeaturesFinder()\r
: (detail::FeaturesFinder*)new detail::SurfFeaturesFinder();\r
stitcher.setRegistrationResol(WORK_MEGAPIX);\r
\r
startTimer();\r
- status = stitcher.stitch(imgs, pano);\r
+ stitcher.stitch(imgs, pano);\r
stopTimer();\r
}\r
}\r
Mat_<int> N(num_images, num_images); N.setTo(0);\r
Mat_<double> I(num_images, num_images); I.setTo(0);\r
\r
- Rect dst_roi = resultRoi(corners, images);\r
+ //Rect dst_roi = resultRoi(corners, images);\r
Mat subimg1, subimg2;\r
Mat_<uchar> submask1, submask2, intersect;\r
\r
\r
block_corners.push_back(corners[img_idx] + bl_tl);\r
block_images.push_back(images[img_idx](Rect(bl_tl, bl_br)));\r
- block_masks.push_back(make_pair(masks[img_idx].first(Rect(bl_tl, bl_br)), \r
+ block_masks.push_back(make_pair(masks[img_idx].first(Rect(bl_tl, bl_br)),\r
masks[img_idx].second));\r
}\r
}\r
vector<double> gains = compensator.gains();\r
gain_maps_.resize(num_images);\r
\r
- Mat_<float> ker(1, 3); \r
+ Mat_<float> ker(1, 3);\r
ker(0,0) = 0.25; ker(0,1) = 0.5; ker(0,2) = 0.25;\r
\r
int bl_idx = 0;\r
for (int by = 0; by < bl_per_img.height; ++by)\r
for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx)\r
gain_maps_[img_idx](by, bx) = static_cast<float>(gains[bl_idx]);\r
- \r
+\r
sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker);\r
sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker);\r
}\r
: matcher(other.matcher), features(other.features),\r
pairwise_matches(other.pairwise_matches), near_pairs(other.near_pairs) {}\r
\r
- MatchPairsBody(FeaturesMatcher &matcher, const vector<ImageFeatures> &features,\r
- vector<MatchesInfo> &pairwise_matches, vector<pair<int,int> > &near_pairs)\r
- : matcher(matcher), features(features),\r
- pairwise_matches(pairwise_matches), near_pairs(near_pairs) {}\r
+ MatchPairsBody(FeaturesMatcher &_matcher, const vector<ImageFeatures> &_features,\r
+ vector<MatchesInfo> &_pairwise_matches, vector<pair<int,int> > &_near_pairs)\r
+ : matcher(_matcher), features(_features),\r
+ pairwise_matches(_pairwise_matches), near_pairs(_near_pairs) {}\r
\r
void operator ()(const BlockedRange &r) const\r
{\r
\r
struct IncDistance\r
{\r
- IncDistance(vector<int> &dists) : dists(&dists[0]) {}\r
+ IncDistance(vector<int> &vdists) : dists(&vdists[0]) {}\r
void operator ()(const GraphEdge &edge) { dists[edge.to] = dists[edge.from] + 1; }\r
int* dists;\r
};\r
\r
struct CalcRotation\r
{\r
- CalcRotation(int num_images, const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)\r
- : num_images(num_images), pairwise_matches(&pairwise_matches[0]), cameras(&cameras[0]) {}\r
+ CalcRotation(int _num_images, const vector<MatchesInfo> &_pairwise_matches, vector<CameraParams> &_cameras)\r
+ : num_images(_num_images), pairwise_matches(&_pairwise_matches[0]), cameras(&_cameras[0]) {}\r
\r
void operator ()(const GraphEdge &edge)\r
{\r
// Compute number of correspondences\r
total_num_matches_ = 0;\r
for (size_t i = 0; i < edges_.size(); ++i)\r
- total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ + \r
+ total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ +\r
edges_[i].second].num_inliers);\r
\r
- CvLevMarq solver(num_images_ * num_params_per_cam_, \r
+ CvLevMarq solver(num_images_ * num_params_per_cam_,\r
total_num_matches_ * num_errs_per_measurement_,\r
term_criteria_);\r
\r
\r
svd(cameras[i].R, SVD::FULL_UV);\r
Mat R = svd.u * svd.vt;\r
- if (determinant(R) < 0) \r
+ if (determinant(R) < 0)\r
R *= -1;\r
\r
Mat rvec;\r
calcDeriv(err1_, err2_, 2 * step, jac.col(i * 7));\r
cam_params_.at<double>(i * 7, 0) = val;\r
}\r
- if (refinement_mask_.at<uchar>(0, 2)) \r
+ if (refinement_mask_.at<uchar>(0, 2))\r
{\r
val = cam_params_.at<double>(i * 7 + 1, 0);\r
cam_params_.at<double>(i * 7 + 1, 0) = val - step;\r
calcDeriv(err1_, err2_, 2 * step, jac.col(i * 7 + 1));\r
cam_params_.at<double>(i * 7 + 1, 0) = val;\r
}\r
- if (refinement_mask_.at<uchar>(1, 2)) \r
+ if (refinement_mask_.at<uchar>(1, 2))\r
{\r
val = cam_params_.at<double>(i * 7 + 2, 0);\r
cam_params_.at<double>(i * 7 + 2, 0) = val - step;\r
\r
svd(cameras[i].R, SVD::FULL_UV);\r
Mat R = svd.u * svd.vt;\r
- if (determinant(R) < 0) \r
+ if (determinant(R) < 0)\r
R *= -1;\r
\r
Mat rvec;\r
double mult = sqrt(f1 * f2);\r
err.at<double>(3 * match_idx, 0) = mult * (x1 - x2);\r
err.at<double>(3 * match_idx + 1, 0) = mult * (y1 - y2);\r
- err.at<double>(3 * match_idx + 2, 0) = mult * (z1 - z2); \r
+ err.at<double>(3 * match_idx + 2, 0) = mult * (z1 - z2);\r
\r
match_idx++;\r
}\r
continue;\r
int comp1 = comps.findSetByElem(i);\r
int comp2 = comps.findSetByElem(j);\r
- if (comp1 != comp2) \r
+ if (comp1 != comp2)\r
comps.mergeSets(comp1, comp2);\r
}\r
}\r
vector<int> indices_removed;\r
for (int i = 0; i < num_images; ++i)\r
if (comps.findSetByElem(i) == max_comp)\r
- indices.push_back(i); \r
+ indices.push_back(i);\r
else\r
indices_removed.push_back(i);\r
\r
\r
LOG("Removed some images, because can't match them or there are too similar images: (");\r
LOG(indices_removed[0] + 1);\r
- for (size_t i = 1; i < indices_removed.size(); ++i) \r
+ for (size_t i = 1; i < indices_removed.size(); ++i)\r
LOG(", " << indices_removed[i]+1);\r
LOGLN(").");\r
LOGLN("Try to decrease --match_conf value and/or check if you're stitching duplicates.");\r
#ifdef HAVE_OPENCV_GPU
if (try_use_gpu && gpu::getCudaEnabledDeviceCount() > 0)
{
-#if HAVE_OPENCV_NONFREE
+#ifdef HAVE_OPENCV_NONFREE
stitcher.setFeaturesFinder(new detail::SurfFeaturesFinderGpu());
#else
stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder());
else
#endif
{
-#if HAVE_OPENCV_NONFREE
+#ifdef HAVE_OPENCV_NONFREE
stitcher.setFeaturesFinder(new detail::SurfFeaturesFinder());
#else
stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder());
}
// Warp images and their masks
- Ptr<detail::RotationWarper> warper = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
+ Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
for (size_t i = 0; i < imgs_.size(); ++i)
{
Mat_<float> K;
cameras_[i].K().convertTo(K, CV_32F);
- K(0,0) *= (float)seam_work_aspect_;
+ K(0,0) *= (float)seam_work_aspect_;
K(0,2) *= (float)seam_work_aspect_;
- K(1,1) *= (float)seam_work_aspect_;
+ K(1,1) *= (float)seam_work_aspect_;
K(1,2) *= (float)seam_work_aspect_;
- corners[i] = warper->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
+ corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
sizes[i] = images_warped[i].size();
- warper->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
+ w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
}
vector<Mat> images_warped_f(imgs_.size());
Mat img_warped, img_warped_s;
Mat dilated_mask, seam_mask, mask, mask_warped;
- double compose_seam_aspect = 1;
+ //double compose_seam_aspect = 1;
double compose_work_aspect = 1;
bool is_blender_prepared = false;
is_compose_scale_set = true;
// Compute relative scales
- compose_seam_aspect = compose_scale / seam_scale_;
+ //compose_seam_aspect = compose_scale / seam_scale_;
compose_work_aspect = compose_scale / work_scale_;
// Update warped image scale
warped_image_scale_ *= static_cast<float>(compose_work_aspect);
- warper = warper_->create((float)warped_image_scale_);
+ w = warper_->create((float)warped_image_scale_);
// Update corners and sizes
for (size_t i = 0; i < imgs_.size(); ++i)
Mat K;
cameras_[i].K().convertTo(K, CV_32F);
- Rect roi = warper->warpRoi(sz, K, cameras_[i].R);
+ Rect roi = w->warpRoi(sz, K, cameras_[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
cameras_[img_idx].K().convertTo(K, CV_32F);
// Warp the current image
- warper->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
+ w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
- warper->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
+ w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
// Compensate exposure
exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);
#include "test_precomp.hpp"\r
#include "opencv2/opencv_modules.hpp"\r
\r
-#if HAVE_OPENCV_NONFREE\r
+#ifdef HAVE_OPENCV_NONFREE\r
\r
using namespace cv;\r
using namespace std;\r
rois.push_back(Rect(img.cols / 2, img.rows / 2, img.cols - img.cols / 2, img.rows - img.rows / 2));\r
detail::ImageFeatures roi_features;\r
(*finder)(img, roi_features, rois);\r
- \r
+\r
int tl_rect_count = 0, br_rect_count = 0, bad_count = 0;\r
for (size_t i = 0; i < roi_features.keypoints.size(); ++i)\r
{\r
+#ifdef __GNUC__\r
+# pragma GCC diagnostic ignored "-Wmissing-declarations"\r
+#endif\r
+\r
#ifndef __OPENCV_TEST_PRECOMP_HPP__\r
#define __OPENCV_TEST_PRECOMP_HPP__\r
\r
endif()
set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE)
-
+
ocv_add_module(ts opencv_core)
ocv_glob_module_sources()
ocv_module_include_directories()
if(BUILD_SHARED_LIBS AND NOT MINGW)
add_definitions(-DGTEST_CREATE_SHARED_LIBRARY=1)
- if (MSVC AND NOT ENABLE_NOISY_WARNINGS)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4275")
- endif()
else()
add_definitions(-DGTEST_CREATE_SHARED_LIBRARY=0)
endif()
# include <android/api-level.h>
# define GTEST_HAS_CLONE (__ANDROID_API__ > 7 && __arm__)
# define GTEST_HAS_POSIX_RE (__ANDROID_API__ > 7)
-# define GTEST_HAS_STD_WSTRING _GLIBCXX_USE_WCHAR_T
+# if defined _GLIBCXX_USE_WCHAR_T && _GLIBCXX_USE_WCHAR_T
+# define GTEST_HAS_STD_WSTRING 1
+# else
+# define GTEST_HAS_STD_WSTRING 0
+# endif
#endif
#include <stdarg.h> // for va_list
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4275 4355 4127 )
+#ifdef _MSC_VER
+#pragma warning( disable: 4127 )
#endif
+#define GTEST_DONT_DEFINE_FAIL 0
+#define GTEST_DONT_DEFINE_SUCCEED 0
+#define GTEST_DONT_DEFINE_ASSERT_EQ 0
+#define GTEST_DONT_DEFINE_ASSERT_NE 0
+#define GTEST_DONT_DEFINE_ASSERT_LE 0
+#define GTEST_DONT_DEFINE_ASSERT_LT 0
+#define GTEST_DONT_DEFINE_ASSERT_GE 0
+#define GTEST_DONT_DEFINE_ASSERT_GT 0
+#define GTEST_DONT_DEFINE_TEST 0
+
#include "opencv2/ts/ts_gtest.h"
+
+#ifndef GTEST_USES_SIMPLE_RE
+# define GTEST_USES_SIMPLE_RE 0
+#endif
+#ifndef GTEST_USES_POSIX_RE
+# define GTEST_USES_POSIX_RE 0
+#endif
+
#include "opencv2/core/core.hpp"
namespace cvtest
using cv::Rect;
class CV_EXPORTS TS;
-
+
CV_EXPORTS int64 readSeed(const char* str);
-
+
CV_EXPORTS void randUni( RNG& rng, Mat& a, const Scalar& param1, const Scalar& param2 );
inline unsigned randInt( RNG& rng )
{
return (double)rng;
}
-
-
+
+
CV_EXPORTS const char* getTypeName( int type );
CV_EXPORTS int typeByName( const char* type_name );
CV_EXPORTS string vec2str(const string& sep, const int* v, size_t nelems);
-
+
inline int clipInt( int val, int min_val, int max_val )
{
if( val < min_val )
CV_EXPORTS double getMinVal(int depth);
CV_EXPORTS double getMaxVal(int depth);
-
+
CV_EXPORTS Size randomSize(RNG& rng, double maxSizeLog);
-CV_EXPORTS void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz);
+CV_EXPORTS void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz);
CV_EXPORTS int randomType(RNG& rng, int typeMask, int minChannels, int maxChannels);
CV_EXPORTS Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi);
CV_EXPORTS Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi);
CV_EXPORTS void convert(const Mat& src, Mat& dst, int dtype, double alpha=1, double beta=0);
CV_EXPORTS void copy(const Mat& src, Mat& dst, const Mat& mask=Mat(), bool invertMask=false);
CV_EXPORTS void set(Mat& dst, const Scalar& gamma, const Mat& mask=Mat());
-
+
// working with multi-channel arrays
CV_EXPORTS void extract( const Mat& a, Mat& plane, int coi );
CV_EXPORTS void insert( const Mat& plane, Mat& a, int coi );
// checks that the array does not have NaNs and/or Infs and all the elements are
// within [min_val,max_val). idx is the index of the first "bad" element.
CV_EXPORTS int check( const Mat& data, double min_val, double max_val, vector<int>* idx );
-
+
// modifies values that are close to zero
CV_EXPORTS void patchZeros( Mat& mat, double level );
-
+
CV_EXPORTS void transpose(const Mat& src, Mat& dst);
CV_EXPORTS void erode(const Mat& src, Mat& dst, const Mat& _kernel, Point anchor=Point(-1,-1),
int borderType=IPL_BORDER_CONSTANT, const Scalar& borderValue=Scalar());
int borderType, const Scalar& borderValue=Scalar());
CV_EXPORTS Mat calcSobelKernel2D( int dx, int dy, int apertureSize, int origin=0 );
CV_EXPORTS Mat calcLaplaceKernel2D( int aperture_size );
-
+
CV_EXPORTS void initUndistortMap( const Mat& a, const Mat& k, Size sz, Mat& mapx, Mat& mapy );
-
+
CV_EXPORTS void minMaxLoc(const Mat& src, double* minval, double* maxval,
vector<int>* minloc, vector<int>* maxloc, const Mat& mask=Mat());
CV_EXPORTS double norm(const Mat& src, int normType, const Mat& mask=Mat());
CV_EXPORTS double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask=Mat());
CV_EXPORTS Scalar mean(const Mat& src, const Mat& mask=Mat());
-
+
CV_EXPORTS bool cmpUlps(const Mat& data, const Mat& refdata, int expMaxDiff, double* realMaxDiff, vector<int>* idx);
-
+
// compares two arrays. max_diff is the maximum actual difference,
// success_err_level is maximum allowed difference, idx is the index of the first
// element for which difference is >success_err_level
CV_EXPORTS int cmpEps( const Mat& data, const Mat& refdata, double* max_diff,
double success_err_level, vector<int>* idx,
bool element_wise_relative_error );
-
+
// a wrapper for the previous function. in case of error prints the message to log file.
CV_EXPORTS int cmpEps2( TS* ts, const Mat& data, const Mat& refdata, double success_err_level,
bool element_wise_relative_error, const char* desc );
-
+
CV_EXPORTS int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
double eps, const char* param_name );
-
+
CV_EXPORTS void logicOp(const Mat& src1, const Mat& src2, Mat& dst, char c);
CV_EXPORTS void logicOp(const Mat& src, const Scalar& s, Mat& dst, char c);
CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst);
-CV_EXPORTS void min(const Mat& src, double s, Mat& dst);
+CV_EXPORTS void min(const Mat& src, double s, Mat& dst);
CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst);
-CV_EXPORTS void max(const Mat& src, double s, Mat& dst);
-
+CV_EXPORTS void max(const Mat& src, double s, Mat& dst);
+
CV_EXPORTS void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop);
-CV_EXPORTS void compare(const Mat& src, double s, Mat& dst, int cmpop);
+CV_EXPORTS void compare(const Mat& src, double s, Mat& dst, int cmpop);
CV_EXPORTS void gemm(const Mat& src1, const Mat& src2, double alpha,
const Mat& src3, double beta, Mat& dst, int flags);
CV_EXPORTS void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& shift );
const Mat* m;
};
-CV_EXPORTS std::ostream& operator << (std::ostream& out, const MatInfo& m);
-
+CV_EXPORTS std::ostream& operator << (std::ostream& out, const MatInfo& m);
+
struct CV_EXPORTS MatComparator
{
public:
MatComparator(double maxdiff, int context);
-
+
::testing::AssertionResult operator()(const char* expr1, const char* expr2,
const Mat& m1, const Mat& m2);
-
+
double maxdiff;
double realmaxdiff;
vector<int> loc0;
struct TestInfo
{
TestInfo();
-
+
// pointer to the test
BaseTest* test;
// seed value right before the data for the failed test case is prepared.
uint64 rng_seed;
-
+
// seed value right before running the test
uint64 rng_seed0;
struct CV_EXPORTS TSParams
{
TSParams();
-
+
// RNG seed, passed to and updated by every test executed.
uint64 rng_seed;
-
+
// whether to use IPP, MKL etc. or not
bool use_optimized;
-
+
// extensivity of the tests, scale factor for test_case_count
double test_case_count_scale;
};
-
+
class CV_EXPORTS TS
{
public:
};
static TS* ptr();
-
+
// initialize test system before running the first test
virtual void init( const string& modulename );
-
+
// low-level printing functions that are used by individual tests and by the system itself
virtual void printf( int streams, const char* fmt, ... );
virtual void vprintf( int streams, const char* fmt, va_list arglist );
// sets information about a failed test
virtual void set_failed_test_info( int fail_code );
-
+
virtual void set_gtest_status();
// test error codes
// returns textual description of failure code
static string str_from_code( int code );
-
+
protected:
// these are allocated within a test to try to keep them valid in case of stack corruption
// information about the current test
TestInfo current_test_info;
-
+
// the path to data files used by tests
string data_path;
-
+
TSParams params;
std::string output_buf[MAX_IDX];
};
virtual void run_func(void) = 0;
int test_case_idx;
int progress;
- double t, freq;
+ double t, freq;
template<class F>
int run_test_case( int expected_code, const string& _descr, F f)
t = new_t;
}
progress = update_progress(progress, test_case_idx, 0, dt);
-
+
int errcount = 0;
bool thrown = false;
const char* descr = _descr.c_str() ? _descr.c_str() : "";
errcount = 1;
}
test_case_idx++;
-
+
return errcount;
}
};
-
+
struct CV_EXPORTS DefaultRngAuto
{
const uint64 old_state;
-
+
DefaultRngAuto() : old_state(cv::theRNG().state) { cv::theRNG().state = (uint64)-1; }
~DefaultRngAuto() { cv::theRNG().state = old_state; }
-
+
DefaultRngAuto& operator=(const DefaultRngAuto&);
};
-
+
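// Illustrative usage sketch (the enclosing cvtest namespace is assumed):
// DefaultRngAuto is an RAII guard, so a local instance pins cv::theRNG() to
// the fixed state (uint64)-1 for the current scope and restores the previous
// state when the scope ends:
//   {
//       cvtest::DefaultRngAuto fixedRng;
//       // randomized code here runs with a reproducible RNG state
//   }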
}
// fills c with zeros
#endif // __GNUC__
// Determines the platform on which Google Test is compiled.
+#define GTEST_OS_CYGWIN 0
+#define GTEST_OS_SYMBIAN 0
+#define GTEST_OS_WINDOWS 0
+#define GTEST_OS_WINDOWS_MOBILE 0
+#define GTEST_OS_WINDOWS_MINGW 0
+#define GTEST_OS_WINDOWS_DESKTOP 0
+#define GTEST_OS_MAC 0
+#define GTEST_OS_MAC_IOS 0
+#define GTEST_OS_LINUX 0
+#define GTEST_OS_LINUX_ANDROID 0
+#define GTEST_OS_ZOS 0
+#define GTEST_OS_SOLARIS 0
+#define GTEST_OS_AIX 0
+#define GTEST_OS_HPUX 0
+#define GTEST_OS_NACL 0
+
+
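// Descriptive note (added for clarity; the motivation is presumed): with every
// GTEST_OS_* macro pre-defined to 0, a later check such as
//   #if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
// never tests an undefined identifier, so -Wundef / MSVC C4668 style warnings
// stay quiet; each platform branch below #undef's its macro and redefines it to 1.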
#ifdef __CYGWIN__
+# undef GTEST_OS_CYGWIN
# define GTEST_OS_CYGWIN 1
#elif defined __SYMBIAN32__
+# undef GTEST_OS_SYMBIAN
# define GTEST_OS_SYMBIAN 1
#elif defined _WIN32
+# undef GTEST_OS_WINDOWS
# define GTEST_OS_WINDOWS 1
# ifdef _WIN32_WCE
+# undef GTEST_OS_WINDOWS_MOBILE
# define GTEST_OS_WINDOWS_MOBILE 1
# elif defined(__MINGW__) || defined(__MINGW32__)
+# undef GTEST_OS_WINDOWS_MINGW
# define GTEST_OS_WINDOWS_MINGW 1
# else
+# undef GTEST_OS_WINDOWS_DESKTOP
# define GTEST_OS_WINDOWS_DESKTOP 1
# endif // _WIN32_WCE
#elif defined __APPLE__
+# undef GTEST_OS_MAC
# define GTEST_OS_MAC 1
# include <TargetConditionals.h>
# if TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+# undef GTEST_OS_MAC_IOS
# define GTEST_OS_MAC_IOS 1
# endif
#include <TargetConditionals.h>
#if TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
-#define GTEST_OS_MAC_IOS 1
+# undef GTEST_OS_MAC_IOS
+# define GTEST_OS_MAC_IOS 1
#endif
#elif defined __linux__
+# undef GTEST_OS_LINUX
# define GTEST_OS_LINUX 1
# ifdef ANDROID
+# undef GTEST_OS_LINUX_ANDROID
# define GTEST_OS_LINUX_ANDROID 1
# endif // ANDROID
#elif defined __MVS__
+# undef GTEST_OS_ZOS
# define GTEST_OS_ZOS 1
#elif defined(__sun) && defined(__SVR4)
+# undef GTEST_OS_SOLARIS
# define GTEST_OS_SOLARIS 1
#elif defined(_AIX)
+# undef GTEST_OS_AIX
# define GTEST_OS_AIX 1
#elif defined(__hpux)
+# undef GTEST_OS_HPUX
# define GTEST_OS_HPUX 1
#elif defined __native_client__
+# undef GTEST_OS_NACL
# define GTEST_OS_NACL 1
#endif // __CYGWIN__
// <stddef.h>.
# include <regex.h> // NOLINT
-# define GTEST_USES_POSIX_RE 1
+# define GTEST_USES_POSIX_RE 1
+# define GTEST_USES_SIMPLE_RE 0
#elif GTEST_OS_WINDOWS
// <regex.h> is not available on Windows. Use our own simple regex
// implementation instead.
# define GTEST_USES_SIMPLE_RE 1
+# define GTEST_USES_POSIX_RE 0
#else
// <regex.h> may not be available on this platform. Use our own
// simple regex implementation instead.
# define GTEST_USES_SIMPLE_RE 1
+# define GTEST_USES_POSIX_RE 0
#endif // GTEST_HAS_POSIX_RE
// Determines whether test results can be streamed to a socket.
#if GTEST_OS_LINUX
# define GTEST_CAN_STREAM_RESULTS_ 1
+#else
+# define GTEST_CAN_STREAM_RESULTS_ 0
#endif
// Defines some utility macros.
};
// This interface knows how to report a test part result.
-class TestPartResultReporterInterface {
+class GTEST_API_ TestPartResultReporterInterface {
public:
virtual ~TestPartResultReporterInterface() {}
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4127 4251)
-#endif
-
#include "opencv2/core/core_c.h"
#include "opencv2/ts/ts.hpp"
-#if GTEST_LINKED_AS_SHARED_LIBRARY
+#ifdef GTEST_LINKED_AS_SHARED_LIBRARY
#error ts module should not have GTEST_LINKED_AS_SHARED_LIBRARY defined
#endif
static jmp_buf tsJmpMark;
-void signalHandler( int sig_code )
+static void signalHandler( int sig_code )
{
int code = TS::FAIL_EXCEPTION;
switch( sig_code )
read_params( ts->get_file_storage() );
ts->update_context( 0, -1, true );
ts->update_context( this, -1, true );
-
+
if( !::testing::GTEST_FLAG(catch_exceptions) )
run( start_from );
else
{
const char* errorStr = cvErrorStr(exc.code);
char buf[1 << 16];
-
+
sprintf( buf, "OpenCV Error: %s (%s) in %s, file %s, line %d",
errorStr, exc.err.c_str(), exc.func.size() > 0 ?
exc.func.c_str() : "unknown function", exc.file.c_str(), exc.line );
ts->set_failed_test_info( TS::FAIL_EXCEPTION );
}
}
-
+
ts->set_gtest_status();
}
int errcount = 0;
bool thrown = false;
const char* descr = _descr.c_str() ? _descr.c_str() : "";
-
+
try
{
run_func();
test_case_idx = -1;
}
-
+
TS::TS()
{
} // ctor
void TS::init( const string& modulename )
{
char* datapath_dir = getenv("OPENCV_TEST_DATA_PATH");
-
+
if( datapath_dir )
{
char buf[1024];
sprintf( buf, "%s%s%s/", datapath_dir, haveSlash ? "" : "/", modulename.c_str() );
data_path = string(buf);
}
-
+
cv::redirectError((cv::ErrorCallback)tsErrorCallback, this);
if( ::testing::GTEST_FLAG(catch_exceptions) )
signal( tsSigId[i], SIG_DFL );
#endif
}
-
+
if( params.use_optimized == 0 )
cv::setUseOptimized(false);
-
+
rng = RNG(params.rng_seed);
}
int code = get_err_code();
if( code >= 0 )
return SUCCEED();
-
+
char seedstr[32];
sprintf(seedstr, "%08x%08x", (unsigned)(current_test_info.rng_seed>>32),
(unsigned)(current_test_info.rng_seed));
-
+
string logs = "";
if( !output_buf[SUMMARY_IDX].empty() )
logs += "\n-----------------------------------\n\tSUM: " + output_buf[SUMMARY_IDX];
if( !output_buf[CONSOLE_IDX].empty() )
logs += "\n-----------------------------------\n\tCONSOLE: " + output_buf[CONSOLE_IDX];
logs += "\n-----------------------------------\n";
-
+
FAIL() << "\n\tfailure reason: " << str_from_code(code) <<
"\n\ttest case #" << current_test_info.test_case_idx <<
"\n\tseed: " << seedstr << logs;
CvFileStorage* TS::get_file_storage() { return 0; }
-
+
void TS::update_context( BaseTest* test, int test_case_idx, bool update_ts_context )
{
if( current_test_info.test != test )
rng = RNG(params.rng_seed);
current_test_info.rng_seed0 = current_test_info.rng_seed = rng.state;
}
-
+
current_test_info.test = test;
current_test_info.test_case_idx = test_case_idx;
current_test_info.code = 0;
current_test_info.rng_seed = rng.state;
}
-
+
void TS::set_failed_test_info( int fail_code )
{
if( current_test_info.code >= 0 )
va_end( l );
}
}
-
+
TS ts;
TS* TS::ptr() { return &ts; }
}
return result;
}
-
-
+
+
Size randomSize(RNG& rng, double maxSizeLog)
{
double width_log = rng.uniform(0., maxSizeLog);
depth == CV_32F ? FLT_MAX : depth == CV_64F ? DBL_MAX : -1;
CV_Assert(val != -1);
return val;
-}
-
+}
+
Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi)
{
Size size0 = size;
size0.width += std::max(rng.uniform(0, 10) - 5, 0);
size0.height += std::max(rng.uniform(0, 10) - 5, 0);
}
-
+
Mat m(size0, type);
-
+
rng.fill(m, RNG::UNIFORM, Scalar::all(minVal), Scalar::all(maxVal));
if( size0 == size )
return m;
}
eqsize = eqsize && size[i] == size0[i];
}
-
+
Mat m(dims, &size0[0], type);
-
+
rng.fill(m, RNG::UNIFORM, Scalar::all(minVal), Scalar::all(maxVal));
if( eqsize )
return m;
return m(&r[0]);
}
-
+
void add(const Mat& _a, double alpha, const Mat& _b, double beta,
Scalar gamma, Mat& c, int ctype, bool calcAbs)
{
}
else
CV_Assert(a.size == b.size);
-
+
if( ctype < 0 )
ctype = a.depth();
ctype = CV_MAKETYPE(CV_MAT_DEPTH(ctype), a.channels());
c.create(a.dims, &a.size[0], ctype);
const Mat *arrays[] = {&a, &b, &c, 0};
Mat planes[3], buf[3];
-
+
NAryMatIterator it(arrays, planes);
size_t i, nplanes = it.nplanes;
- int cn=a.channels();
+ int cn=a.channels();
int total = (int)planes[0].total(), maxsize = std::min(12*12*std::max(12/cn, 1), total);
-
+
CV_Assert(planes[0].rows == 1);
buf[0].create(1, maxsize, CV_64FC(cn));
if(!b.empty())
buf[1].create(1, maxsize, CV_64FC(cn));
buf[2].create(1, maxsize, CV_64FC(cn));
scalarToRawData(gamma, buf[2].data, CV_64FC(cn), (int)(maxsize*cn));
-
+
for( i = 0; i < nplanes; i++, ++it)
{
for( int j = 0; j < total; j += maxsize )
Mat apart0 = planes[0].colRange(j, j2);
Mat cpart0 = planes[2].colRange(j, j2);
Mat apart = buf[0].colRange(0, j2 - j);
-
+
apart0.convertTo(apart, apart.type(), alpha);
size_t k, n = (j2 - j)*cn;
double* aptr = (double*)apart.data;
const double* gptr = (const double*)buf[2].data;
-
+
if( b.empty() )
{
for( k = 0; k < n; k++ )
Mat bpart = buf[1].colRange(0, (int)(j2 - j));
bpart0.convertTo(bpart, bpart.type(), beta);
const double* bptr = (const double*)bpart.data;
-
+
for( k = 0; k < n; k++ )
aptr[k] += bptr[k] + gptr[k];
}
CV_Assert(0);
}
}
-
+
void convert(const Mat& src, Mat& dst, int dtype, double alpha, double beta)
{
dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), src.channels());
copy( src, dst );
return;
}
-
+
const Mat *arrays[]={&src, &dst, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total()*planes[0].channels();
size_t i, nplanes = it.nplanes;
-
+
for( i = 0; i < nplanes; i++, ++it)
{
const uchar* sptr = planes[0].data;
uchar* dptr = planes[1].data;
-
+
switch( src.depth() )
{
case CV_8U:
}
}
}
-
-
+
+
void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask)
{
dst.create(src.dims, &src.size[0], src.type());
-
+
if(mask.empty())
{
const Mat* arrays[] = {&src, &dst, 0};
NAryMatIterator it(arrays, planes);
size_t i, nplanes = it.nplanes;
size_t planeSize = planes[0].total()*src.elemSize();
-
+
for( i = 0; i < nplanes; i++, ++it )
memcpy(planes[1].data, planes[0].data, planeSize);
-
+
return;
}
-
+
CV_Assert( src.size == mask.size && mask.type() == CV_8U );
-
+
const Mat *arrays[]={&src, &dst, &mask, 0};
Mat planes[3];
-
+
NAryMatIterator it(arrays, planes);
size_t j, k, elemSize = src.elemSize(), total = planes[0].total();
size_t i, nplanes = it.nplanes;
-
+
for( i = 0; i < nplanes; i++, ++it)
{
const uchar* sptr = planes[0].data;
uchar* dptr = planes[1].data;
const uchar* mptr = planes[2].data;
-
+
for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize )
{
if( (mptr[j] != 0) ^ invertMask )
}
}
-
+
void set(Mat& dst, const Scalar& gamma, const Mat& mask)
{
double buf[12];
scalarToRawData(gamma, &buf, dst.type(), dst.channels());
const uchar* gptr = (const uchar*)&buf[0];
-
+
if(mask.empty())
{
const Mat* arrays[] = {&dst, 0};
NAryMatIterator it(arrays, &plane);
size_t i, nplanes = it.nplanes;
size_t j, k, elemSize = dst.elemSize(), planeSize = plane.total()*elemSize;
-
+
for( k = 1; k < elemSize; k++ )
if( gptr[k] != gptr[0] )
break;
bool uniform = k >= elemSize;
-
+
for( i = 0; i < nplanes; i++, ++it )
{
uchar* dptr = plane.data;
}
return;
}
-
+
CV_Assert( dst.size == mask.size && mask.type() == CV_8U );
-
+
const Mat *arrays[]={&dst, &mask, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t j, k, elemSize = dst.elemSize(), total = planes[0].total();
size_t i, nplanes = it.nplanes;
-
+
for( i = 0; i < nplanes; i++, ++it)
{
uchar* dptr = planes[0].data;
const uchar* mptr = planes[1].data;
-
+
for( j = 0; j < total; j++, dptr += elemSize )
{
if( mptr[j] )
{
CV_Assert( dst.size == src.size && src.depth() == dst.depth() &&
0 <= coi && coi < dst.channels() );
-
+
const Mat* arrays[] = {&src, &dst, 0};
Mat planes[2];
NAryMatIterator it(arrays, planes);
size_t i, nplanes = it.nplanes;
size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr = planes[0].data;
uchar* dptr = planes[1].data + coi*size0;
-
+
for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
{
for( k = 0; k < size0; k++ )
}
}
-
+
void extract(const Mat& src, Mat& dst, int coi)
{
dst.create( src.dims, &src.size[0], src.depth() );
CV_Assert( 0 <= coi && coi < src.channels() );
-
+
const Mat* arrays[] = {&src, &dst, 0};
Mat planes[2];
NAryMatIterator it(arrays, planes);
size_t i, nplanes = it.nplanes;
size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr = planes[0].data + coi*size1;
uchar* dptr = planes[1].data;
-
+
for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
{
for( k = 0; k < size1; k++ )
}
}
}
-
-
+
+
void transpose(const Mat& src, Mat& dst)
{
CV_Assert(src.dims == 2);
dst.create(src.cols, src.rows, src.type());
int i, j, k, esz = (int)src.elemSize();
-
+
for( i = 0; i < dst.rows; i++ )
{
const uchar* sptr = src.ptr(0) + i*esz;
uchar* dptr = dst.ptr(i);
-
+
for( j = 0; j < dst.cols; j++, sptr += src.step[0], dptr += esz )
{
for( k = 0; k < esz; k++ )
}
}
-
+
template<typename _Tp> static void
randUniInt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
{
}
}
-
+
template<typename _Tp> static void
randUniFlt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
{
}
}
-
+
void randUni( RNG& rng, Mat& a, const Scalar& param0, const Scalar& param1 )
{
Scalar scale = param0;
Scalar delta = param1;
double C = a.depth() < CV_32F ? 1./(65536.*65536.) : 1.;
-
+
for( int k = 0; k < 4; k++ )
{
double s = scale.val[k] - delta.val[k];
const Mat *arrays[]={&a, 0};
Mat plane;
-
+
NAryMatIterator it(arrays, &plane);
size_t i, nplanes = it.nplanes;
- int depth = a.depth(), cn = a.channels();
+ int depth = a.depth(), cn = a.channels();
size_t total = plane.total()*cn;
-
+
for( i = 0; i < nplanes; i++, ++it )
{
switch( depth )
}
}
}
-
-
+
+
template<typename _Tp> static void
erode_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
{
int width = dst.cols*src.channels(), n = (int)ofsvec.size();
const int* ofs = &ofsvec[0];
-
+
for( int y = 0; y < dst.rows; y++ )
{
const _Tp* sptr = src.ptr<_Tp>(y);
_Tp* dptr = dst.ptr<_Tp>(y);
-
+
for( int x = 0; x < width; x++ )
{
_Tp result = sptr[x + ofs[0]];
}
}
-
+
template<typename _Tp> static void
dilate_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
{
int width = dst.cols*src.channels(), n = (int)ofsvec.size();
const int* ofs = &ofsvec[0];
-
+
for( int y = 0; y < dst.rows; y++ )
{
const _Tp* sptr = src.ptr<_Tp>(y);
_Tp* dptr = dst.ptr<_Tp>(y);
-
+
for( int x = 0; x < width; x++ )
{
_Tp result = sptr[x + ofs[0]];
}
}
}
-
-
+
+
void erode(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
int borderType, const Scalar& _borderValue)
{
anchor.x, kernel.cols - anchor.x - 1,
borderType, borderValue);
dst.create( _src.size(), src.type() );
-
+
vector<int> ofs;
int step = (int)(src.step/src.elemSize1()), cn = src.channels();
for( int i = 0; i < kernel.rows; i++ )
ofs.push_back(i*step + j*cn);
if( ofs.empty() )
ofs.push_back(anchor.y*step + anchor.x*cn);
-
+
switch( src.depth() )
{
case CV_8U:
anchor.x, kernel.cols - anchor.x - 1,
borderType, borderValue);
dst.create( _src.size(), src.type() );
-
+
vector<int> ofs;
int step = (int)(src.step/src.elemSize1()), cn = src.channels();
for( int i = 0; i < kernel.rows; i++ )
ofs.push_back(i*step + j*cn);
if( ofs.empty() )
ofs.push_back(anchor.y*step + anchor.x*cn);
-
+
switch( src.depth() )
{
case CV_8U:
default:
CV_Assert(0);
}
-}
-
+}
+
template<typename _Tp> static void
filter2D_(const Mat& src, Mat& dst, const vector<int>& ofsvec, const vector<double>& coeffvec)
{
const int* ofs = &ofsvec[0];
const double* coeff = &coeffvec[0];
int width = dst.cols*dst.channels(), ncoeffs = (int)ofsvec.size();
-
+
for( int y = 0; y < dst.rows; y++ )
{
const _Tp* sptr = src.ptr<_Tp>(y);
double* dptr = dst.ptr<double>(y);
-
+
for( int x = 0; x < width; x++ )
{
double s = 0;
}
}
}
-
-
+
+
void filter2D(const Mat& _src, Mat& dst, int ddepth, const Mat& kernel,
Point anchor, double delta, int borderType, const Scalar& _borderValue)
{
anchor.x, kernel.cols - anchor.x - 1,
borderType, borderValue);
_dst.create( _src.size(), CV_MAKETYPE(CV_64F, src.channels()) );
-
+
vector<int> ofs;
vector<double> coeff(kernel.rows*kernel.cols);
Mat cmat(kernel.rows, kernel.cols, CV_64F, &coeff[0]);
convert(kernel, cmat, cmat.type());
-
+
int step = (int)(src.step/src.elemSize1()), cn = src.channels();
for( int i = 0; i < kernel.rows; i++ )
for( int j = 0; j < kernel.cols; j++ )
ofs.push_back(i*step + j*cn);
-
+
switch( src.depth() )
{
case CV_8U:
default:
CV_Assert(0);
}
-
+
convert(_dst, dst, ddepth, 1, delta);
}
else
CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
return p;
-}
-
+}
+
void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right,
int borderType, const Scalar& borderValue)
dst.create(src.rows + top + bottom, src.cols + left + right, src.type());
int i, j, k, esz = (int)src.elemSize();
int width = src.cols*esz, width1 = dst.cols*esz;
-
+
if( borderType == IPL_BORDER_CONSTANT )
{
vector<uchar> valvec((src.cols + left + right)*esz);
uchar* val = &valvec[0];
scalarToRawData(borderValue, val, src.type(), (src.cols + left + right)*src.channels());
-
+
left *= esz;
right *= esz;
for( i = 0; i < src.rows; i++ )
for( j = 0; j < right; j++ )
dptr[j + width] = val[j];
}
-
+
for( i = 0; i < top; i++ )
{
uchar* dptr = dst.ptr(i);
for( j = 0; j < width1; j++ )
dptr[j] = val[j];
}
-
+
for( i = 0; i < bottom; i++ )
{
uchar* dptr = dst.ptr(i + top + src.rows);
for( k = 0; k < esz; k++ )
rtab[i*esz + k] = j + k;
}
-
+
left *= esz;
right *= esz;
for( i = 0; i < src.rows; i++ )
{
const uchar* sptr = src.ptr(i);
uchar* dptr = dst.ptr(i + top);
-
+
for( j = 0; j < left; j++ )
dptr[j] = sptr[ltab[j]];
if( dptr + left != sptr )
for( j = 0; j < right; j++ )
dptr[j + left + width] = sptr[rtab[j]];
}
-
+
for( i = 0; i < top; i++ )
{
j = borderInterpolate(i - top, src.rows, borderType);
const uchar* sptr = dst.ptr(j + top);
uchar* dptr = dst.ptr(i);
-
+
for( k = 0; k < width1; k++ )
dptr[k] = sptr[k];
}
-
+
for( i = 0; i < bottom; i++ )
{
j = borderInterpolate(i + src.rows, src.rows, borderType);
const uchar* sptr = dst.ptr(j + top);
uchar* dptr = dst.ptr(i + top + src.rows);
-
+
for( k = 0; k < width1; k++ )
dptr[k] = sptr[k];
}
}
}
-
+
template<typename _Tp> static void
minMaxLoc_(const _Tp* src, size_t total, size_t startidx,
{
_Tp maxval = saturate_cast<_Tp>(*_maxval), minval = saturate_cast<_Tp>(*_minval);
size_t minpos = *_minpos, maxpos = *_maxpos;
-
+
if( !mask )
{
for( size_t i = 0; i < total; i++ )
}
}
}
-
+
*_maxval = maxval;
*_minval = minval;
*_maxpos = maxpos;
pos[i] = -1;
}
}
-
+
void minMaxLoc(const Mat& src, double* _minval, double* _maxval,
vector<int>* _minloc, vector<int>* _maxloc,
const Mat& mask)
CV_Assert( src.channels() == 1 );
const Mat *arrays[]={&src, &mask, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t startidx = 1, total = planes[0].total();
size_t i, nplanes = it.nplanes;
- int depth = src.depth();
+ int depth = src.depth();
double maxval = depth < CV_32F ? INT_MIN : depth == CV_32F ? -FLT_MAX : -DBL_MAX;
double minval = depth < CV_32F ? INT_MAX : depth == CV_32F ? FLT_MAX : DBL_MAX;
size_t maxidx = 0, minidx = 0;
-
+
for( i = 0; i < nplanes; i++, ++it, startidx += total )
{
const uchar* sptr = planes[0].data;
const uchar* mptr = planes[1].data;
-
+
switch( depth )
{
case CV_8U:
CV_Assert(0);
}
}
-
+
if( minidx == 0 )
minval = maxval = 0;
-
+
if( _maxval )
*_maxval = maxval;
if( _minval )
setpos( src, *_minloc, minidx );
}
-
+
static int
normHamming(const uchar* src, size_t total, int cellSize)
{
int result = 0;
int mask = cellSize == 1 ? 1 : cellSize == 2 ? 3 : cellSize == 4 ? 15 : -1;
CV_Assert( mask >= 0 );
-
+
for( size_t i = 0; i < total; i++ )
{
unsigned a = src[i];
}
return result;
}
-
-
+
+
template<typename _Tp> static double
norm_(const _Tp* src, size_t total, int cn, int normType, double startval, const uchar* mask)
{
double result = startval;
if( !mask )
total *= cn;
-
+
if( normType == NORM_INF )
{
if( !mask )
double result = startval;
if( !mask )
total *= cn;
-
+
if( normType == NORM_INF )
{
if( !mask )
}
return result;
}
-
-
+
+
double norm(const Mat& src, int normType, const Mat& mask)
{
if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
bitwise_and(src, mask, temp);
return norm(temp, normType, Mat());
}
-
+
CV_Assert( src.depth() == CV_8U );
-
+
const Mat *arrays[]={&src, 0};
Mat planes[1];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total();
size_t i, nplanes = it.nplanes;
double result = 0;
int cellSize = normType == NORM_HAMMING ? 1 : 2;
-
+
for( i = 0; i < nplanes; i++, ++it )
result += normHamming(planes[0].data, total, cellSize);
return result;
}
int normType0 = normType;
normType = normType == NORM_L2SQR ? NORM_L2 : normType;
-
+
CV_Assert( mask.empty() || (src.size == mask.size && mask.type() == CV_8U) );
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
-
+
const Mat *arrays[]={&src, &mask, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total();
size_t i, nplanes = it.nplanes;
int depth = src.depth(), cn = planes[0].channels();
double result = 0;
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr = planes[0].data;
const uchar* mptr = planes[1].data;
-
+
switch( depth )
{
case CV_8U:
return result;
}
-
+
double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask)
{
if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
bitwise_xor(src1, src2, temp);
if( !mask.empty() )
bitwise_and(temp, mask, temp);
-
+
CV_Assert( temp.depth() == CV_8U );
-
+
const Mat *arrays[]={&temp, 0};
Mat planes[1];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total();
size_t i, nplanes = it.nplanes;
double result = 0;
int cellSize = normType == NORM_HAMMING ? 1 : 2;
-
+
for( i = 0; i < nplanes; i++, ++it )
result += normHamming(planes[0].data, total, cellSize);
return result;
}
int normType0 = normType;
normType = normType == NORM_L2SQR ? NORM_L2 : normType;
-
+
CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
CV_Assert( mask.empty() || (src1.size == mask.size && mask.type() == CV_8U) );
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
const Mat *arrays[]={&src1, &src2, &mask, 0};
Mat planes[3];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total();
size_t i, nplanes = it.nplanes;
- int depth = src1.depth(), cn = planes[0].channels();
+ int depth = src1.depth(), cn = planes[0].channels();
double result = 0;
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr1 = planes[0].data;
const uchar* sptr2 = planes[1].data;
const uchar* mptr = planes[2].data;
-
+
switch( depth )
{
case CV_8U:
return result;
}
-
+
template<typename _Tp> static double
crossCorr_(const _Tp* src1, const _Tp* src2, size_t total)
{
result += (double)src1[i]*src2[i];
return result;
}
-
+
double crossCorr(const Mat& src1, const Mat& src2)
{
CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
const Mat *arrays[]={&src1, &src2, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total()*planes[0].channels();
size_t i, nplanes = it.nplanes;
- int depth = src1.depth();
+ int depth = src1.depth();
double result = 0;
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr1 = planes[0].data;
const uchar* sptr2 = planes[1].data;
-
+
switch( depth )
{
case CV_8U:
}
return result;
}
-
+
static void
logicOp_(const uchar* src1, const uchar* src2, uchar* dst, size_t total, char c)
else
for( i = 0; i < total; i++ )
dst[i] = ~src[i];
-}
-
-
+}
+
+
void logicOp( const Mat& src1, const Mat& src2, Mat& dst, char op )
{
CV_Assert( op == '&' || op == '|' || op == '^' );
dst.create( src1.dims, &src1.size[0], src1.type() );
const Mat *arrays[]={&src1, &src2, &dst, 0};
Mat planes[3];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total()*planes[0].elemSize();
size_t i, nplanes = it.nplanes;
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr1 = planes[0].data;
const uchar* sptr2 = planes[1].data;
uchar* dptr = planes[2].data;
-
+
logicOp_(sptr1, sptr2, dptr, total, op);
}
}
-
-
+
+
void logicOp(const Mat& src, const Scalar& s, Mat& dst, char op)
{
CV_Assert( op == '&' || op == '|' || op == '^' || op == '~' );
dst.create( src.dims, &src.size[0], src.type() );
const Mat *arrays[]={&src, &dst, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total()*planes[0].elemSize();
size_t i, nplanes = it.nplanes;
double buf[12];
scalarToRawData(s, buf, src.type(), (int)(96/planes[0].elemSize1()));
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr = planes[0].data;
uchar* dptr = planes[1].data;
-
+
logicOpS_(sptr, (uchar*)&buf[0], dptr, total, op);
}
}
default:
CV_Error(CV_StsBadArg, "Unknown comparison operation");
}
-}
-
-
+}
+
+
void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop)
{
CV_Assert( src1.type() == src2.type() && src1.channels() == 1 && src1.size == src2.size );
dst.create( src1.dims, &src1.size[0], CV_8U );
const Mat *arrays[]={&src1, &src2, &dst, 0};
Mat planes[3];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total();
size_t i, nplanes = it.nplanes;
- int depth = src1.depth();
-
+ int depth = src1.depth();
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr1 = planes[0].data;
const uchar* sptr2 = planes[1].data;
uchar* dptr = planes[2].data;
-
+
switch( depth )
{
case CV_8U:
}
}
}
-
+
void compare(const Mat& src, double value, Mat& dst, int cmpop)
{
CV_Assert( src.channels() == 1 );
dst.create( src.dims, &src.size[0], CV_8U );
const Mat *arrays[]={&src, &dst, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total();
size_t i, nplanes = it.nplanes;
- int depth = src.depth();
+ int depth = src.depth();
int ivalue = saturate_cast<int>(value);
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr = planes[0].data;
uchar* dptr = planes[1].data;
-
+
switch( depth )
{
case CV_8U:
}
}
-
+
template<typename _Tp> double
cmpUlpsInt_(const _Tp* src1, const _Tp* src2, size_t total, int imaxdiff,
size_t startidx, size_t& idx)
return realmaxdiff;
}
-
+
template<> double cmpUlpsInt_<int>(const int* src1, const int* src2,
size_t total, int imaxdiff,
size_t startidx, size_t& idx)
}
}
return realmaxdiff;
-}
-
+}
+
static double
cmpUlpsFlt_(const int* src1, const int* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
}
return realmaxdiff;
}
-
+
static double
cmpUlpsFlt_(const int64* src1, const int64* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
}
return realmaxdiff;
}
-
+
bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdiff, vector<int>* loc)
{
CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total()*planes[0].channels();
size_t i, nplanes = it.nplanes;
- int depth = src1.depth();
+ int depth = src1.depth();
size_t startidx = 1, idx = 0;
if(_realmaxdiff)
*_realmaxdiff = 0;
-
+
for( i = 0; i < nplanes; i++, ++it, startidx += total )
{
const uchar* sptr1 = planes[0].data;
default:
CV_Error(CV_StsUnsupportedFormat, "");
}
-
+
if(_realmaxdiff)
*_realmaxdiff = std::max(*_realmaxdiff, realmaxdiff);
}
return idx == 0;
}
-
+
template<typename _Tp> static void
checkInt_(const _Tp* a, size_t total, int imin, int imax, size_t startidx, size_t& idx)
{
}
}
-
+
template<typename _Tp> static void
checkFlt_(const _Tp* a, size_t total, double fmin, double fmax, size_t startidx, size_t& idx)
{
}
}
}
-
+
// checks that the array does not have NaNs and/or Infs and all the elements are
// within [min_val,max_val). idx is the index of the first "bad" element.
NAryMatIterator it(arrays, &plane);
size_t total = plane.total()*plane.channels();
size_t i, nplanes = it.nplanes;
- int depth = a.depth();
+ int depth = a.depth();
size_t startidx = 1, idx = 0;
int imin = 0, imax = 0;
-
+
if( depth <= CV_32S )
{
imin = cvCeil(fmin);
imax = cvFloor(fmax);
- }
-
+ }
+
for( i = 0; i < nplanes; i++, ++it, startidx += total )
{
const uchar* aptr = plane.data;
-
+
switch( depth )
{
case CV_8U:
default:
CV_Error(CV_StsUnsupportedFormat, "");
}
-
+
if( idx != 0 )
break;
}
-
+
if(idx != 0 && _idx)
setpos(a, *_idx, idx);
return idx == 0 ? 0 : -1;
bool element_wise_relative_error )
{
CV_Assert( arr.type() == refarr.type() && arr.size == refarr.size );
-
+
int ilevel = refarr.depth() <= CV_32S ? cvFloor(success_err_level) : 0;
int result = 0;
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total()*planes[0].channels(), j = total;
size_t i, nplanes = it.nplanes;
- int depth = arr.depth();
+ int depth = arr.depth();
size_t startidx = 1, idx = 0;
double realmaxdiff = 0, maxval = 0;
-
+
if(_realmaxdiff)
*_realmaxdiff = 0;
*_realmaxdiff = exp(1000.);
if(idx > 0 && _idx)
setpos(arr, *_idx, idx);
-
+
return result;
}
return cmpEps2( ts, _val, _refval, eps, true, param_name );
}
-
+
template<typename _Tp> static void
GEMM_(const _Tp* a_data0, int a_step, int a_delta,
const _Tp* b_data0, int b_step, int b_delta,
const _Tp* a_data = a_data0;
const _Tp* b_data = b_data0 + j*b_delta;
const _Tp* c_data = c_data0 + j*c_delta;
-
+
if( cn == 1 )
{
double s = 0;
else
{
double s_re = 0, s_im = 0;
-
+
for( int k = 0; k < a_cols; k++ )
{
s_re += ((double)a_data[0])*b_data[0] - ((double)a_data[1])*b_data[1];
a_data += a_delta;
b_data += b_step;
}
-
+
s_re *= alpha;
s_im *= alpha;
-
+
if( c_data )
{
s_re += c_data[0]*beta;
s_im += c_data[1]*beta;
}
-
+
d_data[j*2] = (_Tp)s_re;
d_data[j*2+1] = (_Tp)s_im;
}
}
}
}
-
-
+
+
void gemm( const Mat& _a, const Mat& _b, double alpha,
const Mat& _c, double beta, Mat& d, int flags )
{
Mat a = _a, b = _b, c = _c;
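    // descriptive note: if the output d aliases any of the inputs, the checks
    // below switch to deep copies so the reference GEMM can write d in place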
-
- if( a.data == d.data )
+
+ if( a.data == d.data )
a = a.clone();
-
- if( b.data == d.data )
+
+ if( b.data == d.data )
b = b.clone();
-
+
if( !c.empty() && c.data == d.data && (flags & cv::GEMM_3_T) )
c = c.clone();
-
+
int a_rows = a.rows, a_cols = a.cols, b_rows = b.rows, b_cols = b.cols;
int cn = a.channels();
int a_step = (int)a.step1(), a_delta = cn;
std::swap( a_rows, a_cols );
std::swap( a_step, a_delta );
}
-
+
if( flags & cv::GEMM_2_T )
{
std::swap( b_rows, b_cols );
std::swap( b_step, b_delta );
}
-
+
if( !c.empty() )
{
c_rows = c.rows;
c_cols = c.cols;
c_step = (int)c.step1();
c_delta = cn;
-
+
if( flags & cv::GEMM_3_T )
{
std::swap( c_rows, c_cols );
std::swap( c_step, c_delta );
}
-
+
CV_Assert( c.dims == 2 && c.type() == a.type() && c_rows == a_rows && c_cols == b_cols );
}
d.create(a_rows, b_cols, a.type());
-
+
if( a.depth() == CV_32F )
GEMM_(a.ptr<float>(), a_step, a_delta, b.ptr<float>(), b_step, b_delta,
!c.empty() ? c.ptr<float>() : 0, c_step, c_delta, d.ptr<float>(),
}
-double cvTsCrossCorr( const CvMat* a, const CvMat* b )
-{
- int i, j;
- int cn, ncols;
- double s = 0;
-
- cn = CV_MAT_CN(a->type);
- ncols = a->cols*cn;
-
- assert( CV_ARE_SIZES_EQ( a, b ) && CV_ARE_TYPES_EQ( a, b ) );
- for( i = 0; i < a->rows; i++ )
- {
- uchar* a_data = a->data.ptr + a->step*i;
- uchar* b_data = b->data.ptr + b->step*i;
-
- switch( CV_MAT_DEPTH(a->type) )
- {
- case CV_8U:
- for( j = 0; j < ncols; j++ )
- s += ((uchar*)a_data)[j]*((uchar*)b_data)[j];
- break;
- case CV_8S:
- for( j = 0; j < ncols; j++ )
- s += ((schar*)a_data)[j]*((schar*)b_data)[j];
- break;
- case CV_16U:
- for( j = 0; j < ncols; j++ )
- s += (double)((ushort*)a_data)[j]*((ushort*)b_data)[j];
- break;
- case CV_16S:
- for( j = 0; j < ncols; j++ )
- s += ((short*)a_data)[j]*((short*)b_data)[j];
- break;
- case CV_32S:
- for( j = 0; j < ncols; j++ )
- s += ((double)((int*)a_data)[j])*((int*)b_data)[j];
- break;
- case CV_32F:
- for( j = 0; j < ncols; j++ )
- s += ((double)((float*)a_data)[j])*((float*)b_data)[j];
- break;
- case CV_64F:
- for( j = 0; j < ncols; j++ )
- s += ((double*)a_data)[j]*((double*)b_data)[j];
- break;
- default:
- assert(0);
- return log(-1.);
- }
- }
-
- return s;
-}
-
-
template<typename _Tp> static void
transform_(const _Tp* sptr, _Tp* dptr, size_t total, int scn, int dcn, const double* mat)
{
}
}
}
-
-
+
+
void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift )
{
double mat[20];
int mattype = transmat.depth();
Mat shift = _shift.reshape(1, 0);
bool haveShift = !shift.empty();
-
+
CV_Assert( scn <= 4 && dcn <= 4 &&
(mattype == CV_32F || mattype == CV_64F) &&
(!haveShift || (shift.type() == mattype && (shift.rows == 1 || shift.cols == 1))) );
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total();
size_t i, nplanes = it.nplanes;
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr = planes[0].data;
uchar* dptr = planes[1].data;
-
+
switch( depth )
{
case CV_8U:
}
}
}
-
+
template<typename _Tp> static void
minmax_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, char op)
{
CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
const Mat *arrays[]={&src1, &src2, &dst, 0};
Mat planes[3];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total()*planes[0].channels();
size_t i, nplanes = it.nplanes, depth = src1.depth();
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr1 = planes[0].data;
const uchar* sptr2 = planes[1].data;
uchar* dptr = planes[2].data;
-
+
switch( depth )
{
case CV_8U:
}
}
-
+
void min(const Mat& src1, const Mat& src2, Mat& dst)
{
minmax( src1, src2, dst, 'm' );
dst.create(src1.dims, src1.size, src1.type());
const Mat *arrays[]={&src1, &dst, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total()*planes[0].channels();
size_t i, nplanes = it.nplanes, depth = src1.depth();
int ival = saturate_cast<int>(val);
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr1 = planes[0].data;
uchar* dptr = planes[1].data;
-
+
switch( depth )
{
case CV_8U:
}
}
-
+
void min(const Mat& src1, double val, Mat& dst)
{
minmax( src1, val, dst, 'm' );
{
minmax( src1, val, dst, 'M' );
}
-
-
+
+
template<typename _Tp> static void
muldiv_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, double scale, char op)
{
CV_Assert( src1.empty() || (src1.type() == src2.type() && src1.size == src2.size) );
const Mat *arrays[]={&src1, &src2, &dst, 0};
Mat planes[3];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[1].total()*planes[1].channels();
size_t i, nplanes = it.nplanes, depth = src2.depth();
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr1 = planes[0].data;
const uchar* sptr2 = planes[1].data;
uchar* dptr = planes[2].data;
-
+
switch( depth )
{
case CV_8U:
}
}
}
-
+
void multiply(const Mat& src1, const Mat& src2, Mat& dst, double scale)
{
void divide(const Mat& src1, const Mat& src2, Mat& dst, double scale)
{
muldiv( src1, src2, dst, scale, '/' );
-}
-
-
+}
+
+
template<typename _Tp> static void
mean_(const _Tp* src, const uchar* mask, size_t total, int cn, Scalar& sum, int& nz)
{
}
}
}
-
+
Scalar mean(const Mat& src, const Mat& mask)
{
CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size == src.size));
Scalar sum;
int nz = 0;
-
+
const Mat *arrays[]={&src, &mask, 0};
Mat planes[2];
-
+
NAryMatIterator it(arrays, planes);
size_t total = planes[0].total();
size_t i, nplanes = it.nplanes;
int depth = src.depth(), cn = src.channels();
-
+
for( i = 0; i < nplanes; i++, ++it )
{
const uchar* sptr = planes[0].data;
const uchar* mptr = planes[1].data;
-
+
switch( depth )
{
case CV_8U:
CV_Error(CV_StsUnsupportedFormat, "");
}
}
-
+
return sum * (1./std::max(nz, 1));
}
-
+
void patchZeros( Mat& mat, double level )
{
int j, ncols = mat.cols * mat.channels();
int depth = mat.depth();
CV_Assert( depth == CV_32F || depth == CV_64F );
-
+
for( int i = 0; i < mat.rows; i++ )
{
if( depth == CV_32F )
}
}
-
+
static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<int>& kernel )
{
int i, j, oldval, newval;
kernel.resize(size + 1);
-
+
if( _aperture_size < 0 )
{
static const int scharr[] = { 3, 10, 3, -1, 0, 1 };
kernel[i] = scharr[order*3 + i];
return;
}
-
+
for( i = 1; i <= size; i++ )
kernel[i] = 0;
kernel[0] = 1;
-
+
for( i = 0; i < size - order - 1; i++ )
{
oldval = kernel[0];
oldval = newval;
}
}
-
+
for( i = 0; i < order; i++ )
{
oldval = -kernel[0];
dx >= 0 && dy >= 0 && dx + dy <= 3 );
Size ksize = _aperture_size == -1 ? Size(3,3) : _aperture_size > 1 ?
Size(_aperture_size, _aperture_size) : dx > 0 ? Size(3, 1) : Size(1, 3);
-
+
Mat kernel(ksize, CV_32F);
vector<int> kx, ky;
-
+
calcSobelKernel1D( dx, _aperture_size, ksize.width, kx );
calcSobelKernel1D( dy, _aperture_size, ksize.height, ky );
-
+
for( int i = 0; i < kernel.rows; i++ )
{
float ay = (float)ky[i]*(origin && (dy & 1) ? -1 : 1);
for( int j = 0; j < kernel.cols; j++ )
kernel.at<float>(i, j) = kx[j]*ay;
}
-
+
return kernel;
}
-
+
Mat calcLaplaceKernel2D( int aperture_size )
{
int ksize = aperture_size == 1 ? 3 : aperture_size;
Mat kernel(ksize, ksize, CV_32F);
-
+
vector<int> kx, ky;
-
+
calcSobelKernel1D( 2, aperture_size, ksize, kx );
if( aperture_size > 1 )
calcSobelKernel1D( 0, aperture_size, ksize, ky );
ky.resize(3);
ky[0] = ky[2] = 0; ky[1] = 1;
}
-
+
for( int i = 0; i < ksize; i++ )
for( int j = 0; j < ksize; j++ )
kernel.at<float>(i, j) = (float)(kx[j]*ky[i] + kx[i]*ky[j]);
-
+
return kernel;
}
-
+
void initUndistortMap( const Mat& _a0, const Mat& _k0, Size sz, Mat& _mapx, Mat& _mapy )
{
_mapx.create(sz, CV_32F);
_mapy.create(sz, CV_32F);
-
+
double a[9], k[5]={0,0,0,0,0};
Mat _a(3, 3, CV_64F, a);
Mat _k(_k0.rows,_k0.cols, CV_MAKETYPE(CV_64F,_k0.channels()),k);
double fx, fy, cx, cy, ifx, ify, cxn, cyn;
-
+
_a0.convertTo(_a, CV_64F);
_k0.convertTo(_k, CV_64F);
fx = a[0]; fy = a[4]; cx = a[2]; cy = a[5];
ifx = 1./fx; ify = 1./fy;
cxn = cx;
cyn = cy;
-
+
for( int v = 0; v < sz.height; v++ )
{
for( int u = 0; u < sz.width; u++ )
double cdist = 1 + (k[0] + (k[1] + k[4]*r2)*r2)*r2;
double x1 = x*cdist + k[2]*2*x*y + k[3]*(r2 + 2*x2);
double y1 = y*cdist + k[3]*2*x*y + k[2]*(r2 + 2*y2);
-
+
_mapy.at<float>(v, u) = (float)(y1*fy + cy);
_mapx.at<float>(v, u) = (float)(x1*fx + cx);
}
}
}
-
-
+
+
std::ostream& operator << (std::ostream& out, const MatInfo& m)
{
if( !m.m || m.m->empty() )
}
}
return out;
-}
-
+}
+
MatComparator::MatComparator(double _maxdiff, int _context)
: maxdiff(_maxdiff), context(_context) {}
-
+
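// Illustrative usage sketch (an assumption, not taken from the source): the
// operator() below matches gtest's two-argument predicate-formatter signature,
// so the comparator can be invoked as
//   EXPECT_PRED_FORMAT2(cvtest::MatComparator(1e-5, 10), referenceMat, actualMat);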
::testing::AssertionResult
MatComparator::operator()(const char* expr1, const char* expr2,
const Mat& m1, const Mat& m2)
<< "The reference and the actual output arrays have different type or size:\n"
<< expr1 << " ~ " << MatInfo(m1) << "\n"
<< expr2 << " ~ " << MatInfo(m2) << "\n";
-
+
//bool ok = cvtest::cmpUlps(m1, m2, maxdiff, &realmaxdiff, &loc0);
int code = cmpEps( m1, m2, &realmaxdiff, maxdiff, &loc0, true);
-
+
if(code >= 0)
return ::testing::AssertionSuccess();
-
+
Mat m[] = {m1.reshape(1,0), m2.reshape(1,0)};
int dims = m[0].dims;
vector<int> loc;
int border = dims <= 2 ? context : 0;
-
+
Mat m1part, m2part;
if( border == 0 )
{
m1part = getSubArray(m[0], border, loc0, loc);
m2part = getSubArray(m[1], border, loc0, loc);
}
-
+
return ::testing::AssertionFailure()
<< "too big relative difference (" << realmaxdiff << " > "
<< maxdiff << ") between "
// This line ensures that gtest.h can be compiled on its own, even
// when it's fused.
-#include "opencv2/ts/ts_gtest.h"
+#include "precomp.hpp"
+
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
// The following lines pull in the real gtest *.cc files.
// Copyright 2005, Google Inc.
// This predicate-formatter checks that 'results' contains a test part
// failure of the given type and that the failure message contains the
// given substring.
-AssertionResult HasOneFailure(const char* /* results_expr */,
+static AssertionResult HasOneFailure(const char* /* results_expr */,
const char* /* type_expr */,
const char* /* substr_expr */,
const TestPartResultArray& results,
// Returns the ANSI color code for the given color. COLOR_DEFAULT is
// an invalid input.
-const char* GetAnsiColorCode(GTestColor color) {
+static const char* GetAnsiColorCode(GTestColor color) {
switch (color) {
case COLOR_RED: return "1";
case COLOR_GREEN: return "2";
// cannot simply emit special characters and have the terminal change colors.
// This routine must actually emit the characters rather than return a string
// that would be colored when printed, as can be done on Linux.
-void ColoredPrintf(GTestColor color, const char* fmt, ...) {
+static void ColoredPrintf(GTestColor color, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
va_end(args);
}
-void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
+static void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
const char* const type_param = test_info.type_param();
const char* const value_param = test_info.value_param();
// default implementation. Use this implementation to keep good OO
// design with private destructor.
-#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+#if (defined(_MSC_VER) && _MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
static UnitTest* const instance = new UnitTest;
return instance;
#else
static UnitTest instance;
return &instance;
-#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+#endif // (defined(_MSC_VER) && _MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
}
// Gets the number of successful test cases.
// part can be omitted.
//
// Returns the value of the flag, or NULL if the parsing failed.
-const char* ParseFlagValue(const char* str,
+static const char* ParseFlagValue(const char* str,
const char* flag,
bool def_optional) {
// str and flag must not be NULL.
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
-bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+static bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, true);
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
-bool ParseStringFlag(const char* str, const char* flag, String* value) {
+static bool ParseStringFlag(const char* str, const char* flag, String* value) {
// Gets the value of the flag as a string.
const char* const value_str = ParseFlagValue(str, flag, false);
// message is propagated back to the parent process. Otherwise, the
// message is simply printed to stderr. In either case, the program
// then exits with status 1.
-void DeathTestAbort(const String& message) {
+static void DeathTestAbort(const String& message) {
// On a POSIX system, this function may be called from a threadsafe-style
// death test child process, which operates on a very small stack. Use
// the heap for any additional non-minuscule memory requirements.
return &dummy < ptr;
}
-bool StackGrowsDown() {
+static bool StackGrowsDown() {
int dummy;
return StackLowerThanAddress(&dummy);
}
static CapturedStream* g_captured_stdout = NULL;
// Starts capturing an output stream (stdout/stderr).
-void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
+static void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
if (*stream != NULL) {
GTEST_LOG_(FATAL) << "Only one " << stream_name
<< " capturer can exist at a time.";
}
// Stops capturing the output stream and returns the captured string.
-String GetCapturedStream(CapturedStream** captured_stream) {
+static String GetCapturedStream(CapturedStream** captured_stream) {
const String content = (*captured_stream)->GetCapturedString();
delete *captured_stream;
#if GTEST_OS_WINDOWS_MOBILE // Windows CE does not define _snprintf_s.
# define snprintf _snprintf
-#elif _MSC_VER >= 1400 // VC 8.0 and later deprecate snprintf and _snprintf.
+#elif defined(_MSC_VER) && _MSC_VER >= 1400 // VC 8.0 and later deprecate snprintf and _snprintf.
# define snprintf _snprintf_s
-#elif _MSC_VER
+#elif defined(_MSC_VER) && _MSC_VER
# define snprintf _snprintf
#endif // GTEST_OS_WINDOWS_MOBILE
#include "precomp.hpp"\r
\r
-#if ANDROID\r
+#ifdef ANDROID\r
# include <sys/time.h>\r
#endif\r
\r
"{ |perf_seed |809564 |seed for random numbers generator}"\r
"{ |perf_tbb_nthreads |-1 |if TBB is enabled, the number of TBB threads}"\r
"{ |perf_write_sanity |false |allow to create new records for sanity checks}"\r
- #if ANDROID\r
+ #ifdef ANDROID\r
"{ |perf_time_limit |6.0 |default time limit for a single test (in seconds)}"\r
"{ |perf_affinity_mask |0 |set affinity mask for the main thread}"\r
"{ |perf_log_power_checkpoints |false |additional xml logging for power measurement}"\r
static double param_time_limit;\r
static int param_tbb_nthreads;\r
static bool param_write_sanity;\r
-#if ANDROID\r
+#ifdef ANDROID\r
static int param_affinity_mask;\r
static bool log_power_checkpoints;\r
\r
\r
#endif\r
\r
-void randu(cv::Mat& m)\r
+static void randu(cv::Mat& m)\r
{\r
const int bigValue = 0x00000FFF;\r
if (m.depth() < CV_32F)\r
{\r
LOGE("Failed to open sanity data for reading: %s", storageInPath.c_str());\r
}\r
- \r
+\r
if(!storageIn.isOpened())\r
storageOutPath = storageInPath;\r
}\r
param_force_samples = args.get<unsigned int>("perf_force_samples");\r
param_write_sanity = args.get<bool>("perf_write_sanity");\r
param_tbb_nthreads = args.get<int>("perf_tbb_nthreads");\r
-#if ANDROID\r
+#ifdef ANDROID\r
param_affinity_mask = args.get<int>("perf_affinity_mask");\r
log_power_checkpoints = args.get<bool>("perf_log_power_checkpoints");\r
#endif\r
return (int64)compensation;\r
}\r
\r
+#ifdef _MSC_VER\r
+# pragma warning(push)\r
+# pragma warning(disable:4355) // 'this' : used in base member initializer list\r
+#endif\r
TestBase::TestBase(): declare(this)\r
{\r
}\r
+#ifdef _MSC_VER\r
+# pragma warning(pop)\r
+#endif\r
+\r
\r
void TestBase::declareArray(SizeVector& sizes, cv::InputOutputArray a, int wtype)\r
{\r
bool TestBase::next()\r
{\r
bool has_next = ++currentIter < nIters && totalTime < timeLimit;\r
-#if ANDROID\r
+#ifdef ANDROID\r
if (log_power_checkpoints)\r
{\r
timeval tim;\r
gettimeofday(&tim, NULL);\r
unsigned long long t1 = tim.tv_sec * 1000LLU + (unsigned long long)(tim.tv_usec / 1000.f);\r
- \r
+\r
if (currentIter == 1) RecordProperty("test_start", cv::format("%llu",t1).c_str());\r
if (!has_next) RecordProperty("test_complete", cv::format("%llu",t1).c_str());\r
}\r
-#endif \r
+#endif\r
return has_next;\r
}\r
\r
p_tbb_initializer=new tbb::task_scheduler_init(param_tbb_nthreads);\r
}\r
#endif\r
-#if ANDROID\r
+#ifdef ANDROID\r
if (param_affinity_mask)\r
setCurrentThreadAffinityMask(param_affinity_mask);\r
#endif\r
{
CV_EXPORTS bool initModule_video(void);
-
+
}
#endif
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_VIDEO_PRECOMP_HPP__
#define __OPENCV_VIDEO_PRECOMP_HPP__
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/ts/ts.hpp"
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
#endif
}
//set the number of modes
- modesUsed[x] = nmodes;
+ modesUsed[x] = uchar(nmodes);
mask[x] = background ? 0 :
detectShadows && detectShadowGMM(data, nchannels, nmodes, gmm, mean, Tb, TB, tau) ?
shadowVal : 255;
bgmodelUsedModes.data, nmixtures, (float)learningRate,
(float)varThreshold,
backgroundRatio, varThresholdGen,
- fVarInit, fVarMin, fVarMax, -learningRate*fCT, fTau,
+ fVarInit, fVarMin, fVarMax, float(-learningRate*fCT), fTau,
bShadowDetection, nShadowDetection));
}
}
#include <stdio.h>
#include "lkpyramid.hpp"
-namespace
+namespace
{
static void calcSharrDeriv(const cv::Mat& src, cv::Mat& dst)
{
#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::calcSharrDeriv(src, dst))
return;
-#endif
-
+#endif
+
int x, y, delta = (int)alignSize((cols + 2)*cn, 16);
AutoBuffer<deriv_type> _tempBuf(delta*2 + 64);
deriv_type *trow0 = alignPtr(_tempBuf + cn, 16), *trow1 = alignPtr(trow0 + delta, 16);
-
+
#if CV_SSE2
__m128i z = _mm_setzero_si128(), c3 = _mm_set1_epi16(3), c10 = _mm_set1_epi16(10);
#endif
-
+
for( y = 0; y < rows; y++ )
{
const uchar* srow0 = src.ptr<uchar>(y > 0 ? y-1 : rows > 1 ? 1 : 0);
const uchar* srow1 = src.ptr<uchar>(y);
const uchar* srow2 = src.ptr<uchar>(y < rows-1 ? y+1 : rows > 1 ? rows-2 : 0);
deriv_type* drow = dst.ptr<deriv_type>(y);
-
+
// do vertical convolution
x = 0;
#if CV_SSE2
trow0[x] = (deriv_type)t0;
trow1[x] = (deriv_type)t1;
}
-
+
// make border
int x0 = (cols > 1 ? 1 : 0)*cn, x1 = (cols > 1 ? cols-2 : 0)*cn;
for( int k = 0; k < cn; k++ )
trow0[-cn + k] = trow0[x0 + k]; trow0[colsn + k] = trow0[x1 + k];
trow1[-cn + k] = trow1[x0 + k]; trow1[colsn + k] = trow1[x1 + k];
}
-
+
// do horizontal convolution, interleave the results and store them to dst
x = 0;
#if CV_SSE2
__m128i s2 = _mm_loadu_si128((const __m128i*)(trow1 + x - cn));
__m128i s3 = _mm_load_si128((const __m128i*)(trow1 + x));
__m128i s4 = _mm_loadu_si128((const __m128i*)(trow1 + x + cn));
-
+
__m128i t0 = _mm_sub_epi16(s1, s0);
__m128i t1 = _mm_add_epi16(_mm_mullo_epi16(_mm_add_epi16(s2, s4), c3), _mm_mullo_epi16(s3, c10));
__m128i t2 = _mm_unpacklo_epi16(t0, t1);
_mm_storeu_si128((__m128i*)(drow + x*2), t2);
_mm_storeu_si128((__m128i*)(drow + x*2 + 8), t0);
}
-#endif
+#endif
for( ; x < colsn; x++ )
{
deriv_type t0 = (deriv_type)(trow0[x+cn] - trow0[x-cn]);
}
}//namespace
-
+
cv::detail::LKTrackerInvoker::LKTrackerInvoker(
const Mat& _prevImg, const Mat& _prevDeriv, const Mat& _nextImg,
const Point2f* _prevPts, Point2f* _nextPts,
const Mat& I = *prevImg;
const Mat& J = *nextImg;
const Mat& derivI = *prevDeriv;
-
+
int j, cn = I.channels(), cn2 = cn*2;
cv::AutoBuffer<deriv_type> _buf(winSize.area()*(cn + cn2));
int derivDepth = DataType<deriv_type>::depth;
-
+
Mat IWinBuf(winSize, CV_MAKETYPE(derivDepth, cn), (deriv_type*)_buf);
Mat derivIWinBuf(winSize, CV_MAKETYPE(derivDepth, cn2), (deriv_type*)_buf + winSize.area()*cn);
-
+
for( int ptidx = range.begin(); ptidx < range.end(); ptidx++ )
{
Point2f prevPt = prevPts[ptidx]*(float)(1./(1 << level));
else
nextPt = nextPts[ptidx]*2.f;
nextPts[ptidx] = nextPt;
-
+
Point2i iprevPt, inextPt;
prevPt -= halfWin;
iprevPt.x = cvFloor(prevPt.x);
iprevPt.y = cvFloor(prevPt.y);
-
+
if( iprevPt.x < -winSize.width || iprevPt.x >= derivI.cols ||
iprevPt.y < -winSize.height || iprevPt.y >= derivI.rows )
{
}
continue;
}
-
+
float a = prevPt.x - iprevPt.x;
float b = prevPt.y - iprevPt.y;
const int W_BITS = 14, W_BITS1 = 14;
int iw01 = cvRound(a*(1.f - b)*(1 << W_BITS));
int iw10 = cvRound((1.f - a)*b*(1 << W_BITS));
int iw11 = (1 << W_BITS) - iw00 - iw01 - iw10;
-
+
int dstep = (int)(derivI.step/derivI.elemSize1());
int step = (int)(I.step/I.elemSize1());
CV_Assert( step == (int)(J.step/J.elemSize1()) );
float A11 = 0, A12 = 0, A22 = 0;
-
+
#if CV_SSE2
__m128i qw0 = _mm_set1_epi32(iw00 + (iw01 << 16));
__m128i qw1 = _mm_set1_epi32(iw10 + (iw11 << 16));
__m128i qdelta = _mm_set1_epi32(1 << (W_BITS1-5-1));
__m128 qA11 = _mm_setzero_ps(), qA12 = _mm_setzero_ps(), qA22 = _mm_setzero_ps();
#endif
-
+
// extract the patch from the first image, compute the covariance matrix of derivatives
int x, y;
for( y = 0; y < winSize.height; y++ )
{
const uchar* src = (const uchar*)I.data + (y + iprevPt.y)*step + iprevPt.x*cn;
const deriv_type* dsrc = (const deriv_type*)derivI.data + (y + iprevPt.y)*dstep + iprevPt.x*cn2;
-
+
deriv_type* Iptr = (deriv_type*)(IWinBuf.data + y*IWinBuf.step);
deriv_type* dIptr = (deriv_type*)(derivIWinBuf.data + y*derivIWinBuf.step);
-
+
x = 0;
-
+
#if CV_SSE2
for( ; x <= winSize.width*cn - 4; x += 4, dsrc += 4*2, dIptr += 4*2 )
{
__m128i v00, v01, v10, v11, t0, t1;
-
+
v00 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src + x)), z);
v01 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src + x + cn)), z);
v10 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src + x + step)), z);
v11 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src + x + step + cn)), z);
-
+
t0 = _mm_add_epi32(_mm_madd_epi16(_mm_unpacklo_epi16(v00, v01), qw0),
_mm_madd_epi16(_mm_unpacklo_epi16(v10, v11), qw1));
t0 = _mm_srai_epi32(_mm_add_epi32(t0, qdelta), W_BITS1-5);
_mm_storel_epi64((__m128i*)(Iptr + x), _mm_packs_epi32(t0,t0));
-
+
v00 = _mm_loadu_si128((const __m128i*)(dsrc));
v01 = _mm_loadu_si128((const __m128i*)(dsrc + cn2));
v10 = _mm_loadu_si128((const __m128i*)(dsrc + dstep));
v11 = _mm_loadu_si128((const __m128i*)(dsrc + dstep + cn2));
-
+
t0 = _mm_add_epi32(_mm_madd_epi16(_mm_unpacklo_epi16(v00, v01), qw0),
_mm_madd_epi16(_mm_unpacklo_epi16(v10, v11), qw1));
t1 = _mm_add_epi32(_mm_madd_epi16(_mm_unpackhi_epi16(v00, v01), qw0),
t0 = _mm_srai_epi32(_mm_add_epi32(t0, qdelta_d), W_BITS1);
t1 = _mm_srai_epi32(_mm_add_epi32(t1, qdelta_d), W_BITS1);
v00 = _mm_packs_epi32(t0, t1); // Ix0 Iy0 Ix1 Iy1 ...
-
+
_mm_storeu_si128((__m128i*)dIptr, v00);
t0 = _mm_srai_epi32(v00, 16); // Iy0 Iy1 Iy2 Iy3
t1 = _mm_srai_epi32(_mm_slli_epi32(v00, 16), 16); // Ix0 Ix1 Ix2 Ix3
-
+
__m128 fy = _mm_cvtepi32_ps(t0);
__m128 fx = _mm_cvtepi32_ps(t1);
-
+
qA22 = _mm_add_ps(qA22, _mm_mul_ps(fy, fy));
qA12 = _mm_add_ps(qA12, _mm_mul_ps(fx, fy));
qA11 = _mm_add_ps(qA11, _mm_mul_ps(fx, fx));
}
#endif
-
+
for( ; x < winSize.width*cn; x++, dsrc += 2, dIptr += 2 )
{
int ival = CV_DESCALE(src[x]*iw00 + src[x+cn]*iw01 +
dsrc[dstep]*iw10 + dsrc[dstep+cn2]*iw11, W_BITS1);
int iyval = CV_DESCALE(dsrc[1]*iw00 + dsrc[cn2+1]*iw01 + dsrc[dstep+1]*iw10 +
dsrc[dstep+cn2+1]*iw11, W_BITS1);
-
+
Iptr[x] = (short)ival;
dIptr[0] = (short)ixval;
dIptr[1] = (short)iyval;
-
+
A11 += (float)(ixval*ixval);
A12 += (float)(ixval*iyval);
A22 += (float)(iyval*iyval);
}
}
-
+
#if CV_SSE2
float CV_DECL_ALIGNED(16) A11buf[4], A12buf[4], A22buf[4];
_mm_store_ps(A11buf, qA11);
A12 += A12buf[0] + A12buf[1] + A12buf[2] + A12buf[3];
A22 += A22buf[0] + A22buf[1] + A22buf[2] + A22buf[3];
#endif
-
+
A11 *= FLT_SCALE;
A12 *= FLT_SCALE;
A22 *= FLT_SCALE;
-
+
float D = A11*A22 - A12*A12;
float minEig = (A22 + A11 - std::sqrt((A11-A22)*(A11-A22) +
4.f*A12*A12))/(2*winSize.width*winSize.height);
-
+
if( err && (flags & CV_LKFLOW_GET_MIN_EIGENVALS) != 0 )
err[ptidx] = (float)minEig;
-
+
if( minEig < minEigThreshold || D < FLT_EPSILON )
{
if( level == 0 && status )
status[ptidx] = false;
continue;
}
-
+
D = 1.f/D;
-
+
nextPt -= halfWin;
Point2f prevDelta;
-
+
for( j = 0; j < criteria.maxCount; j++ )
{
inextPt.x = cvFloor(nextPt.x);
inextPt.y = cvFloor(nextPt.y);
-
+
if( inextPt.x < -winSize.width || inextPt.x >= J.cols ||
inextPt.y < -winSize.height || inextPt.y >= J.rows )
{
status[ptidx] = false;
break;
}
-
+
a = nextPt.x - inextPt.x;
b = nextPt.y - inextPt.y;
iw00 = cvRound((1.f - a)*(1.f - b)*(1 << W_BITS));
qw1 = _mm_set1_epi32(iw10 + (iw11 << 16));
__m128 qb0 = _mm_setzero_ps(), qb1 = _mm_setzero_ps();
#endif
-
+
for( y = 0; y < winSize.height; y++ )
{
const uchar* Jptr = (const uchar*)J.data + (y + inextPt.y)*step + inextPt.x*cn;
const deriv_type* Iptr = (const deriv_type*)(IWinBuf.data + y*IWinBuf.step);
const deriv_type* dIptr = (const deriv_type*)(derivIWinBuf.data + y*derivIWinBuf.step);
-
+
x = 0;
-
+
#if CV_SSE2
for( ; x <= winSize.width*cn - 8; x += 8, dIptr += 8*2 )
{
__m128i v01 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(Jptr + x + cn)), z);
__m128i v10 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(Jptr + x + step)), z);
__m128i v11 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(Jptr + x + step + cn)), z);
-
+
__m128i t0 = _mm_add_epi32(_mm_madd_epi16(_mm_unpacklo_epi16(v00, v01), qw0),
_mm_madd_epi16(_mm_unpacklo_epi16(v10, v11), qw1));
__m128i t1 = _mm_add_epi32(_mm_madd_epi16(_mm_unpackhi_epi16(v00, v01), qw0),
diff0 = _mm_subs_epi16(_mm_packs_epi32(t0, t1), diff0);
diff1 = _mm_unpackhi_epi16(diff0, diff0);
diff0 = _mm_unpacklo_epi16(diff0, diff0); // It0 It0 It1 It1 ...
- v00 = _mm_loadu_si128((const __m128i*)(dIptr)); // Ix0 Iy0 Ix1 Iy1 ...
+ v00 = _mm_loadu_si128((const __m128i*)(dIptr)); // Ix0 Iy0 Ix1 Iy1 ...
v01 = _mm_loadu_si128((const __m128i*)(dIptr + 8));
v10 = _mm_mullo_epi16(v00, diff0);
v11 = _mm_mulhi_epi16(v00, diff0);
qb1 = _mm_add_ps(qb1, _mm_cvtepi32_ps(v10));
}
#endif
-
+
for( ; x < winSize.width*cn; x++, dIptr += 2 )
{
int diff = CV_DESCALE(Jptr[x]*iw00 + Jptr[x+cn]*iw01 +
b2 += (float)(diff*dIptr[1]);
}
}
-
+
#if CV_SSE2
float CV_DECL_ALIGNED(16) bbuf[4];
_mm_store_ps(bbuf, _mm_add_ps(qb0, qb1));
b1 *= FLT_SCALE;
b2 *= FLT_SCALE;
-
+
Point2f delta( (float)((A12*b2 - A22*b1) * D),
(float)((A12*b1 - A11*b2) * D));
//delta = -delta;
-
+
nextPt += delta;
nextPts[ptidx] = nextPt + halfWin;
-
+
if( delta.ddot(delta) <= criteria.epsilon )
break;
-
+
if( j > 0 && std::abs(delta.x + prevDelta.x) < 0.01 &&
std::abs(delta.y + prevDelta.y) < 0.01 )
{
}
prevDelta = delta;
}
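Each pass of the loop above solves the 2x2 normal equations of the Lucas-Kanade step in closed form. A minimal scalar sketch of that arithmetic, assuming the A11/A12/A22 and b1/b2 accumulators defined earlier, is:

    // Sketch of the per-iteration LK solve: G = |A11 A12; A12 A22|, b = (b1, b2),
    // delta = (A12*b2 - A22*b1, A12*b1 - A11*b2) / det(G), matching the code above
    // (the sign convention follows from how the temporal difference b is accumulated).
    #include <cfloat>

    struct LKDelta { float x, y; };

    static LKDelta lkStep( float A11, float A12, float A22, float b1, float b2 )
    {
        LKDelta d = { 0.f, 0.f };
        float det = A11*A22 - A12*A12;
        if( det < FLT_EPSILON )          // degenerate (textureless) window: no update
            return d;
        float invDet = 1.f/det;
        d.x = (A12*b2 - A22*b1)*invDet;
        d.y = (A12*b1 - A11*b2)*invDet;
        return d;
    }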
-
+
if( status[ptidx] && err && level == 0 && (flags & CV_LKFLOW_GET_MIN_EIGENVALS) == 0 )
{
- Point2f nextPt = nextPts[ptidx] - halfWin;
- Point inextPt;
-
- inextPt.x = cvFloor(nextPt.x);
- inextPt.y = cvFloor(nextPt.y);
-
- if( inextPt.x < -winSize.width || inextPt.x >= J.cols ||
- inextPt.y < -winSize.height || inextPt.y >= J.rows )
+ Point2f nextPoint = nextPts[ptidx] - halfWin;
+ Point inextPoint;
+
+ inextPoint.x = cvFloor(nextPoint.x);
+ inextPoint.y = cvFloor(nextPoint.y);
+
+ if( inextPoint.x < -winSize.width || inextPoint.x >= J.cols ||
+ inextPoint.y < -winSize.height || inextPoint.y >= J.rows )
{
if( status )
status[ptidx] = false;
continue;
}
-
- float a = nextPt.x - inextPt.x;
- float b = nextPt.y - inextPt.y;
- iw00 = cvRound((1.f - a)*(1.f - b)*(1 << W_BITS));
- iw01 = cvRound(a*(1.f - b)*(1 << W_BITS));
- iw10 = cvRound((1.f - a)*b*(1 << W_BITS));
+
+ float aa = nextPoint.x - inextPoint.x;
+ float bb = nextPoint.y - inextPoint.y;
+ iw00 = cvRound((1.f - aa)*(1.f - bb)*(1 << W_BITS));
+ iw01 = cvRound(aa*(1.f - bb)*(1 << W_BITS));
+ iw10 = cvRound((1.f - aa)*bb*(1 << W_BITS));
iw11 = (1 << W_BITS) - iw00 - iw01 - iw10;
float errval = 0.f;
-
+
for( y = 0; y < winSize.height; y++ )
{
- const uchar* Jptr = (const uchar*)J.data + (y + inextPt.y)*step + inextPt.x*cn;
+ const uchar* Jptr = (const uchar*)J.data + (y + inextPoint.y)*step + inextPoint.x*cn;
const deriv_type* Iptr = (const deriv_type*)(IWinBuf.data + y*IWinBuf.step);
-
+
for( x = 0; x < winSize.width*cn; x++ )
{
int diff = CV_DESCALE(Jptr[x]*iw00 + Jptr[x+cn]*iw01 +
}
}
}
-
+
int cv::buildOpticalFlowPyramid(InputArray _img, OutputArrayOfArrays pyramid, Size winSize, int maxLevel, bool withDerivatives,
int pyrBorder, int derivBorder, bool tryReuseInputImage)
{
if(!lvl0IsSet)
{
Mat& temp = pyramid.getMatRef(0);
-
+
if(!temp.empty())
temp.adjustROI(winSize.height, winSize.height, winSize.width, winSize.width);
if(temp.type() != img.type() || temp.cols != winSize.width*2 + img.cols || temp.rows != winSize.height * 2 + img.rows)
if (level != 0)
{
Mat& temp = pyramid.getMatRef(level * pyrstep);
-
+
if(!temp.empty())
temp.adjustROI(winSize.height, winSize.height, winSize.width, winSize.width);
if(temp.type() != img.type() || temp.cols != winSize.width*2 + sz.width || temp.rows != winSize.height * 2 + sz.height)
int level=0, i, npoints;
CV_Assert( (npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0 );
-
+
if( npoints == 0 )
{
_nextPts.release();
_err.release();
return;
}
-
+
if( !(flags & OPTFLOW_USE_INITIAL_FLOW) )
_nextPts.create(prevPtsMat.size(), prevPtsMat.type(), -1, true);
-
+
Mat nextPtsMat = _nextPts.getMat();
CV_Assert( nextPtsMat.checkVector(2, CV_32F, true) == npoints );
-
+
const Point2f* prevPts = (const Point2f*)prevPtsMat.data;
Point2f* nextPts = (Point2f*)nextPtsMat.data;
-
+
_status.create((int)npoints, 1, CV_8U, -1, true);
Mat statusMat = _status.getMat(), errMat;
CV_Assert( statusMat.isContinuous() );
uchar* status = statusMat.data;
float* err = 0;
-
+
for( i = 0; i < npoints; i++ )
status[i] = true;
-
+
if( _err.needed() )
{
_err.create((int)npoints, 1, CV_32F, -1, true);
}
else
derivI = prevPyr[level * lvlStep1 + 1];
-
+
CV_Assert(prevPyr[level * lvlStep1].size() == nextPyr[level * lvlStep2].size());
CV_Assert(prevPyr[level * lvlStep1].type() == nextPyr[level * lvlStep2].type());
#ifdef HAVE_TEGRA_OPTIMIZATION
typedef tegra::LKTrackerInvoker<cv::detail::LKTrackerInvoker> LKTrackerInvoker;
#else
- typedef cv::detail::LKTrackerInvoker LKTrackerInvoker;
+ typedef cv::detail::LKTrackerInvoker LKTrackerInvoker;
#endif
parallel_for(BlockedRange(0, npoints), LKTrackerInvoker(prevPyr[level * lvlStep1], derivI,
{
CvRect rect;
const char* src = (const char*)srcptr;
-
+
if( ip.x >= 0 )
{
src += ip.x*pix_size;
if( rect.x > win_size.width )
rect.x = win_size.width;
}
-
+
if( ip.x + win_size.width < src_size.width )
rect.width = win_size.width;
else
}
assert( rect.width <= win_size.width );
}
-
+
if( ip.y >= 0 )
{
src += ip.y * src_step;
}
else
rect.y = -ip.y;
-
+
if( ip.y + win_size.height < src_size.height )
rect.height = win_size.height;
else
rect.height = 0;
}
}
-
+
*pRect = rect;
return src - rect.x*pix_size;
}
float a, b;
double s = 0;
int i, j;
-
+
center.x -= (win_size.width-1)*0.5f;
center.y -= (win_size.height-1)*0.5f;
-
+
ip.x = cvFloor( center.x );
ip.y = cvFloor( center.y );
-
+
if( win_size.width <= 0 || win_size.height <= 0 )
return CV_BADRANGE_ERR;
-
+
a = center.x - ip.x;
b = center.y - ip.y;
a = MAX(a,0.0001f);
b1 = 1.f - b;
b2 = b;
s = (1. - a)/a;
-
+
src_step /= sizeof(src[0]);
dst_step /= sizeof(dst[0]);
-
+
if( 0 <= ip.x && ip.x + win_size.width < src_size.width &&
0 <= ip.y && ip.y + win_size.height < src_size.height )
{
// extracted rectangle is totally inside the image
src += ip.y * src_step + ip.x;
-
+
#if 0
if( icvCopySubpix_8u32f_C1R_p &&
icvCopySubpix_8u32f_C1R_p( src, src_step, dst,
dst_step*sizeof(dst[0]), win_size, a, b ) >= 0 )
return CV_OK;
#endif
-
+
for( ; win_size.height--; src += src_step, dst += dst_step )
{
float prev = (1 - a)*(b1*CV_8TO32F(src[0]) + b2*CV_8TO32F(src[src_step]));
else
{
CvRect r;
-
+
src = (const uchar*)icvAdjustRect( src, src_step*sizeof(*src),
sizeof(*src), src_size, win_size,ip, &r);
-
+
for( i = 0; i < win_size.height; i++, dst += dst_step )
{
const uchar *src2 = src + src_step;
-
+
if( i < r.y || i >= r.height )
src2 -= src_step;
-
+
for( j = 0; j < r.x; j++ )
{
float s0 = CV_8TO32F(src[r.x])*b1 +
CV_8TO32F(src2[r.x])*b2;
-
+
dst[j] = (float)(s0);
}
-
+
if( j < r.width )
{
float prev = (1 - a)*(b1*CV_8TO32F(src[j]) + b2*CV_8TO32F(src2[j]));
-
+
for( ; j < r.width; j++ )
{
float t = a12*CV_8TO32F(src[j+1]) + a22*CV_8TO32F(src2[j+1]);
prev = (float)(t*s);
}
}
-
+
for( ; j < win_size.width; j++ )
{
float s0 = CV_8TO32F(src[r.width])*b1 +
CV_8TO32F(src2[r.width])*b2;
-
+
dst[j] = (float)(s0);
}
-
+
if( i < r.height )
src = src2;
}
}
-
+
return CV_OK;
}
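The extraction routine above is plain bilinear interpolation: with (x0, y0) = floor(center) and fractional offsets a = center.x - x0, b = center.y - y0, each window pixel at integer offset (j, i) is

    dst(i, j) = (1-a)(1-b)\,src(y_0+i,\,x_0+j) + a(1-b)\,src(y_0+i,\,x_0+j+1)
              + (1-a)b\,src(y_0+i+1,\,x_0+j) + ab\,src(y_0+i+1,\,x_0+j+1)

and the border branch simply clamps the source rectangle to the image and replicates the edge rows and columns.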
cv::Mat ptA(count, 1, CV_32FC2, (void*)featuresA);
cv::Mat ptB(count, 1, CV_32FC2, (void*)featuresB);
cv::Mat st, err;
-
+
if( status )
st = cv::Mat(count, 1, CV_8U, (void*)status);
if( error )
status[i] = (char)pt_status;
featuresB[i].x = Av[2];
featuresB[i].y = Av[5];
-
+
matrices[i*4] = Av[0];
matrices[i*4+1] = Av[1];
matrices[i*4+2] = Av[3];
cvResize( A, sA, CV_INTER_AREA );
cvResize( B, sB, CV_INTER_AREA );
}
-
+
A = sA;
B = sB;
}
if( count < RANSAC_SIZE0 )
return 0;
-
- CvMat _pB = cvMat(1, count, CV_32FC2, pB);
+
+ CvMat _pB = cvMat(1, count, CV_32FC2, pB);
brect = cvBoundingRect(&_pB, 1);
// RANSAC stuff:
for( k1 = 0; k1 < RANSAC_MAX_ITERS; k1++ )
{
idx[i] = cvRandInt(&rng) % count;
-
+
for( j = 0; j < i; j++ )
{
if( idx[j] == idx[i] )
b[0] = pB[idx[0]];
b[1] = pB[idx[1]];
b[2] = pB[idx[2]];
-
+
double dax1 = a[1].x - a[0].x, day1 = a[1].y - a[0].y;
double dax2 = a[2].x - a[0].x, day2 = a[2].y - a[0].y;
double dbx1 = b[1].x - b[0].x, dby1 = b[1].y - b[0].y;
m[2] /= scale;
m[5] /= scale;
cvConvert( &M, matM );
-
+
return 1;
}
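Each RANSAC sample above consists of three correspondences a_i -> b_i, which determine the 2x3 affine matrix exactly (the dax/dby products only screen out nearly collinear triples). A small sketch of the same 3-point fit using cv::solve, illustrative only and not the hand-rolled path taken by this function, looks like:

    #include <opencv2/core/core.hpp>

    // Sketch: fit an affine map M (2x3) so that b_i ~= M * [a_i; 1] for three point pairs.
    static bool affineFrom3Pairs( const cv::Point2f a[3], const cv::Point2f b[3], cv::Mat& M )
    {
        cv::Mat A(6, 6, CV_64F, cv::Scalar(0)), rhs(6, 1, CV_64F);
        for( int i = 0; i < 3; i++ )
        {
            // row for x: m00*ax + m01*ay + m02 = bx
            A.at<double>(i*2, 0) = a[i].x; A.at<double>(i*2, 1) = a[i].y; A.at<double>(i*2, 2) = 1;
            // row for y: m10*ax + m11*ay + m12 = by
            A.at<double>(i*2+1, 3) = a[i].x; A.at<double>(i*2+1, 4) = a[i].y; A.at<double>(i*2+1, 5) = 1;
            rhs.at<double>(i*2) = b[i].x;
            rhs.at<double>(i*2+1) = b[i].y;
        }
        cv::Mat m6;
        if( !cv::solve(A, rhs, m6) )   // degenerate (collinear) samples fail here
            return false;
        M = m6.reshape(1, 2);          // 2x3 affine matrix
        return true;
    }

cv::getAffineTransform in imgproc performs the same 3-point solve directly.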
FarnebackPolyExp( const Mat& src, Mat& dst, int n, double sigma )
{
int k, x, y;
-
+
assert( src.type() == CV_32FC1 );
int width = src.cols;
int height = src.rows;
float* xg = g + n*2 + 1;
float* xxg = xg + n*2 + 1;
float *row = (float*)_row + n*3;
-
+
if( sigma < FLT_EPSILON )
sigma = n*0.3;
-
+
double s = 0.;
for( x = -n; x <= n; x++ )
{
g[x] = (float)std::exp(-x*x/(2*sigma*sigma));
s += g[x];
}
-
+
s = 1./s;
for( x = -n; x <= n; x++ )
{
}
Mat_<double> G = Mat_<double>::zeros(6, 6);
-
+
for( y = -n; y <= n; y++ )
for( x = -n; x <= n; x++ )
{
G(3,3) += g[y]*g[x]*x*x*x*x;
G(5,5) += g[y]*g[x]*x*x*y*y;
}
-
+
//G[0][0] = 1.;
G(2,2) = G(0,3) = G(0,4) = G(3,0) = G(4,0) = G(1,1);
G(4,4) = G(3,3);
double ig11 = invG(1,1), ig03 = invG(0,3), ig33 = invG(3,3), ig55 = invG(5,5);
dst.create( height, width, CV_32FC(5));
-
+
for( y = 0; y < height; y++ )
{
float g0 = g[0], g1, g2;
float *srow0 = (float*)(src.data + src.step*y), *srow1 = 0;
float *drow = (float*)(dst.data + dst.step*y);
-
+
// vertical part of convolution
for( x = 0; x < width; x++ )
{
row[x*3] = srow0[x]*g0;
row[x*3+1] = row[x*3+2] = 0.f;
}
-
+
for( k = 1; k <= n; k++ )
{
g0 = g[k]; g1 = xg[k]; g2 = xxg[k];
srow0 = (float*)(src.data + src.step*std::max(y-k,0));
srow1 = (float*)(src.data + src.step*std::min(y+k,height-1));
-
+
for( x = 0; x < width; x++ )
{
float p = srow0[x] + srow1[x];
float t0 = row[x*3] + g0*p;
float t1 = row[x*3+1] + g1*(srow1[x] - srow0[x]);
float t2 = row[x*3+2] + g2*p;
-
+
row[x*3] = t0;
row[x*3+1] = t1;
row[x*3+2] = t2;
}
}
-
+
// horizontal part of convolution
for( x = 0; x < n*3; x++ )
{
row[-1-x] = row[2-x];
row[width*3+x] = row[width*3+x-3];
}
-
+
for( x = 0; x < width; x++ )
{
g0 = g[0];
// r1 ~ 1, r2 ~ x, r3 ~ y, r4 ~ x^2, r5 ~ y^2, r6 ~ xy
double b1 = row[x*3]*g0, b2 = 0, b3 = row[x*3+1]*g0,
b4 = 0, b5 = row[x*3+2]*g0, b6 = 0;
-
+
for( k = 1; k <= n; k++ )
{
double tg = row[(x+k)*3] + row[(x-k)*3];
b6 += (row[(x+k)*3+1] - row[(x-k)*3+1])*xg[k];
b5 += (row[(x+k)*3+2] + row[(x-k)*3+2])*g0;
}
-
+
// do not store r1
drow[x*5+1] = (float)(b2*ig11);
drow[x*5] = (float)(b3*ig11);
drow[x*5+4] = (float)(b6*ig55);
}
}
-
+
row -= n*3;
}
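What the separable convolution above computes is Farnebäck's polynomial expansion (Farnebäck, SCIA 2003): each Gaussian-weighted neighborhood is approximated by a quadratic over the basis noted in the comment,

    f(x, y) \approx r_1 + r_2 x + r_3 y + r_4 x^2 + r_5 y^2 + r_6 x y

and the five non-constant coefficients, obtained by multiplying the correlation results b2..b6 with the corresponding entries of the inverse Gram matrix invG, are stored per pixel in the CV_32FC(5) output (r1 is dropped, as the comment says).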
int x, y, width = _flow.cols, height = _flow.rows;
const float* R1 = (float*)_R1.data;
size_t step1 = _R1.step/sizeof(R1[0]);
-
+
matM.create(height, width, CV_32FC(5));
for( y = _y0; y < _y1; y++ )
const float* flow = (float*)(_flow.data + y*_flow.step);
const float* R0 = (float*)(_R0.data + y*_R0.step);
float* M = (float*)(matM.data + y*matM.step);
-
+
for( x = 0; x < width; x++ )
{
float dx = flow[x*2], dy = flow[x*2+1];
float r2, r3, r4, r5, r6;
fx -= x1; fy -= y1;
-
+
if( (unsigned)x1 < (unsigned)(width-1) &&
(unsigned)y1 < (unsigned)(height-1) )
{
int y0 = 0, y1;
int min_update_stripe = std::max((1 << 10)/width, block_size);
double scale = 1./(block_size*block_size);
-
+
AutoBuffer<double> _vsum((width+m*2+2)*5);
double* vsum = _vsum + (m+1)*5;
srow0 = (const float*)(matM.data + matM.step*std::max(y-m-1,0));
const float* srow1 = (const float*)(matM.data + matM.step*std::min(y+m,height-1));
-
+
// vertical blur
for( x = 0; x < width*5; x++ )
vsum[x] += srow1[x] - srow0[x];
double h2_ = h2*scale;
double idet = 1./(g11_*g22_ - g12_*g12_+1e-3);
-
+
flow[x*2] = (float)((g11_*h2_-g12_*h1_)*idet);
flow[x*2+1] = (float)((g22_*h1_-g12_*h2_)*idet);
}
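The displacement written to flow above is the closed-form solution of a 2x2 system G d = h accumulated over the block, with 1e-3 added to the determinant so that nearly singular (flat) regions produce a damped, finite update instead of diverging:

    G^{-1} = \frac{1}{g_{11}g_{22} - g_{12}^2 + 10^{-3}}
             \begin{pmatrix} g_{22} & -g_{12} \\ -g_{12} & g_{11} \end{pmatrix},
    \qquad d = G^{-1} h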
int y0 = 0, y1;
int min_update_stripe = std::max((1 << 10)/width, block_size);
double sigma = m*0.3, s = 1;
-
+
AutoBuffer<float> _vsum((width+m*2+2)*5 + 16), _hsum(width*5 + 16);
AutoBuffer<float, 4096> _kernel((m+1)*5 + 16);
AutoBuffer<float*, 1024> _srow(m*2+1);
s2 = _mm_add_ps(s2, _mm_mul_ps(x0, g4));
s3 = _mm_add_ps(s3, _mm_mul_ps(x1, g4));
}
-
+
_mm_store_ps(vsum + x, s0);
_mm_store_ps(vsum + x + 4, s1);
_mm_store_ps(vsum + x + 8, s2);
#endif
for( ; x < width*5; x++ )
{
- float s = vsum[x]*kernel[0];
+ float sum = vsum[x]*kernel[0];
for( i = 1; i <= m; i++ )
- s += kernel[i]*(vsum[x - i*5] + vsum[x + i*5]);
- hsum[x] = s;
+ sum += kernel[i]*(vsum[x - i*5] + vsum[x + i*5]);
+ hsum[x] = sum;
}
for( x = 0; x < width; x++ )
h2 = hsum[x*5+4];
double idet = 1./(g11*g22 - g12*g12 + 1e-3);
-
+
flow[x*2] = (float)((g11*h2-g12*h1)*idet);
flow[x*2+1] = (float)((g22*h1-g12*h2)*idet);
}
resize( fimg, I, Size(width, height), CV_INTER_LINEAR );
FarnebackPolyExp( I, R[i], poly_n, poly_sigma );
}
-
+
FarnebackUpdateMatrices( R[0], R[1], flow, M, 0, flow.rows );
for( i = 0; i < iterations; i++ )
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
-#if _MSC_VER >= 1200
-#pragma warning( disable: 4251 4710 4711 4514 4996 )
-#endif
-
-#ifdef HAVE_CVCONFIG_H
+#ifdef HAVE_CVCONFIG_H
#include "cvconfig.h"
#endif
//M*/
#include "precomp.hpp"
+#include "opencv2/video/video.hpp"
namespace cv
{
-
+
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorMOG, "BackgroundSubtractor.MOG",
obj.info()->addParam(obj, "varThreshold", obj.varThreshold);
obj.info()->addParam(obj, "detectShadows", obj.bShadowDetection));
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
bool initModule_video(void)
{
- Ptr<Algorithm> mog = createBackgroundSubtractorMOG(), mog2 = createBackgroundSubtractorMOG2();
- return mog->info() != 0 && mog2->info() != 0;
+ bool all = true;
+ all &= !BackgroundSubtractorMOG_info_auto.name().empty();
+ all &= !BackgroundSubtractorMOG2_info_auto.name().empty();
+
+ return all;
}
-
+
}
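Once this registration has run, the 2.4-era Algorithm factory can create the background subtractors by name and tweak the parameters exposed through addParam above. A minimal usage sketch, assuming the module is linked in and the MOG2 variant is registered as "BackgroundSubtractor.MOG2":

    #include "opencv2/video/video.hpp"

    int main()
    {
        cv::initModule_video();   // make sure the registrations above have executed

        cv::Ptr<cv::BackgroundSubtractor> bg =
            cv::Algorithm::create<cv::BackgroundSubtractor>("BackgroundSubtractor.MOG2");
        if( bg.empty() )
            return 1;             // name not found in the registry

        bg->set("detectShadows", false);   // parameter exposed via addParam above
        return 0;
    }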
+#ifdef __GNUC__
+# pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
int x, y;
DXY() : dist(0), x(0), y(0) {}
- DXY(float dist, int x, int y) : dist(dist), x(x), y(y) {}
+ DXY(float _dist, int _x, int _y) : dist(_dist), x(_x), y(_y) {}
bool operator <(const DXY &dxy) const { return dist < dxy.dist; }
};
float eps; // max outliers ratio
float prob; // probability of success
- RansacParams(int size, float thresh, float eps, float prob)
- : size(size), thresh(thresh), eps(eps), prob(prob) {}
+ RansacParams(int _size, float _thresh, float _eps, float _prob)
+ : size(_size), thresh(_thresh), eps(_eps), prob(_prob) {}
static RansacParams translationMotionStd() { return RansacParams(2, 0.5f, 0.5f, 0.99f); }
static RansacParams translationAndScale2dMotionStd() { return RansacParams(3, 0.5f, 0.5f, 0.99f); }
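From eps (expected outlier ratio) and prob (desired confidence), the usual RANSAC bound gives the number of random samples of size points to draw; a small illustrative helper, not a member of the class above:

    #include <cmath>

    // Standard RANSAC iteration bound: draw enough samples of `size` points that at least
    // one all-inlier sample occurs with probability `prob`, given outlier ratio `eps`.
    static int ransacIterations( int size, float eps, float prob )
    {
        const int maxIters = 100000;                        // arbitrary safety cap
        double pGoodSample = std::pow(1.0 - (double)eps, size);
        if( pGoodSample >= 1.0 )
            return 1;
        if( pGoodSample <= 0.0 )
            return maxIters;
        double n = std::log(1.0 - (double)prob) / std::log(1.0 - pGoodSample);
        return n >= (double)maxIters ? maxIters : (int)std::ceil(n);
    }

With the translationMotionStd() defaults (size=2, eps=0.5, prob=0.99) this evaluates to 17 iterations.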
class CV_EXPORTS ColorInpainter : public InpainterBase
{
public:
- ColorInpainter(int method = INPAINT_TELEA, double radius = 2.)
- : method_(method), radius_(radius) {}
+ ColorInpainter(int method = INPAINT_TELEA, double _radius = 2.)
+ : method_(method), radius_(_radius) {}
virtual void inpaint(int idx, Mat &frame, Mat &mask);
{
public:
virtual void stabilize(const Mat *motions, int size, Mat *stabilizationMotions) const = 0;
+
+#ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
+ virtual ~IMotionStabilizer() {}
+#endif
};
class CV_EXPORTS MotionFilterBase : public IMotionStabilizer
#include "opencv2/core/core.hpp"
#include "opencv2/opencv_modules.hpp"
-#if HAVE_OPENCV_GPU
+#ifdef HAVE_OPENCV_GPU
# include "opencv2/gpu/gpu.hpp"
#endif
OutputArray status, OutputArray errors);
};
-#if HAVE_OPENCV_GPU
+#ifdef HAVE_OPENCV_GPU
class CV_EXPORTS DensePyrLkOptFlowEstimatorGpu
: public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator
{
public:
virtual ~StabilizerBase() {}
- void setLog(Ptr<ILog> log) { log_ = log; }
+ void setLog(Ptr<ILog> _log) { log_ = _log; }
Ptr<ILog> log() const { return log_; }
void setRadius(int val) { radius_ = val; }
Mat_<float> M = impls[model](params.size, &subset0[0], &subset1[0], 0);
- int ninliers = 0;
+ int _ninliers = 0;
for (int i = 0; i < npoints; ++i)
{
p0 = points0[i]; p1 = points1[i];
x = M(0,0)*p0.x + M(0,1)*p0.y + M(0,2);
y = M(1,0)*p0.x + M(1,1)*p0.y + M(1,2);
if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh)
- ninliers++;
+ _ninliers++;
}
- if (ninliers >= ninliersMax)
+ if (_ninliers >= ninliersMax)
{
bestM = M;
- ninliersMax = ninliers;
+ ninliersMax = _ninliers;
subset0best.swap(subset0);
subset1best.swap(subset1);
}
CV_Assert(mask.size() == frame.size() && mask.type() == CV_8U);
Mat invS = at(idx, *stabilizationMotions_).inv();
- vector<Mat_<float> > motions(2*radius_ + 1);
+ vector<Mat_<float> > _motions(2*radius_ + 1);
for (int i = -radius_; i <= radius_; ++i)
- motions[radius_ + i] = getMotion(idx, idx + i, *motions_) * invS;
+ _motions[radius_ + i] = getMotion(idx, idx + i, *motions_) * invS;
int n;
float mean, var;
for (int i = -radius_; i <= radius_; ++i)
{
const Mat_<Point3_<uchar> > &framei = at(idx + i, *frames_);
- const Mat_<float> &Mi = motions[radius_ + i];
+ const Mat_<float> &Mi = _motions[radius_ + i];
int xi = cvRound(Mi(0,0)*x + Mi(0,1)*y + Mi(0,2));
int yi = cvRound(Mi(1,0)*x + Mi(1,1)*y + Mi(1,2));
if (xi >= 0 && xi < framei.cols && yi >= 0 && yi < framei.rows)
for (int dx = -rad; dx <= rad; ++dx)
{
int qx0 = x + dx;
- int qy0 = y + dy;
+ int qy0 = y + dy;
if (qy0 >= 0 && qy0 < mask0.rows && qx0 >= 0 && qx0 < mask0.cols && mask0(qy0,qx0))
{
MotionInpainter::MotionInpainter()
{
-#if HAVE_OPENCV_GPU
+#ifdef HAVE_OPENCV_GPU
setOptFlowEstimator(new DensePyrLkOptFlowEstimatorGpu());
#else
CV_Error(CV_StsNotImplemented, "Current implementation of MotionInpainter requires GPU");
void MotionInpainter::inpaint(int idx, Mat &frame, Mat &mask)
{
priority_queue<pair<float,int> > neighbors;
- vector<Mat> motions(2*radius_ + 1);
+ vector<Mat> _motions(2*radius_ + 1);
for (int i = -radius_; i <= radius_; ++i)
{
Mat motion0to1 = getMotion(idx, idx + i, *motions_) * at(idx, *stabilizationMotions_).inv();
- motions[radius_ + i] = motion0to1;
+ _motions[radius_ + i] = motion0to1;
if (i != 0)
{
int neighbor = neighbors.top().second;
neighbors.pop();
- Mat motion1to0 = motions[radius_ + neighbor - idx].inv();
+ Mat motion1to0 = _motions[radius_ + neighbor - idx].inv();
frame1_ = at(neighbor, *frames_);
warpAffine(
if (x1 >= 0 && x1 < frame1.cols && y1 >= 0 && y1 < frame1.rows && mask1_(y1,x1)
&& sqr(flowX_(y0,x0)) + sqr(flowY_(y0,x0)) < sqr(distThresh))
- {
+ {
frame0.at<Point3_<uchar> >(y0,x0) = frame1.at<Point3_<uchar> >(y1,x1);
mask0_(y0,x0) = 255;
//count++;
}
-#if HAVE_OPENCV_GPU
+#ifdef HAVE_OPENCV_GPU
DensePyrLkOptFlowEstimatorGpu::DensePyrLkOptFlowEstimatorGpu()
{
CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
void StabilizerBase::setUp(int cacheSize, const Mat &frame)
{
- InpainterBase *inpainter = static_cast<InpainterBase*>(inpainter_);
- doInpainting_ = dynamic_cast<NullInpainter*>(inpainter) == 0;
+ InpainterBase *_inpainter = static_cast<InpainterBase*>(inpainter_);
+ doInpainting_ = dynamic_cast<NullInpainter*>(_inpainter) == 0;
if (doInpainting_)
{
inpainter_->setRadius(radius_);
motions_.push_back(Mat::eye(3, 3, CV_32F));
log_->print("\n");
- IMotionStabilizer *motionStabilizer = static_cast<IMotionStabilizer*>(motionStabilizer_);
- MotionFilterBase *motionFilterBase = dynamic_cast<MotionFilterBase*>(motionStabilizer);
+ IMotionStabilizer *_motionStabilizer = static_cast<IMotionStabilizer*>(motionStabilizer_);
+ MotionFilterBase *motionFilterBase = dynamic_cast<MotionFilterBase*>(_motionStabilizer);
if (motionFilterBase)
{
motionFilterBase->setRadius(radius_);
# ----------------------------------------------------------------------------
add_custom_target(opencv_android_examples)
+ocv_warnings_disable(CMAKE_CXX_FLAGS -Wmissing-declarations)
+
add_subdirectory(15-puzzle)
add_subdirectory(face-detection)
add_subdirectory(image-manipulations)
#include "opencv2/contrib/contrib.hpp"\r
#include "opencv2/highgui/highgui.hpp"\r
\r
-void help(char **argv)\r
+static void help(char **argv)\r
{\r
- std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"\r
- << "Usage: " << std::endl <<\r
- argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<\r
- "Example: " << std::endl <<\r
- argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<\r
- " iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<\r
- "If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<\r
- "Please note: Background should not contain large surfaces with skin tone." <<\r
- "\n\n ESC will stop\n"\r
- "Using OpenCV version %s\n" << CV_VERSION << "\n"\r
- << std::endl;\r
+ std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"\r
+ << "Usage: " << std::endl <<\r
+ argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<\r
+ "Example: " << std::endl <<\r
+ argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<\r
+ " iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<\r
+            "If no parameter is specified, this application will try to capture from the default webcam." << std::endl <<\r
+ "Please note: Background should not contain large surfaces with skin tone." <<\r
+ "\n\n ESC will stop\n"\r
+            "Using OpenCV version " << CV_VERSION << "\n"\r
+ << std::endl;\r
}\r
\r
class ASDFrameHolder\r
{\r
private:\r
- IplImage *image;\r
- double timeStamp;\r
+ IplImage *image;\r
+ double timeStamp;\r
\r
public:\r
- ASDFrameHolder();\r
- virtual ~ASDFrameHolder();\r
- virtual void assignFrame(IplImage *sourceImage, double frameTime);\r
- inline IplImage *getImage();\r
- inline double getTimeStamp();\r
- virtual void setImage(IplImage *sourceImage);\r
+ ASDFrameHolder();\r
+ virtual ~ASDFrameHolder();\r
+ virtual void assignFrame(IplImage *sourceImage, double frameTime);\r
+ inline IplImage *getImage();\r
+ inline double getTimeStamp();\r
+ virtual void setImage(IplImage *sourceImage);\r
};\r
\r
class ASDFrameSequencer\r
{\r
public:\r
- virtual ~ASDFrameSequencer();\r
- virtual IplImage *getNextImage();\r
- virtual void close();\r
- virtual bool isOpen();\r
- virtual void getFrameCaption(char *caption);\r
+ virtual ~ASDFrameSequencer();\r
+ virtual IplImage *getNextImage();\r
+ virtual void close();\r
+ virtual bool isOpen();\r
+ virtual void getFrameCaption(char *caption);\r
};\r
\r
class ASDCVFrameSequencer : public ASDFrameSequencer\r
{\r
protected:\r
- CvCapture *capture;\r
+ CvCapture *capture;\r
\r
public:\r
- virtual IplImage *getNextImage();\r
- virtual void close();\r
- virtual bool isOpen();\r
+ virtual IplImage *getNextImage();\r
+ virtual void close();\r
+ virtual bool isOpen();\r
};\r
\r
class ASDFrameSequencerWebCam : public ASDCVFrameSequencer\r
{\r
public:\r
- virtual bool open(int cameraIndex);\r
+ virtual bool open(int cameraIndex);\r
};\r
\r
class ASDFrameSequencerVideoFile : public ASDCVFrameSequencer\r
{\r
public:\r
- virtual bool open(const char *fileName);\r
+ virtual bool open(const char *fileName);\r
};\r
\r
class ASDFrameSequencerImageFile : public ASDFrameSequencer {\r
private:\r
- char sFileNameMask[2048];\r
- int nCurrentIndex, nStartIndex, nEndIndex;\r
+ char sFileNameMask[2048];\r
+ int nCurrentIndex, nStartIndex, nEndIndex;\r
\r
public:\r
- virtual void open(const char *fileNameMask, int startIndex, int endIndex);\r
- virtual void getFrameCaption(char *caption);\r
- virtual IplImage *getNextImage();\r
- virtual void close();\r
- virtual bool isOpen();\r
+ virtual void open(const char *fileNameMask, int startIndex, int endIndex);\r
+ virtual void getFrameCaption(char *caption);\r
+ virtual IplImage *getNextImage();\r
+ virtual void close();\r
+ virtual bool isOpen();\r
};\r
\r
//-------------------- ASDFrameHolder -----------------------//\r
ASDFrameHolder::ASDFrameHolder( )\r
{\r
- image = NULL;\r
- timeStamp = 0;\r
+ image = NULL;\r
+ timeStamp = 0;\r
};\r
\r
ASDFrameHolder::~ASDFrameHolder( )\r
{\r
- cvReleaseImage(&image);\r
+ cvReleaseImage(&image);\r
};\r
\r
void ASDFrameHolder::assignFrame(IplImage *sourceImage, double frameTime)\r
{\r
- if (image != NULL)\r
- {\r
- cvReleaseImage(&image);\r
- image = NULL;\r
- }\r
-\r
- image = cvCloneImage(sourceImage);\r
- timeStamp = frameTime;\r
+ if (image != NULL)\r
+ {\r
+ cvReleaseImage(&image);\r
+ image = NULL;\r
+ }\r
+\r
+ image = cvCloneImage(sourceImage);\r
+ timeStamp = frameTime;\r
};\r
\r
IplImage *ASDFrameHolder::getImage()\r
{\r
- return image;\r
+ return image;\r
};\r
\r
double ASDFrameHolder::getTimeStamp()\r
{\r
- return timeStamp;\r
+ return timeStamp;\r
};\r
\r
void ASDFrameHolder::setImage(IplImage *sourceImage)\r
{\r
- image = sourceImage;\r
+ image = sourceImage;\r
};\r
\r
\r
\r
ASDFrameSequencer::~ASDFrameSequencer()\r
{\r
- close();\r
+ close();\r
};\r
\r
IplImage *ASDFrameSequencer::getNextImage()\r
{\r
- return NULL;\r
+ return NULL;\r
};\r
\r
void ASDFrameSequencer::close()\r
\r
bool ASDFrameSequencer::isOpen()\r
{\r
- return false;\r
+ return false;\r
};\r
\r
void ASDFrameSequencer::getFrameCaption(char* /*caption*/) {\r
- return;\r
+ return;\r
};\r
\r
IplImage* ASDCVFrameSequencer::getNextImage()\r
{\r
- IplImage *image;\r
-\r
- image = cvQueryFrame(capture);\r
-\r
- if (image != NULL)\r
- {\r
- return cvCloneImage(image);\r
- }\r
- else\r
- {\r
- return NULL;\r
- }\r
+ IplImage *image;\r
+\r
+ image = cvQueryFrame(capture);\r
+\r
+ if (image != NULL)\r
+ {\r
+ return cvCloneImage(image);\r
+ }\r
+ else\r
+ {\r
+ return NULL;\r
+ }\r
};\r
\r
void ASDCVFrameSequencer::close()\r
{\r
- if (capture != NULL)\r
- {\r
- cvReleaseCapture(&capture);\r
- }\r
+ if (capture != NULL)\r
+ {\r
+ cvReleaseCapture(&capture);\r
+ }\r
};\r
\r
bool ASDCVFrameSequencer::isOpen()\r
{\r
- return (capture != NULL);\r
+ return (capture != NULL);\r
};\r
\r
\r
\r
bool ASDFrameSequencerWebCam::open(int cameraIndex)\r
{\r
- close();\r
-\r
- capture = cvCaptureFromCAM(cameraIndex);\r
-\r
- if (!capture)\r
- {\r
- return false;\r
- }\r
- else\r
- {\r
- return true;\r
- }\r
+ close();\r
+\r
+ capture = cvCaptureFromCAM(cameraIndex);\r
+\r
+ if (!capture)\r
+ {\r
+ return false;\r
+ }\r
+ else\r
+ {\r
+ return true;\r
+ }\r
};\r
\r
\r
\r
bool ASDFrameSequencerVideoFile::open(const char *fileName)\r
{\r
- close();\r
-\r
- capture = cvCaptureFromFile(fileName);\r
- if (!capture)\r
- {\r
- return false;\r
- }\r
- else\r
- {\r
- return true;\r
- }\r
+ close();\r
+\r
+ capture = cvCaptureFromFile(fileName);\r
+ if (!capture)\r
+ {\r
+ return false;\r
+ }\r
+ else\r
+ {\r
+ return true;\r
+ }\r
};\r
\r
\r
\r
void ASDFrameSequencerImageFile::open(const char *fileNameMask, int startIndex, int endIndex)\r
{\r
- nCurrentIndex = startIndex-1;\r
- nStartIndex = startIndex;\r
- nEndIndex = endIndex;\r
+ nCurrentIndex = startIndex-1;\r
+ nStartIndex = startIndex;\r
+ nEndIndex = endIndex;\r
\r
- std::sprintf(sFileNameMask, "%s", fileNameMask);\r
+ std::sprintf(sFileNameMask, "%s", fileNameMask);\r
};\r
\r
void ASDFrameSequencerImageFile::getFrameCaption(char *caption) {\r
- std::sprintf(caption, sFileNameMask, nCurrentIndex);\r
+ std::sprintf(caption, sFileNameMask, nCurrentIndex);\r
};\r
\r
IplImage* ASDFrameSequencerImageFile::getNextImage()\r
{\r
- char fileName[2048];\r
+ char fileName[2048];\r
\r
- nCurrentIndex++;\r
+ nCurrentIndex++;\r
\r
- if (nCurrentIndex > nEndIndex)\r
- return NULL;\r
+ if (nCurrentIndex > nEndIndex)\r
+ return NULL;\r
\r
- std::sprintf(fileName, sFileNameMask, nCurrentIndex);\r
+ std::sprintf(fileName, sFileNameMask, nCurrentIndex);\r
\r
- IplImage* img = cvLoadImage(fileName);\r
+ IplImage* img = cvLoadImage(fileName);\r
\r
- return img;\r
+ return img;\r
};\r
\r
void ASDFrameSequencerImageFile::close()\r
{\r
- nCurrentIndex = nEndIndex+1;\r
+ nCurrentIndex = nEndIndex+1;\r
};\r
\r
bool ASDFrameSequencerImageFile::isOpen()\r
{\r
- return (nCurrentIndex <= nEndIndex);\r
+ return (nCurrentIndex <= nEndIndex);\r
};\r
\r
-void putTextWithShadow(IplImage *img, const char *str, CvPoint point, CvFont *font, CvScalar color = CV_RGB(255, 255, 128))\r
+static void putTextWithShadow(IplImage *img, const char *str, CvPoint point, CvFont *font, CvScalar color = CV_RGB(255, 255, 128))\r
{\r
- cvPutText(img, str, cvPoint(point.x-1,point.y-1), font, CV_RGB(0, 0, 0));\r
- cvPutText(img, str, point, font, color);\r
+ cvPutText(img, str, cvPoint(point.x-1,point.y-1), font, CV_RGB(0, 0, 0));\r
+ cvPutText(img, str, point, font, color);\r
};\r
\r
-#define ASD_RGB_SET_PIXEL(pointer, r, g, b) { (*pointer) = (unsigned char)b; (*(pointer+1)) = (unsigned char)g; (*(pointer+2)) = (unsigned char)r; }\r
+#define ASD_RGB_SET_PIXEL(pointer, r, g, b) { (*pointer) = (unsigned char)b; (*(pointer+1)) = (unsigned char)g; (*(pointer+2)) = (unsigned char)r; }\r
\r
#define ASD_RGB_GET_PIXEL(pointer, r, g, b) {b = (unsigned char)(*(pointer)); g = (unsigned char)(*(pointer+1)); r = (unsigned char)(*(pointer+2));}\r
\r
-void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gValue, int bValue)\r
+static void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gValue, int bValue)\r
{\r
- int x, y, nWidth, nHeight;\r
- double destX, destY, dx, dy;\r
- uchar c;\r
- unsigned char *pSrc;\r
-\r
- nWidth = buffer->width;\r
- nHeight = buffer->height;\r
-\r
- dx = double(rgbDestImage->width)/double(nWidth);\r
- dy = double(rgbDestImage->height)/double(nHeight);\r
-\r
- destX = 0;\r
- for (x = 0; x < nWidth; x++)\r
- {\r
- destY = 0;\r
- for (y = 0; y < nHeight; y++)\r
- {\r
- c = ((uchar*)(buffer->imageData + buffer->widthStep*y))[x];\r
-\r
- if (c)\r
- {\r
- pSrc = (unsigned char *)rgbDestImage->imageData + rgbDestImage->widthStep*int(destY) + (int(destX)*rgbDestImage->nChannels);\r
- ASD_RGB_SET_PIXEL(pSrc, rValue, gValue, bValue);\r
- }\r
- destY += dy;\r
- }\r
- destY = 0;\r
- destX += dx;\r
- }\r
+ int x, y, nWidth, nHeight;\r
+ double destX, destY, dx, dy;\r
+ uchar c;\r
+ unsigned char *pSrc;\r
+\r
+ nWidth = buffer->width;\r
+ nHeight = buffer->height;\r
+\r
+ dx = double(rgbDestImage->width)/double(nWidth);\r
+ dy = double(rgbDestImage->height)/double(nHeight);\r
+\r
+ destX = 0;\r
+ for (x = 0; x < nWidth; x++)\r
+ {\r
+ destY = 0;\r
+ for (y = 0; y < nHeight; y++)\r
+ {\r
+ c = ((uchar*)(buffer->imageData + buffer->widthStep*y))[x];\r
+\r
+ if (c)\r
+ {\r
+ pSrc = (unsigned char *)rgbDestImage->imageData + rgbDestImage->widthStep*int(destY) + (int(destX)*rgbDestImage->nChannels);\r
+ ASD_RGB_SET_PIXEL(pSrc, rValue, gValue, bValue);\r
+ }\r
+ destY += dy;\r
+ }\r
+ destY = 0;\r
+ destX += dx;\r
+ }\r
};\r
\r
int main(int argc, char** argv )\r
{\r
- IplImage *img, *filterMask = NULL;\r
- CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);\r
- ASDFrameSequencer *sequencer;\r
- CvFont base_font;\r
- char caption[2048], s[256], windowName[256];\r
- long int clockTotal = 0, numFrames = 0;\r
- std::clock_t clock;\r
-\r
- if (argc < 4)\r
- {\r
- help(argv);\r
- sequencer = new ASDFrameSequencerWebCam();\r
- (dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);\r
-\r
- if (! sequencer->isOpen())\r
- {\r
- std::cout << std::endl << "Error: Cannot initialize the default Webcam" << std::endl << std::endl;\r
- }\r
- }\r
- else\r
- {\r
- sequencer = new ASDFrameSequencerImageFile();\r
- (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here\r
-\r
- }\r
- std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");\r
-\r
- cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);\r
- cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);\r
-\r
- // Usage:\r
- // c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000\r
-\r
- std::cout << "Press ESC to stop." << std::endl << std::endl;\r
- while ((img = sequencer->getNextImage()) != 0)\r
- {\r
- numFrames++;\r
-\r
- if (filterMask == NULL)\r
- {\r
- filterMask = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1);\r
- }\r
- clock = std::clock();\r
- filter.process(img, filterMask); // DETECT SKIN\r
- clockTotal += (std::clock() - clock);\r
-\r
- displayBuffer(img, filterMask, 0, 255, 0);\r
-\r
- sequencer->getFrameCaption(caption);\r
- std::sprintf(s, "%s - %d x %d", caption, img->width, img->height);\r
- putTextWithShadow(img, s, cvPoint(10, img->height-35), &base_font);\r
-\r
- std::sprintf(s, "Average processing time per frame: %5.2fms", (double(clockTotal*1000/CLOCKS_PER_SEC))/numFrames);\r
- putTextWithShadow(img, s, cvPoint(10, img->height-15), &base_font);\r
-\r
- cvShowImage (windowName, img);\r
- cvReleaseImage(&img);\r
-\r
- if (cvWaitKey(1) == 27)\r
- break;\r
- }\r
-\r
- sequencer->close();\r
- delete sequencer;\r
-\r
- cvReleaseImage(&filterMask);\r
-\r
- cvDestroyWindow(windowName);\r
-\r
- std::cout << "Finished, " << numFrames << " frames processed." << std::endl;\r
-\r
- return 0;\r
+ IplImage *img, *filterMask = NULL;\r
+ CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);\r
+ ASDFrameSequencer *sequencer;\r
+ CvFont base_font;\r
+ char caption[2048], s[256], windowName[256];\r
+ long int clockTotal = 0, numFrames = 0;\r
+ std::clock_t clock;\r
+\r
+ if (argc < 4)\r
+ {\r
+ help(argv);\r
+ sequencer = new ASDFrameSequencerWebCam();\r
+ (dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);\r
+\r
+ if (! sequencer->isOpen())\r
+ {\r
+ std::cout << std::endl << "Error: Cannot initialize the default Webcam" << std::endl << std::endl;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ sequencer = new ASDFrameSequencerImageFile();\r
+        (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from a video source is stored here\r
+\r
+ }\r
+ std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");\r
+\r
+ cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);\r
+ cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);\r
+\r
+ // Usage:\r
+ // c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000\r
+\r
+ std::cout << "Press ESC to stop." << std::endl << std::endl;\r
+ while ((img = sequencer->getNextImage()) != 0)\r
+ {\r
+ numFrames++;\r
+\r
+ if (filterMask == NULL)\r
+ {\r
+ filterMask = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1);\r
+ }\r
+ clock = std::clock();\r
+ filter.process(img, filterMask); // DETECT SKIN\r
+ clockTotal += (std::clock() - clock);\r
+\r
+ displayBuffer(img, filterMask, 0, 255, 0);\r
+\r
+ sequencer->getFrameCaption(caption);\r
+ std::sprintf(s, "%s - %d x %d", caption, img->width, img->height);\r
+ putTextWithShadow(img, s, cvPoint(10, img->height-35), &base_font);\r
+\r
+ std::sprintf(s, "Average processing time per frame: %5.2fms", (double(clockTotal*1000/CLOCKS_PER_SEC))/numFrames);\r
+ putTextWithShadow(img, s, cvPoint(10, img->height-15), &base_font);\r
+\r
+ cvShowImage (windowName, img);\r
+ cvReleaseImage(&img);\r
+\r
+ if (cvWaitKey(1) == 27)\r
+ break;\r
+ }\r
+\r
+ sequencer->close();\r
+ delete sequencer;\r
+\r
+ cvReleaseImage(&filterMask);\r
+\r
+ cvDestroyWindow(windowName);\r
+\r
+ std::cout << "Finished, " << numFrames << " frames processed." << std::endl;\r
+\r
+ return 0;\r
}\r
\r
// Background average sample code done with averages and done with codebooks
// (adapted from the OpenCV book sample)
-//
+//
// NOTE: To get the keyboard to work, you *have* to have one of the video windows be active
// and NOT the console window.
//
// Gary Bradski Oct 3, 2008.
-//
+//
/* *************** License:**************************
Oct. 3, 2008
   Right to use this code in any way you want without warranty, support or any guarantee of it working.
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
-
- AVAILABLE AT:
+
+ AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
- ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
+ ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
************************************************** */
#include "opencv2/core/core.hpp"
#include "opencv2/video/background_segm.hpp"
const int NCHANNELS = 3;
bool ch[NCHANNELS]={true,true,true}; // This sets what channels should be adjusted for background bounds
-void help()
+static void help()
{
printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
- "Originally from the book: Learning OpenCV by O'Reilly press\n"
+ "Originally from the book: Learning OpenCV by O'Reilly press\n"
"\nUSAGE:\n"
" bgfg_codebook [--nframes(-nf)=300] [--movie_filename(-mf)=tree.avi] [--camera(-c), use camera or not]\n"
"***Keep the focus on the video windows, NOT the consol***\n\n"
"INTERACTIVE PARAMETERS:\n"
"\tESC,q,Q - quit the program\n"
- "\th - print this help\n"
- "\tp - pause toggle\n"
- "\ts - single step\n"
- "\tr - run mode (single step off)\n"
+ "\th - print this help\n"
+ "\tp - pause toggle\n"
+ "\ts - single step\n"
+ "\tr - run mode (single step off)\n"
"=== AVG PARAMS ===\n"
"\t- - bump high threshold UP by 0.25\n"
"\t= - bump high threshold DOWN by 0.25\n"
"\t] - bump low threshold DOWN by 0.25\n"
"=== CODEBOOK PARAMS ===\n"
"\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
- "\ta - adjust all 3 channels at once\n"
- "\tb - adjust both 2 and 3 at once\n"
- "\ti,o - bump upper threshold up,down by 1\n"
- "\tk,l - bump lower threshold up,down by 1\n"
+ "\ta - adjust all 3 channels at once\n"
+ "\tb - adjust both 2 and 3 at once\n"
+ "\ti,o - bump upper threshold up,down by 1\n"
+ "\tk,l - bump lower threshold up,down by 1\n"
"\tSPACE - reset the model\n"
);
}
int c, n, nframes = 0;
model = cvCreateBGCodeBookModel();
-
+
//Set color thresholds to default values
model->modMin[0] = 3;
model->modMin[1] = model->modMin[2] = 3;
{
rawImage = cvQueryFrame( capture );
++nframes;
- if(!rawImage)
+ if(!rawImage)
break;
}
if( singlestep )
pause = true;
-
+
//First time:
if( nframes == 1 && rawImage )
{
ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
cvSet(ImaskCodeBook,cvScalar(255));
-
+
cvNamedWindow( "Raw", 1 );
cvNamedWindow( "ForegroundCodeBook",1);
cvNamedWindow( "CodeBook_ConnectComp",1);
}
- // If we've got an rawImage and are good to go:
+ // If we've got an rawImage and are good to go:
if( rawImage )
{
cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
if( nframes-1 == nframesToLearnBG )
cvBGCodeBookClearStale( model, model->t/2 );
-
+
//Find the foreground if any
if( nframes-1 >= nframesToLearnBG )
{
// Find foreground by codebook method
cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );
// This part just to visualize bounding boxes and centers if desired
- cvCopy(ImaskCodeBook,ImaskCodeBookCC);
+ cvCopy(ImaskCodeBook,ImaskCodeBookCC);
cvSegmentFGMask( ImaskCodeBookCC );
}
//Display
case 'u': case '1':
case 'v': case '2':
case 'a': case '3':
- case 'b':
+ case 'b':
ch[0] = c == 'y' || c == '0' || c == 'a' || c == '3';
ch[1] = c == 'u' || c == '1' || c == 'a' || c == '3' || c == 'b';
ch[2] = c == 'v' || c == '2' || c == 'a' || c == '3' || c == 'b';
}
break;
}
- }
-
+ }
+
cvReleaseCapture( &capture );
cvDestroyWindow( "Raw" );
cvDestroyWindow( "ForegroundCodeBook");
CvBlobTrackerAutoParam1 param = {0};
CvBlobTrackerAuto* pTracker = NULL;
- float scale = 1;
+ //float scale = 1;
const char* scale_name = NULL;
char* yml_name = NULL;
char** yml_video_names = NULL;
const char* bta_name = NULL;
char* bta_data_name = NULL;
char* track_name = NULL;
- char* comment_name = NULL;
+ //char* comment_name = NULL;
char* FGTrainFrames = NULL;
char* log_name = NULL;
char* savestate_name = NULL;
RO("bta_data=",bta_data_name);
RO("btgen=",btgen_name);
RO("track=",track_name);
- RO("comment=",comment_name);
+ //RO("comment=",comment_name);
RO("FGTrainFrames=",FGTrainFrames);
RO("log=",log_name);
RO("savestate=",savestate_name);
if(!scale_name) scale_name = "1";
}
- if(scale_name)
- scale = (float)atof(scale_name);
+// if(scale_name)
+ // scale = (float)atof(scale_name);
for(pFGModule=FGDetector_Modules; pFGModule->nickname; ++pFGModule)
if( fg_name && MY_STRICMP(fg_name,pFGModule->nickname)==0 ) break;
-#include <opencv2/imgproc/imgproc_c.h>
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
-void help()
+
+static void help(void)
{
- printf("\nThis program creates an image to demonstrate the use of the \"c\" contour\n"
- "functions: cvFindContours() and cvApproxPoly() along with the storage\n"
- "functions cvCreateMemStorage() and cvDrawContours().\n"
- "It also shows the use of a trackbar to control contour retrieval.\n"
- "\n"
+ printf("\nThis program creates an image to demonstrate the use of the \"c\" contour\n"
+ "functions: cvFindContours() and cvApproxPoly() along with the storage\n"
+ "functions cvCreateMemStorage() and cvDrawContours().\n"
+ "It also shows the use of a trackbar to control contour retrieval.\n"
+ "\n"
"Usage :\n"
- "./contours\n");
+ "./contours\n");
}
#define w 500
int levels = 3;
CvSeq* contours = 0;
-void on_trackbar(int pos)
+static void on_trackbar(int pos)
{
IplImage* cnt_img = cvCreateImage( cvSize(w,w), 8, 3 );
CvSeq* _contours = contours;
cvZero(mask);
cvRectangle( mask, cvPoint(0, 0), cvPoint(mask->width-1, mask->height-1),
cvScalarAll(1), 1, 8, 0 );
-
+
for( y = 0; y < img->height; y++ )
for( x = 0; x < img->width; x++ )
{
}
-int main()
+int main(int argc, char* argv[])
{
int i, j;
CvMemStorage* storage = cvCreateMemStorage(0);
cvFindContours( img32s, storage, &contours, sizeof(CvContour),
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
-
+
//cvFindContours( img, storage, &contours, sizeof(CvContour),
// CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
-
-
+
+
{
const char* attrs[] = {"recursive", "1", 0};
cvSave("contours.xml", contours, 0, 0, cvAttrList(attrs, 0));
{
CvRNG rng = cvRNG(-1);
-
+
CvSeq* tcontours = contours;
cvCvtColor( img, img3, CV_GRAY2BGR );
while( tcontours->h_next )
cvDrawContours(img3, tcontours->v_next, color, color, 1, -1, 8, cvPoint(0,0));
}
}
-
+
}
-
+
cvShowImage( "colored", img3 );
on_trackbar(0);
cvWaitKey(0);
cvReleaseImage( &img32f );
cvReleaseImage( &img32s );
cvReleaseImage( &img3 );
-
+
return 0;
}
#include <ctype.h>
#include <stdio.h>
-void help()
+static void help(void)
{
printf("\n This sample demonstrates cascade's convertation \n"
"Usage:\n"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>
-void help()
+static void help( void )
{
printf("\nThis program demostrates iterative construction of\n"
"delaunay triangulation and voronoi tesselation.\n"
"hitting any key.\n");
}
-CvSubdiv2D* init_delaunay( CvMemStorage* storage,
+static CvSubdiv2D* init_delaunay( CvMemStorage* storage,
CvRect rect )
{
CvSubdiv2D* subdiv;
}
-void draw_subdiv_point( IplImage* img, CvPoint2D32f fp, CvScalar color )
+static void draw_subdiv_point( IplImage* img, CvPoint2D32f fp, CvScalar color )
{
cvCircle( img, cvPoint(cvRound(fp.x), cvRound(fp.y)), 3, color, CV_FILLED, 8, 0 );
}
-void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
+static void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
{
CvSubdiv2DPoint* org_pt;
CvSubdiv2DPoint* dst_pt;
}
-void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,
+static void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,
CvScalar delaunay_color, CvScalar voronoi_color )
{
CvSeqReader reader;
}
-void locate_point( CvSubdiv2D* subdiv, CvPoint2D32f fp, IplImage* img,
+static void locate_point( CvSubdiv2D* subdiv, CvPoint2D32f fp, IplImage* img,
CvScalar active_color )
{
CvSubdiv2DEdge e;
}
-void draw_subdiv_facet( IplImage* img, CvSubdiv2DEdge edge )
+static void draw_subdiv_facet( IplImage* img, CvSubdiv2DEdge edge )
{
CvSubdiv2DEdge t = edge;
int i, count = 0;
free( buf );
}
-void paint_voronoi( CvSubdiv2D* subdiv, IplImage* img )
+static void paint_voronoi( CvSubdiv2D* subdiv, IplImage* img )
{
CvSeqReader reader;
int i, total = subdiv->edges->total;
}
-void run(void)
+static void run(void)
{
char win[] = "source";
int i;
using namespace std;
using namespace cv;
-void help()
+static void help()
{
cout << "\nThis program demonstrates the cascade recognizer. Now you can use Haar or LBP features.\n"
"This classifier can recognize many ~rigid objects, it's most known use is for faces.\n"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h>
-void help()
+
+static void help(void)
{
- printf(
+ printf(
"\n This program demonstrate dense \"Farneback\n optical flow\n"
- "It read from camera 0, and shows how to use and display dense Franeback optical flow\n"
+        "It reads from camera 0, and shows how to use and display dense Farneback optical flow\n"
"Usage: \n"
"./fback_c \n");
}
-void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
+
+static void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
double scale, CvScalar color)
{
int x, y;
}
}
-int main()
+int main( int argc, char** argv )
{
CvCapture* capture = cvCreateCameraCapture(0);
CvMat* prevgray = 0, *gray = 0, *flow = 0, *cflow = 0;
if( !capture )
return -1;
-
+
cvNamedWindow("flow", 1);
-
+
for(;;)
{
int firstFrame = gray == 0;
cflow = cvCreateMat(gray->rows, gray->cols, CV_8UC3);
}
cvCvtColor(frame, gray, CV_BGR2GRAY);
-
+
if( !firstFrame )
{
cvCalcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
if(cvWaitKey(30)>=0)
break;
{
- CvMat* temp;
+ CvMat* temp;
CV_SWAP(prevgray, gray, temp);
}
}
#include <stdio.h>
using namespace std;
-void help()
+static void help()
{
printf(
"This program demonstrated the use of the SURF Detector and Descriptor using\n"
// define whether to use approximate nearest-neighbor search
#define USE_FLANN
+#ifdef USE_FLANN
+static void
+flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,
+ const CvSeq*, const CvSeq* imageDescriptors, vector<int>& ptpairs )
+{
+ int length = (int)(objectDescriptors->elem_size/sizeof(float));
+
+ cv::Mat m_object(objectDescriptors->total, length, CV_32F);
+ cv::Mat m_image(imageDescriptors->total, length, CV_32F);
+
+
+ // copy descriptors
+ CvSeqReader obj_reader;
+ float* obj_ptr = m_object.ptr<float>(0);
+ cvStartReadSeq( objectDescriptors, &obj_reader );
+ for(int i = 0; i < objectDescriptors->total; i++ )
+ {
+ const float* descriptor = (const float*)obj_reader.ptr;
+ CV_NEXT_SEQ_ELEM( obj_reader.seq->elem_size, obj_reader );
+ memcpy(obj_ptr, descriptor, length*sizeof(float));
+ obj_ptr += length;
+ }
+ CvSeqReader img_reader;
+ float* img_ptr = m_image.ptr<float>(0);
+ cvStartReadSeq( imageDescriptors, &img_reader );
+ for(int i = 0; i < imageDescriptors->total; i++ )
+ {
+ const float* descriptor = (const float*)img_reader.ptr;
+ CV_NEXT_SEQ_ELEM( img_reader.seq->elem_size, img_reader );
+ memcpy(img_ptr, descriptor, length*sizeof(float));
+ img_ptr += length;
+ }
-IplImage* image = 0;
+ // find nearest neighbors using FLANN
+ cv::Mat m_indices(objectDescriptors->total, 2, CV_32S);
+ cv::Mat m_dists(objectDescriptors->total, 2, CV_32F);
+ cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4)); // using 4 randomized kdtrees
+ flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked
-double
+ int* indices_ptr = m_indices.ptr<int>(0);
+ float* dists_ptr = m_dists.ptr<float>(0);
+ for (int i=0;i<m_indices.rows;++i) {
+ if (dists_ptr[2*i]<0.6*dists_ptr[2*i+1]) {
+ ptpairs.push_back(i);
+ ptpairs.push_back(indices_ptr[2*i]);
+ }
+ }
+}
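The 0.6 factor in the loop above is the nearest-neighbour distance-ratio test: with d_1(i) and d_2(i) the two nearest-neighbour distances returned by knnSearch, a match is kept only if

    d_1(i) < 0.6\, d_2(i)

i.e. only when the best match is clearly better than the runner-up.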
+#else
+
+static double
compareSURFDescriptors( const float* d1, const float* d2, double best, int length )
{
double total_cost = 0;
return total_cost;
}
-
-int
+static int
naiveNearestNeighbor( const float* vec, int laplacian,
const CvSeq* model_keypoints,
const CvSeq* model_descriptors )
{
const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
const float* mvec = (const float*)reader.ptr;
- CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
+ CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
if( laplacian != kp->laplacian )
continue;
return -1;
}
-void
+static void
findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, vector<int>& ptpairs )
{
}
}
}
-
-
-void
-flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,
- const CvSeq*, const CvSeq* imageDescriptors, vector<int>& ptpairs )
-{
- int length = (int)(objectDescriptors->elem_size/sizeof(float));
-
- cv::Mat m_object(objectDescriptors->total, length, CV_32F);
- cv::Mat m_image(imageDescriptors->total, length, CV_32F);
-
-
- // copy descriptors
- CvSeqReader obj_reader;
- float* obj_ptr = m_object.ptr<float>(0);
- cvStartReadSeq( objectDescriptors, &obj_reader );
- for(int i = 0; i < objectDescriptors->total; i++ )
- {
- const float* descriptor = (const float*)obj_reader.ptr;
- CV_NEXT_SEQ_ELEM( obj_reader.seq->elem_size, obj_reader );
- memcpy(obj_ptr, descriptor, length*sizeof(float));
- obj_ptr += length;
- }
- CvSeqReader img_reader;
- float* img_ptr = m_image.ptr<float>(0);
- cvStartReadSeq( imageDescriptors, &img_reader );
- for(int i = 0; i < imageDescriptors->total; i++ )
- {
- const float* descriptor = (const float*)img_reader.ptr;
- CV_NEXT_SEQ_ELEM( img_reader.seq->elem_size, img_reader );
- memcpy(img_ptr, descriptor, length*sizeof(float));
- img_ptr += length;
- }
-
- // find nearest neighbors using FLANN
- cv::Mat m_indices(objectDescriptors->total, 2, CV_32S);
- cv::Mat m_dists(objectDescriptors->total, 2, CV_32F);
- cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4)); // using 4 randomized kdtrees
- flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked
-
- int* indices_ptr = m_indices.ptr<int>(0);
- float* dists_ptr = m_dists.ptr<float>(0);
- for (int i=0;i<m_indices.rows;++i) {
- if (dists_ptr[2*i]<0.6*dists_ptr[2*i+1]) {
- ptpairs.push_back(i);
- ptpairs.push_back(indices_ptr[2*i]);
- }
- }
-}
-
+#endif
/* a rough implementation for object location */
-int
+static int
locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
const CvPoint src_corners[4], CvPoint dst_corners[4] )
cvNamedWindow("Object", 1);
cvNamedWindow("Object Correspond", 1);
- static CvScalar colors[] =
+ static CvScalar colors[] =
{
{{0,0,255}},
{{0,128,255}},
using namespace std;
using namespace cv;
-void help()
+static void help()
{
cout << "This program shows the use of the Calonder point descriptor classifier"
"SURF is used to detect interest points, Calonder is used to describe/match these points\n"
/*
* Generates random perspective transform of image
*/
-void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
+static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
{
H.create(3, 3, CV_32FC1);
H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
*
* To train Calonder classifier RTreeClassifier class need to be used.
*/
-void trainCalonderClassifier( const string& classifierFilename, const string& imgFilename )
+static void trainCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
// Reads train images
ifstream is( imgFilename.c_str(), ifstream::in );
* but it is convenient to use CalonderDescriptorExtractor class which is wrapper of
* RTreeClassifier.
*/
-void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
+static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
Mat img1 = imread( imgFilename, CV_LOAD_IMAGE_GRAYSCALE ), img2, H12;
if( img1.empty() )
#include <stdio.h>
using namespace cv;
-void help()
+static void help()
{
printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
"descriptor classifier\n"
#include "opencv2/highgui/highgui.hpp"\r
#include <stdio.h>\r
\r
-#ifdef HAVE_CVCONFIG_H \r
-#include <cvconfig.h> \r
+#ifdef HAVE_CVCONFIG_H\r
+#include <cvconfig.h>\r
#endif\r
#ifdef HAVE_TBB\r
#include "tbb/task_scheduler_init.h"\r
\r
using namespace cv;\r
\r
-void help()\r
+static void help()\r
{\r
- printf( "This program demonstrated the use of the latentSVM detector.\n"\r
- "It reads in a trained object model and then uses that to detect the object in an image\n"\r
- "Call:\n"\r
+    printf( "This program demonstrates the use of the latentSVM detector.\n"\r
+ "It reads in a trained object model and then uses that to detect the object in an image\n"\r
+ "Call:\n"\r
"./latentsvmdetect [<image_filename> <model_filename> [<threads_number>]]\n"\r
- " The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"\r
- " Press any key to quit.\n");\r
+ " The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"\r
+ " Press any key to quit.\n");\r
}\r
\r
const char* model_filename = "cat.xml";\r
const char* image_filename = "cat.jpg";\r
int tbbNumThreads = -1;\r
\r
-void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)\r
+static void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)\r
{\r
CvMemStorage* storage = cvCreateMemStorage(0);\r
CvSeq* detections = 0;\r
int i = 0;\r
- int64 start = 0, finish = 0;\r
+ int64 start = 0, finish = 0;\r
#ifdef HAVE_TBB\r
tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);\r
- if (numThreads > 0)\r
- {\r
- init.initialize(numThreads);\r
+ if (numThreads > 0)\r
+ {\r
+ init.initialize(numThreads);\r
printf("Number of threads %i\n", numThreads);\r
- }\r
- else\r
- {\r
- printf("Number of threads is not correct for TBB version");\r
- return;\r
- }\r
+ }\r
+ else\r
+ {\r
+ printf("Number of threads is not correct for TBB version");\r
+ return;\r
+ }\r
#endif\r
\r
- start = cvGetTickCount();\r
+ start = cvGetTickCount();\r
detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);\r
- finish = cvGetTickCount();\r
- printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));\r
+ finish = cvGetTickCount();\r
+ printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));\r
\r
#ifdef HAVE_TBB\r
init.terminate();\r
for( i = 0; i < detections->total; i++ )\r
{\r
CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i );\r
- CvRect bounding_box = detection.rect;\r
+ CvRect bounding_box = detection.rect;\r
cvRectangle( image, cvPoint(bounding_box.x, bounding_box.y),\r
- cvPoint(bounding_box.x + bounding_box.width, \r
- bounding_box.y + bounding_box.height),\r
+ cvPoint(bounding_box.x + bounding_box.width,\r
+ bounding_box.y + bounding_box.height),\r
CV_RGB(255,0,0), 3 );\r
}\r
cvReleaseMemStorage( &storage );\r
\r
int main(int argc, char* argv[])\r
{\r
- help();\r
- if (argc > 2)\r
- {\r
- image_filename = argv[1];\r
- model_filename = argv[2];\r
+ help();\r
+ if (argc > 2)\r
+ {\r
+ image_filename = argv[1];\r
+ model_filename = argv[2];\r
if (argc > 3)\r
{\r
tbbNumThreads = atoi(argv[3]);\r
}\r
- }\r
- IplImage* image = cvLoadImage(image_filename);\r
- if (!image)\r
- {\r
- printf( "Unable to load the image\n"\r
+ }\r
+ IplImage* image = cvLoadImage(image_filename);\r
+ if (!image)\r
+ {\r
+ printf( "Unable to load the image\n"\r
"Pass it as the first parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );\r
- return -1;\r
- }\r
+ return -1;\r
+ }\r
CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_filename);\r
- if (!detector)\r
- {\r
- printf( "Unable to load the model\n"\r
+ if (!detector)\r
+ {\r
+ printf( "Unable to load the model\n"\r
"Pass it as the second parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );\r
- cvReleaseImage( &image );\r
- return -1;\r
- }\r
+ cvReleaseImage( &image );\r
+ return -1;\r
+ }\r
detect_and_draw_objects( image, detector, tbbNumThreads );\r
cvNamedWindow( "test", 0 );\r
cvShowImage( "test", image );\r
cvReleaseLatentSvmDetector( &detector );\r
cvReleaseImage( &image );\r
cvDestroyAllWindows();\r
- \r
- return 0;\r
+\r
+ return 0;\r
}\r
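
The hunk above wraps cvLatentSvmDetectObjects() in the C-API tick counters; cvGetTickFrequency() reports ticks per microsecond, so dividing by frequency * 1e6 yields seconds. As a minimal sketch of the same timing idiom with the C++ counterparts (cv::getTickFrequency() returns ticks per second), with a placeholder workload standing in for the detector call:

#include <opencv2/core/core.hpp>
#include <cstdio>

int main()
{
    double t = (double)cv::getTickCount();                  // start tick count
    cv::Mat m = cv::Mat::eye(1000, 1000, CV_32F) * 3.0;     // placeholder workload
    t = ((double)cv::getTickCount() - t) / cv::getTickFrequency(); // elapsed seconds
    printf("elapsed time = %.3f ms (diagonal sum = %.0f)\n", t * 1000.0, cv::sum(m)[0]);
    return 0;
}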
int erode_dilate_pos = 0;
// callback function for open/close trackbar
-void OpenClose(int pos)
+static void OpenClose(int pos)
{
int n = open_close_pos - max_iters;
int an = n > 0 ? n : -n;
}
// callback function for erode/dilate trackbar
-void ErodeDilate(int pos)
+static void ErodeDilate(int pos)
{
int n = erode_dilate_pos - max_iters;
int an = n > 0 ? n : -n;
cvShowImage("Erode/Dilate",dst);
}
-void help()
+static void help(void)
{
printf( "This program demonstrated the use of the morphology operator, especially open, close, erode, dilate operations\n"
"Morphology operators are built on max (close) and min (open) operators as measured by pixels covered by small structuring elements.\n"
#include <stdio.h>
#include <ctype.h>
-void help()
+static void help(void)
{
printf(
"\nThis program demonstrated the use of motion templates -- basically using the gradients\n"
// img - input video frame
// dst - resultant motion picture
// args - optional parameters
-void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
+static void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
CvSize size = cvSize(img->width,img->height); // get current frame size
using namespace cv;
using namespace std;
-void help()
+static void help()
{
- cout << "\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
+ cout << "\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
"It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
"\nCall:\n"
"./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n";
}
-static const Vec3b bcolors[] =
+static const Vec3b bcolors[] =
{
Vec3b(0,0,255),
Vec3b(0,128,255),
int main( int argc, char** argv )
{
- string path;
- Mat img0, img, yuv, gray, ellipses;
- help();
-
+ string path;
+ Mat img0, img, yuv, gray, ellipses;
+ help();
+
img0 = imread( argc != 2 ? "puzzle.png" : argv[1], 1 );
if( img0.empty() )
{
cout << "Unable to load image " << argv[1] << endl;
return 0;
}
-
- cvtColor(img0, yuv, COLOR_BGR2YCrCb);
+
+ cvtColor(img0, yuv, COLOR_BGR2YCrCb);
cvtColor(img0, gray, COLOR_BGR2GRAY);
cvtColor(gray, img, COLOR_GRAY2BGR);
img.copyTo(ellipses);
-
+
vector<vector<Point> > contours;
- double t = (double)getTickCount();
+ double t = (double)getTickCount();
MSER()(yuv, contours);
- t = (double)getTickCount() - t;
- printf( "MSER extracted %d contours in %g ms.\n", (int)contours.size(),
+ t = (double)getTickCount() - t;
+ printf( "MSER extracted %d contours in %g ms.\n", (int)contours.size(),
t*1000./getTickFrequency() );
-
- // draw mser's with different colors
- for( int i = (int)contours.size()-1; i >= 0; i-- )
- {
- const vector<Point>& r = contours[i];
- for ( int j = 0; j < (int)r.size(); j++ )
- {
- Point pt = r[j];
+
+ // draw mser's with different colors
+ for( int i = (int)contours.size()-1; i >= 0; i-- )
+ {
+ const vector<Point>& r = contours[i];
+ for ( int j = 0; j < (int)r.size(); j++ )
+ {
+ Point pt = r[j];
img.at<Vec3b>(pt) = bcolors[i%9];
- }
-
+ }
+
// find ellipse (it seems cvFitEllipse2 may have an error or something?)
RotatedRect box = fitEllipse( r );
-
+
box.angle=(float)CV_PI/2-box.angle;
ellipse( ellipses, box, Scalar(196,255,255), 2 );
- }
-
- imshow( "original", img0 );
- imshow( "response", img );
- imshow( "ellipses", ellipses );
-
- waitKey(0);
+ }
+
+ imshow( "original", img0 );
+ imshow( "response", img );
+ imshow( "ellipses", ellipses );
+
+ waitKey(0);
}
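
For reference, a condensed variant of the MSER call used above, assuming the OpenCV 2.x functor interface that the sample itself relies on (cv::MSER's operator() fills a vector of point lists); "puzzle.png" is just the sample's default file name and stands in for any image:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("puzzle.png", 0);             // load as grayscale
    if( gray.empty() )
        return -1;

    std::vector<std::vector<cv::Point> > regions;
    cv::MSER()(gray, regions);                              // detect stable regions with default parameters

    cv::Mat vis;
    cv::cvtColor(gray, vis, cv::COLOR_GRAY2BGR);
    for( size_t i = 0; i < regions.size(); i++ )
        if( regions[i].size() >= 5 )                        // fitEllipse needs at least 5 points
            cv::ellipse(vis, cv::fitEllipse(regions[i]), cv::Scalar(0,255,0), 1);

    cv::imshow("mser regions", vis);
    cv::waitKey(0);
    return 0;
}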
#include "opencv2/ml/ml.hpp"
#include <stdio.h>
-void help()
+static void help()
{
- printf("\nThis program demonstrated the use of OpenCV's decision tree function for learning and predicting data\n"
+ printf("\nThis program demonstrated the use of OpenCV's decision tree function for learning and predicting data\n"
"Usage :\n"
"./mushroom <path to agaricus-lepiota.data>\n"
"\n"
"// the values are encoded by characters.\n\n");
}
-int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing, CvMat** responses )
+static int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing, CvMat** responses )
{
const int M = 1024;
FILE* f = fopen( filename, "rt" );
}
-CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
+static CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
const CvMat* responses, float p_weight )
{
CvDTree* dtree;
cvSet( var_type, cvScalarAll(CV_VAR_CATEGORICAL) ); // all the variables are categorical
dtree = new CvDTree;
-
+
dtree->train( data, CV_ROW_SAMPLE, responses, 0, 0, var_type, missing,
CvDTreeParams( 8, // max depth
10, // min sample count
};
-void print_variable_importance( CvDTree* dtree, const char** var_desc )
+static void print_variable_importance( CvDTree* dtree )
{
const CvMat* var_importance = dtree->get_var_importance();
int i;
for( i = 0; i < var_importance->cols*var_importance->rows; i++ )
{
double val = var_importance->data.db[i];
- if( var_desc )
- {
- char buf[100];
- int len = (int)(strchr( var_desc[i], '(' ) - var_desc[i] - 1);
- strncpy( buf, var_desc[i], len );
- buf[len] = '\0';
- printf( "%s", buf );
- }
- else
- printf( "var #%d", i );
+ char buf[100];
+ int len = (int)(strchr( var_desc[i], '(' ) - var_desc[i] - 1);
+ strncpy( buf, var_desc[i], len );
+ buf[len] = '\0';
+ printf( "%s", buf );
printf( ": %g%%\n", val*100. );
}
}
-void interactive_classification( CvDTree* dtree, const char** var_desc )
+static void interactive_classification( CvDTree* dtree )
{
char input[1000];
const CvDTreeNode* root;
for(;;)
{
const CvDTreeNode* node;
-
+
printf( "Start/Proceed with interactive mushroom classification (y/n): " );
int values_read = scanf( "%1s", input );
CV_Assert(values_read == 1);
if( input[0] != 'y' && input[0] != 'Y' )
break;
- printf( "Enter 1-letter answers, '?' for missing/unknown value...\n" );
+ printf( "Enter 1-letter answers, '?' for missing/unknown value...\n" );
// custom version of predict
node = root;
{
CvDTreeSplit* split = node->split;
int dir = 0;
-
+
if( !node->left || node->Tn <= dtree->get_pruned_tree_idx() || !node->split )
break;
else
printf( "Error: unrecognized value\n" );
}
-
+
if( !dir )
{
printf( "Impossible to classify the sample\n");
cvReleaseMat( &missing );
cvReleaseMat( &responses );
- print_variable_importance( dtree, var_desc );
- interactive_classification( dtree, var_desc );
+ print_variable_importance( dtree );
+ interactive_classification( dtree );
delete dtree;
return 0;
#include <string>
#include <stdio.h>
-void help()
+static void help()
{
printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
"Correspondences are drawn\n");
#include <ctype.h>
#include <stdio.h>
-void help()
+static void help( void )
{
printf("\nThis program illustrates Linear-Polar and Log-Polar image transforms\n"
"Usage :\n"
#include "opencv2/legacy/legacy.hpp"
#include <stdio.h>
-void help()
+static void help(void)
{
printf("\nThis program demonstrated color pyramid segmentation cvcvPyrSegmentation() which is controlled\n"
"by two trhesholds which can be manipulated by a trackbar. It can take an image file name or defaults to 'fruits.jpg'\n"
CvPoint pt1, pt2;
-void ON_SEGMENT(int a)
+static void ON_SEGMENT(int a)
{
cvPyrSegmentation(image0, image1, storage, &comp,
level, threshold1+1, threshold2+1);
#include <stdio.h>
#include <map>
-void help()
+static void help()
{
- printf(
- "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
- "CvDTree dtree;\n"
- "CvBoost boost;\n"
- "CvRTrees rtrees;\n"
- "CvERTrees ertrees;\n"
- "CvGBTrees gbtrees;\n"
- "Call:\n\t./tree_engine [-r <response_column>] [-c] <csv filename>\n"
+ printf(
+ "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
+ "CvDTree dtree;\n"
+ "CvBoost boost;\n"
+ "CvRTrees rtrees;\n"
+ "CvERTrees ertrees;\n"
+ "CvGBTrees gbtrees;\n"
+ "Call:\n\t./tree_engine [-r <response_column>] [-c] <csv filename>\n"
"where -r <response_column> specified the 0-based index of the response (0 by default)\n"
"-c specifies that the response is categorical (it's ordered by default) and\n"
"<csv filename> is the name of training data file in comma-separated value format\n\n");
}
-int count_classes(CvMLData& data)
+static int count_classes(CvMLData& data)
{
cv::Mat r(data.get_responses());
std::map<int, int> rmap;
int ival = cvRound(val);
if( ival != val )
return -1;
- rmap[ival] = 1;
+ rmap[ival] = 1;
}
return (int)rmap.size();
}
-void print_result(float train_err, float test_err, const CvMat* _var_imp)
+static void print_result(float train_err, float test_err, const CvMat* _var_imp)
{
printf( "train error %f\n", train_err );
printf( "test error %f\n\n", test_err );
-
+
if (_var_imp)
{
cv::Mat var_imp(_var_imp), sorted_idx;
cv::sortIdx(var_imp, sorted_idx, CV_SORT_EVERY_ROW + CV_SORT_DESCENDING);
-
+
printf( "variable importance:\n" );
int i, n = (int)var_imp.total();
int type = var_imp.type();
CV_Assert(type == CV_32F || type == CV_64F);
-
+
for( i = 0; i < n; i++)
{
int k = sorted_idx.at<int>(i);
const char* filename = 0;
int response_idx = 0;
bool categorical_response = false;
-
+
for(int i = 1; i < argc; i++)
{
if(strcmp(argv[i], "-r") == 0)
return -1;
}
}
-
+
printf("\nReading in %s...\n\n",filename);
CvDTree dtree;
CvBoost boost;
CvRTrees rtrees;
CvERTrees ertrees;
- CvGBTrees gbtrees;
+ CvGBTrees gbtrees;
CvMLData data;
-
+
CvTrainTestSplit spl( 0.5f );
-
+
if ( data.read_csv( filename ) == 0)
{
data.set_response_idx( response_idx );
if(categorical_response)
data.change_var_type( response_idx, CV_VAR_CATEGORICAL );
data.set_train_test_split( &spl );
-
+
printf("======DTREE=====\n");
dtree.train( &data, CvDTreeParams( 10, 2, 0, false, 16, 0, false, false, 0 ));
print_result( dtree.calc_error( &data, CV_TRAIN_ERROR), dtree.calc_error( &data, CV_TEST_ERROR ), dtree.get_var_importance() );
print_result( ertrees.calc_error( &data, CV_TRAIN_ERROR), ertrees.calc_error( &data, CV_TEST_ERROR ), ertrees.get_var_importance() );
printf("======GBTREES=====\n");
- if (categorical_response)
- gbtrees.train( &data, CvGBTreesParams(CvGBTrees::DEVIANCE_LOSS, 100, 0.1f, 0.8f, 5, false));
- else
- gbtrees.train( &data, CvGBTreesParams(CvGBTrees::SQUARED_LOSS, 100, 0.1f, 0.8f, 5, false));
+ if (categorical_response)
+ gbtrees.train( &data, CvGBTreesParams(CvGBTrees::DEVIANCE_LOSS, 100, 0.1f, 0.8f, 5, false));
+ else
+ gbtrees.train( &data, CvGBTreesParams(CvGBTrees::SQUARED_LOSS, 100, 0.1f, 0.8f, 5, false));
print_result( gbtrees.calc_error( &data, CV_TRAIN_ERROR), gbtrees.calc_error( &data, CV_TEST_ERROR ), 0 ); //doesn't compute importance
}
else
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
-void help()
+static void help()
{
printf( "\nThis is a camera calibration sample that calibrates 3 horizontally placed cameras together.\n"
"Usage: 3calibration\n"
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
-
+
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
vector<vector<Point2f> > imagePoints2,
- vector<vector<Point2f> > imagePoints3,
+ vector<vector<Point2f> > imagePoints3,
Size imageSize, Size boardSize,
float squareSize, float aspectRatio,
int flags,
Mat& R12, Mat& T12, Mat& R13, Mat& T13)
{
int c, i;
-
+
// step 1: calibrate each camera individually
vector<vector<Point3f> > objpt(1);
vector<vector<Point2f> > imgpt;
calcChessboardCorners(boardSize, squareSize, objpt[0]);
vector<Mat> rvecs, tvecs;
-
+
for( c = 1; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 1 ? imagePoints1 : c == 2 ? imagePoints2 : imagePoints3;
imgpt.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
-
+
if( imgpt.size() < 3 )
{
printf("Error: not enough views for camera %d\n", c);
}
objpt.resize(imgpt.size(),objpt[0]);
-
+
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
-
+
Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
-
+
double err = calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs,
flags|CV_CALIB_FIX_K3/*|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6*/);
return false;
}
printf("Camera %d calibration reprojection error = %g\n", c, sqrt(err/N));
-
+
if( c == 1 )
cameraMatrix1 = cameraMatrix, distCoeffs1 = distCoeffs;
else if( c == 2 )
else
cameraMatrix3 = cameraMatrix, distCoeffs3 = distCoeffs;
}
-
+
vector<vector<Point2f> > imgpt_right;
-
+
// step 2: calibrate (1,2) and (3,2) pairs
for( c = 2; c <= 3; c++ )
{
const vector<vector<Point2f> >& imgpt0 = c == 2 ? imagePoints2 : imagePoints3;
-
+
imgpt.clear();
imgpt_right.clear();
int N = 0;
-
+
for( i = 0; i < (int)std::min(imagePoints1.size(), imgpt0.size()); i++ )
if( !imagePoints1.empty() && !imgpt0[i].empty() )
{
imgpt_right.push_back(imgpt0[i]);
N += (int)imgpt0[i].size();
}
-
+
if( imgpt.size() < 3 )
{
printf("Error: not enough shared views for cameras 1 and %d\n", c);
return false;
}
-
+
objpt.resize(imgpt.size(),objpt[0]);
Mat cameraMatrix = c == 2 ? cameraMatrix2 : cameraMatrix3;
Mat distCoeffs = c == 2 ? distCoeffs2 : distCoeffs3;
R13 = R; T13 = T;
}
}
-
+
return true;
}
float squareSize = 1.f, aspectRatio = 1.f;
const char* outputFilename = "out_camera_data.yml";
const char* inputFilename = 0;
-
+
vector<vector<Point2f> > imgpt[3];
vector<string> imageList;
-
+
if(argc < 2)
{
- help();
- return 1;
+ help();
+ return 1;
}
-
+
for( i = 1; i < argc; i++ )
{
const char* s = argv[i];
else if( s[0] != '-' )
{
inputFilename = s;
- }
+ }
else
return fprintf( stderr, "Unknown option %s", s ), -1;
}
-
+
if( !inputFilename ||
!readStringList(inputFilename, imageList) ||
imageList.size() == 0 || imageList.size() % 3 != 0 )
printf("Error: the input image list is not specified, or can not be read, or the number of files is not divisible by 3\n");
return -1;
}
-
+
Mat view, viewGray;
Mat cameraMatrix[3], distCoeffs[3], R[3], P[3], R12, T12;
for( k = 0; k < 3; k++ )
distCoeffs[k] = Mat_<double>::zeros(5,1);
}
Mat R13=Mat_<double>::eye(3,3), T13=Mat_<double>::zeros(3,1);
-
+
FileStorage fs;
namedWindow( "Image View", 0 );
-
+
for( k = 0; k < 3; k++ )
imgpt[k].resize(imageList.size()/3);
-
+
for( i = 0; i < (int)(imageList.size()/3); i++ )
{
for( k = 0; k < 3; k++ )
int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
printf("%s\n", imageList[i*3+k].c_str());
view = imread(imageList[i*3+k], 1);
-
+
if(view.data)
{
vector<Point2f> ptvec;
imageSize = view.size();
cvtColor(view, viewGray, CV_BGR2GRAY);
bool found = findChessboardCorners( view, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH );
-
+
drawChessboardCorners( view, boardSize, Mat(ptvec), found );
if( found )
{
}
}
}
-
+
printf("Running calibration ...\n");
-
+
run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
boardSize, squareSize, aspectRatio, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5,
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
cameraMatrix[2], distCoeffs[2],
R12, T12, R13, T13);
-
+
fs.open(outputFilename, CV_STORAGE_WRITE);
-
+
fs << "cameraMatrix1" << cameraMatrix[0];
fs << "cameraMatrix2" << cameraMatrix[1];
fs << "cameraMatrix3" << cameraMatrix[2];
-
+
fs << "distCoeffs1" << distCoeffs[0];
fs << "distCoeffs2" << distCoeffs[1];
fs << "distCoeffs3" << distCoeffs[2];
-
+
fs << "R12" << R12;
fs << "T12" << T12;
fs << "R13" << R13;
fs << "T13" << T13;
-
+
fs << "imageWidth" << imageSize.width;
fs << "imageHeight" << imageSize.height;
-
+
Mat Q;
-
+
// step 3: find rectification transforms
double ratio = rectify3Collinear(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
distCoeffs[1], cameraMatrix[2], distCoeffs[2],
R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
imageSize, 0, 0, CV_CALIB_ZERO_DISPARITY);
Mat map1[3], map2[3];
-
+
fs << "R1" << R[0];
fs << "R2" << R[1];
fs << "R3" << R[2];
-
+
fs << "P1" << P[0];
fs << "P2" << P[1];
fs << "P3" << P[2];
-
+
fs << "disparityRatio" << ratio;
fs.release();
-
+
printf("Disparity ratio = %g\n", ratio);
-
+
for( k = 0; k < 3; k++ )
initUndistortRectifyMap(cameraMatrix[k], distCoeffs[k], R[k], P[k], imageSize, CV_16SC2, map1[k], map2[k]);
-
+
Mat canvas(imageSize.height, imageSize.width*3, CV_8UC3), small_canvas;
destroyWindow("view");
canvas = Scalar::all(0);
-
+
for( i = 0; i < (int)(imageList.size()/3); i++ )
{
canvas = Scalar::all(0);
int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
view = imread(imageList[i*3+k], 1);
-
+
if(!view.data)
continue;
-
+
Mat rview = canvas.colRange(k2*imageSize.width, (k2+1)*imageSize.width);
remap(view, rview, map1[k1], map2[k1], CV_INTER_LINEAR);
}
if( c == 27 || c == 'q' || c == 'Q' )
break;
}
-
+
return 0;
}
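
The rectified display in the loop above amounts to one map computation per camera followed by a remap per frame. A small sketch of that pattern for a single camera, assuming cameraMatrix, distCoeffs, R and P are the per-camera outputs of the calibrateCamera()/rectify3Collinear() steps shown in the sample:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// cameraMatrix/distCoeffs/R/P are assumed to come from a prior calibration
// and rectification step, as in the sample above.
static cv::Mat rectifyOneView(const cv::Mat& src,
                              const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                              const cv::Mat& R, const cv::Mat& P)
{
    cv::Mat map1, map2, dst;
    // build the undistortion + rectification lookup maps (once per camera)
    cv::initUndistortRectifyMap(cameraMatrix, distCoeffs, R, P,
                                src.size(), CV_16SC2, map1, map2);
    // warp the view through the maps (for every frame)
    cv::remap(src, dst, map1, map2, cv::INTER_LINEAR);
    return dst;
}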
#include "opencv2/opencv.hpp"
-void help(std::string errorMessage)
+static void help(std::string errorMessage)
{
- std::cout<<"Program init error : "<<errorMessage<<std::endl;
- std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image to process]"<<std::endl;
- std::cout<<"\t[OpenEXR image to process] : the input HDR image to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
- std::cout<<"\nExamples:"<<std::endl;
- std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping memorial.exr"<<std::endl;
+ std::cout<<"Program init error : "<<errorMessage<<std::endl;
+ std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image to process]"<<std::endl;
+ std::cout<<"\t[OpenEXR image to process] : the input HDR image to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
+ std::cout<<"\nExamples:"<<std::endl;
+ std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping memorial.exr"<<std::endl;
}
// simple procedure for 1D curve tracing
-void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowerLimit, const int upperLimit)
+static void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowerLimit, const int upperLimit)
{
- //std::cout<<"curve size(h,w) = "<<curve.size().height<<", "<<curve.size().width<<std::endl;
- cv::Mat displayedCurveImage = cv::Mat::ones(200, curve.size().height, CV_8U);
-
- cv::Mat windowNormalizedCurve;
- normalize(curve, windowNormalizedCurve, 0, 200, CV_MINMAX, CV_32F);
-
- displayedCurveImage = cv::Scalar::all(255); // set a white background
- int binW = cvRound((double)displayedCurveImage.cols/curve.size().height);
-
- for( int i = 0; i < curve.size().height; i++ )
- rectangle( displayedCurveImage, cv::Point(i*binW, displayedCurveImage.rows),
- cv::Point((i+1)*binW, displayedCurveImage.rows - cvRound(windowNormalizedCurve.at<float>(i))),
- cv::Scalar::all(0), -1, 8, 0 );
- rectangle( displayedCurveImage, cv::Point(0, 0),
- cv::Point((lowerLimit)*binW, 200),
- cv::Scalar::all(128), -1, 8, 0 );
- rectangle( displayedCurveImage, cv::Point(displayedCurveImage.cols, 0),
- cv::Point((upperLimit)*binW, 200),
- cv::Scalar::all(128), -1, 8, 0 );
-
- cv::imshow(figureTitle, displayedCurveImage);
+ //std::cout<<"curve size(h,w) = "<<curve.size().height<<", "<<curve.size().width<<std::endl;
+ cv::Mat displayedCurveImage = cv::Mat::ones(200, curve.size().height, CV_8U);
+
+ cv::Mat windowNormalizedCurve;
+ normalize(curve, windowNormalizedCurve, 0, 200, CV_MINMAX, CV_32F);
+
+ displayedCurveImage = cv::Scalar::all(255); // set a white background
+ int binW = cvRound((double)displayedCurveImage.cols/curve.size().height);
+
+ for( int i = 0; i < curve.size().height; i++ )
+ rectangle( displayedCurveImage, cv::Point(i*binW, displayedCurveImage.rows),
+ cv::Point((i+1)*binW, displayedCurveImage.rows - cvRound(windowNormalizedCurve.at<float>(i))),
+ cv::Scalar::all(0), -1, 8, 0 );
+ rectangle( displayedCurveImage, cv::Point(0, 0),
+ cv::Point((lowerLimit)*binW, 200),
+ cv::Scalar::all(128), -1, 8, 0 );
+ rectangle( displayedCurveImage, cv::Point(displayedCurveImage.cols, 0),
+ cv::Point((upperLimit)*binW, 200),
+ cv::Scalar::all(128), -1, 8, 0 );
+
+ cv::imshow(figureTitle, displayedCurveImage);
}
/*
* objective : get the gray level map of the input image and rescale it to the range [0-255]
- */void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit)
+ */
+ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit)
{
- // adjust output matrix wrt the input size but single channel
- std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
- //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
- //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
-
- // rescale between 0-255, keeping floating point values
- cv::normalize(inputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX);
-
- // extract a 8bit image that will be used for histogram edge cut
- cv::Mat intGrayImage;
- if (inputMat.channels()==1)
- {
- outputMat.convertTo(intGrayImage, CV_8U);
- }else
- {
- cv::Mat rgbIntImg;
- outputMat.convertTo(rgbIntImg, CV_8UC3);
- cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
- }
-
- // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation
- cv::Mat dst, hist;
- int histSize = 256;
- calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
- cv::Mat normalizedHist;
- normalize(hist, normalizedHist, 1, 0, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
-
- double min_val, max_val;
- CvMat histArr(normalizedHist);
- cvMinMaxLoc(&histArr, &min_val, &max_val);
- //std::cout<<"Hist max,min = "<<max_val<<", "<<min_val<<std::endl;
-
- // compute density probability
- cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
- denseProb.at<float>(0)=normalizedHist.at<float>(0);
- int histLowerLimit=0, histUpperLimit=0;
- for (int i=1;i<normalizedHist.size().height;++i)
- {
- denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
- //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
- if ( denseProb.at<float>(i)<histogramClippingLimit)
- histLowerLimit=i;
- if ( denseProb.at<float>(i)<1-histogramClippingLimit)
- histUpperLimit=i;
- }
- // deduce min and max admitted gray levels
- float minInputValue = (float)histLowerLimit/histSize*255;
- float maxInputValue = (float)histUpperLimit/histSize*255;
-
- std::cout<<"=> Histogram limits "
- <<"\n\t"<<histogramClippingLimit*100<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
- <<"\n\t"<<(1-histogramClippingLimit)*100<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
- <<std::endl;
- //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
- drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
-
- // rescale image range [minInputValue-maxInputValue] to [0-255]
- outputMat-=minInputValue;
- outputMat*=255.0/(maxInputValue-minInputValue);
- // cut original histogram and back project to original image
- cv::threshold( outputMat, outputMat, 255.0, 255.0, 2 ); //THRESH_TRUNC, clips values above 255
- cv::threshold( outputMat, outputMat, 0.0, 0.0, 3 ); //THRESH_TOZERO, clips values under 0
+ // adjust output matrix wrt the input size but single channel
+ std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
+ //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
+ //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
+
+ // rescale between 0-255, keeping floating point values
+ cv::normalize(inputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX);
+
+ // extract a 8bit image that will be used for histogram edge cut
+ cv::Mat intGrayImage;
+ if (inputMat.channels()==1)
+ {
+ outputMat.convertTo(intGrayImage, CV_8U);
+ }else
+ {
+ cv::Mat rgbIntImg;
+ outputMat.convertTo(rgbIntImg, CV_8UC3);
+ cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
+ }
+
+ // get the histogram density probability in order to cut values below/above the edge limits (here 5-95%)... useful for cancelling HDR pixel errors
+ cv::Mat dst, hist;
+ int histSize = 256;
+ calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
+ cv::Mat normalizedHist;
+ normalize(hist, normalizedHist, 1, 0, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
+
+ double min_val, max_val;
+ CvMat histArr(normalizedHist);
+ cvMinMaxLoc(&histArr, &min_val, &max_val);
+ //std::cout<<"Hist max,min = "<<max_val<<", "<<min_val<<std::endl;
+
+ // compute density probability
+ cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
+ denseProb.at<float>(0)=normalizedHist.at<float>(0);
+ int histLowerLimit=0, histUpperLimit=0;
+ for (int i=1;i<normalizedHist.size().height;++i)
+ {
+ denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
+ //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
+ if ( denseProb.at<float>(i)<histogramClippingLimit)
+ histLowerLimit=i;
+ if ( denseProb.at<float>(i)<1-histogramClippingLimit)
+ histUpperLimit=i;
+ }
+ // deduce min and max admitted gray levels
+ float minInputValue = (float)histLowerLimit/histSize*255;
+ float maxInputValue = (float)histUpperLimit/histSize*255;
+
+ std::cout<<"=> Histogram limits "
+ <<"\n\t"<<histogramClippingLimit*100<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
+ <<"\n\t"<<(1-histogramClippingLimit)*100<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
+ <<std::endl;
+ //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
+ drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
+
+ // rescale image range [minInputValue-maxInputValue] to [0-255]
+ outputMat-=minInputValue;
+ outputMat*=255.0/(maxInputValue-minInputValue);
+ // cut original histogram and back project to original image
+ cv::threshold( outputMat, outputMat, 255.0, 255.0, 2 ); //THRESH_TRUNC, clips values above 255
+ cv::threshold( outputMat, outputMat, 0.0, 0.0, 3 ); //THRESH_TOZERO, clips values under 0
}
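
The two trailing cv::threshold calls above pass the raw enum values 2 and 3, which correspond to cv::THRESH_TRUNC and cv::THRESH_TOZERO; the same clipping reads more clearly with the named constants, for example:

#include <opencv2/imgproc/imgproc.hpp>

// equivalent to the threshold(..., 2) / threshold(..., 3) calls above
static void clipTo0_255(cv::Mat& m)
{
    cv::threshold(m, m, 255.0, 255.0, cv::THRESH_TRUNC);   // clamp everything above 255
    cv::threshold(m, m, 0.0,   0.0,   cv::THRESH_TOZERO);  // zero out everything below 0
}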
// basic callback method for interface management
cv::Mat inputImage;
cv::Mat imageInputRescaled;
int histogramClippingValue;
- void callBack_rescaleGrayLevelMat(int, void*)
+ static void callBack_rescaleGrayLevelMat(int, void*)
{
- std::cout<<"Histogram clipping value changed, current value = "<<histogramClippingValue<<std::endl;
- rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)(histogramClippingValue/100.0));
- normalize(imageInputRescaled, imageInputRescaled, 0.0, 255.0, cv::NORM_MINMAX);
+ std::cout<<"Histogram clipping value changed, current value = "<<histogramClippingValue<<std::endl;
+ rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)(histogramClippingValue/100.0));
+ normalize(imageInputRescaled, imageInputRescaled, 0.0, 255.0, cv::NORM_MINMAX);
}
cv::Ptr<cv::Retina> retina;
int retinaHcellsGain;
int localAdaptation_photoreceptors, localAdaptation_Gcells;
- void callBack_updateRetinaParams(int, void*)
+ static void callBack_updateRetinaParams(int, void*)
{
- retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
+ retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
}
int colorSaturationFactor;
- void callback_saturateColors(int, void*)
+ static void callback_saturateColors(int, void*)
{
- retina->setColorSaturation(true, (float)colorSaturationFactor);
+ retina->setColorSaturation(true, (float)colorSaturationFactor);
}
int main(int argc, char* argv[]) {
- // welcome message
- std::cout<<"*********************************************************************************"<<std::endl;
- std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
- std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
- std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
- std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
- std::cout<<"* The retina model still have the following properties:"<<std::endl;
- std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
- std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
- std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
- std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
- std::cout<<"* for more information, reer to the following papers :"<<std::endl;
- std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
- std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
- std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
- std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
- std::cout<<"*********************************************************************************"<<std::endl;
- std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
- std::cout<<"*********************************************************************************"<<std::endl;
- std::cout<<"*** You can use free tools to generate OpenEXR images from images sets : ***"<<std::endl;
- std::cout<<"*** => 1. take a set of photos from the same viewpoint using bracketing ***"<<std::endl;
- std::cout<<"*** => 2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
- std::cout<<"*** => 3. apply tone mapping with this program ***"<<std::endl;
- std::cout<<"*********************************************************************************"<<std::endl;
-
- // basic input arguments checking
- if (argc<2)
- {
- help("bad number of parameter");
- return -1;
- }
-
- bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
-
- std::string inputImageName=argv[1];
-
- //////////////////////////////////////////////////////////////////////////////
- // checking input media type (still image, video file, live video acquisition)
- std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
- // image processing case
- // declare the retina input buffer... that will be fed differently in regard of the input media
- inputImage = cv::imread(inputImageName, -1); // load image in RGB mode
- std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
- if (!inputImage.total())
- {
- help("could not load image, program end");
- return -1;
+ // welcome message
+ std::cout<<"*********************************************************************************"<<std::endl;
+ std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
+ std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
+ std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
+ std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
+ std::cout<<"* The retina model still have the following properties:"<<std::endl;
+ std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
+ std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
+ std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
+ std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
+ std::cout<<"* for more information, reer to the following papers :"<<std::endl;
+ std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
+ std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
+ std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
+ std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
+ std::cout<<"*********************************************************************************"<<std::endl;
+ std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
+ std::cout<<"*********************************************************************************"<<std::endl;
+ std::cout<<"*** You can use free tools to generate OpenEXR images from images sets : ***"<<std::endl;
+ std::cout<<"*** => 1. take a set of photos from the same viewpoint using bracketing ***"<<std::endl;
+ std::cout<<"*** => 2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
+ std::cout<<"*** => 3. apply tone mapping with this program ***"<<std::endl;
+ std::cout<<"*********************************************************************************"<<std::endl;
+
+ // basic input arguments checking
+ if (argc<2)
+ {
+ help("bad number of parameter");
+ return -1;
+ }
+
+ bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
+
+ std::string inputImageName=argv[1];
+
+ //////////////////////////////////////////////////////////////////////////////
+ // checking input media type (still image, video file, live video acquisition)
+ std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
+ // image processing case
+ // declare the retina input buffer... that will be fed differently in regard of the input media
+ inputImage = cv::imread(inputImageName, -1); // load image in RGB mode
+ std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
+ if (!inputImage.total())
+ {
+ help("could not load image, program end");
+ return -1;
}
- // rescale between 0 and 1
- normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
- cv::Mat gammaTransformedImage;
- cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
- imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
- imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
- if (inputImage.empty())
- {
- help("Input image could not be loaded, aborting");
- return -1;
- }
-
- //////////////////////////////////////////////////////////////////////////////
- // Program start in a try/catch safety context (Retina may throw errors)
- try
- {
- /* create a retina instance with default parameters setup, uncomment the initialisation you wanna test
- * -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
- */
- if (useLogSampling)
+ // rescale between 0 and 1
+ normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
+ cv::Mat gammaTransformedImage;
+ cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
+ imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
+ imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
+ if (inputImage.empty())
+ {
+ help("Input image could not be loaded, aborting");
+ return -1;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Program start in a try/catch safety context (Retina may throw errors)
+ try
+ {
+ /* create a retina instance with default parameters setup, uncomment the initialisation you wanna test
+ * -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
+ */
+ if (useLogSampling)
{
retina = new cv::Retina(inputImage.size(),true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
- else// -> else allocate "classical" retina :
- retina = new cv::Retina(inputImage.size());
-
- // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
- retina->write("RetinaDefaultParameters.xml");
+ else// -> else allocate "classical" retina :
+ retina = new cv::Retina(inputImage.size());
+
+ // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
+ retina->write("RetinaDefaultParameters.xml");
// desactivate Magnocellular pathway processing (motion information extraction) since it is not usefull here
retina->activateMovingContoursProcessing(false);
- // declare retina output buffers
- cv::Mat retinaOutput_parvo;
-
- /////////////////////////////////////////////
- // prepare displays and interactions
- histogramClippingValue=0; // default value... updated with interface slider
- //inputRescaleMat = inputImage;
- //outputRescaleMat = imageInputRescaled;
- cv::namedWindow("Retina input image (with cut edges histogram for basic pixels error avoidance)",1);
- cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
-
- cv::namedWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", 1);
- colorSaturationFactor=3;
- cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
-
- retinaHcellsGain=40;
- cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
-
- localAdaptation_photoreceptors=197;
- localAdaptation_Gcells=190;
- cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
- cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
-
-
- /////////////////////////////////////////////
- // apply default parameters of user interaction variables
- rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100);
- retina->setColorSaturation(true,(float)colorSaturationFactor);
- callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
-
- // processing loop with stop condition
- bool continueProcessing=true;
- while(continueProcessing)
- {
- // run retina filter
- retina->run(imageInputRescaled);
- // Retrieve and display retina output
- retina->getParvo(retinaOutput_parvo);
- cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
- cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
- cv::waitKey(10);
- }
- }catch(cv::Exception e)
- {
- std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
- }
-
- // Program end message
- std::cout<<"Retina demo end"<<std::endl;
-
- return 0;
+ // declare retina output buffers
+ cv::Mat retinaOutput_parvo;
+
+ /////////////////////////////////////////////
+ // prepare displays and interactions
+ histogramClippingValue=0; // default value... updated with interface slider
+ //inputRescaleMat = inputImage;
+ //outputRescaleMat = imageInputRescaled;
+ cv::namedWindow("Retina input image (with cut edges histogram for basic pixels error avoidance)",1);
+ cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
+
+ cv::namedWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", 1);
+ colorSaturationFactor=3;
+ cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
+
+ retinaHcellsGain=40;
+ cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
+
+ localAdaptation_photoreceptors=197;
+ localAdaptation_Gcells=190;
+ cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
+ cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
+
+
+ /////////////////////////////////////////////
+ // apply default parameters of user interaction variables
+ rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100);
+ retina->setColorSaturation(true,(float)colorSaturationFactor);
+ callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
+
+ // processing loop with stop condition
+ bool continueProcessing=true;
+ while(continueProcessing)
+ {
+ // run retina filter
+ retina->run(imageInputRescaled);
+ // Retrieve and display retina output
+ retina->getParvo(retinaOutput_parvo);
+ cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
+ cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
+ cv::waitKey(10);
+ }
+ }catch(cv::Exception e)
+ {
+ std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
+ }
+
+ // Program end message
+ std::cout<<"Retina demo end"<<std::endl;
+
+ return 0;
}
// Description : HighDynamicRange compression (tone mapping) for image sequences with the help of the Gipsa/Listic's retina in C++, Ansi-style
// Known issues: the input OpenEXR sequences can have bad computed pixels that should be removed
// => a simple method consists of cutting histogram edges (a slider for this on the UI is provided)
-// => however, in image sequences, this histogramm cut must be done in an elegant way from frame to frame... still not done...
+// => however, in image sequences, this histogramm cut must be done in an elegant way from frame to frame... still not done...
//============================================================================
#include <iostream>
#include "opencv2/opencv.hpp"
-void help(std::string errorMessage)
+static void help(std::string errorMessage)
{
- std::cout<<"Program init error : "<<errorMessage<<std::endl;
- std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image sequence to process] [OPTIONNAL start frame] [OPTIONNAL end frame]"<<std::endl;
- std::cout<<"\t[OpenEXR image sequence to process] : std::sprintf style ready prototype filename of the input HDR images to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
- std::cout<<"\t\t => WARNING : image index number of digits cannot exceed 10"<<std::endl;
- std::cout<<"\t[start frame] : the starting frame tat should be considered"<<std::endl;
- std::cout<<"\t[end frame] : the ending frame tat should be considered"<<std::endl;
- std::cout<<"\nExamples:"<<std::endl;
- std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45"<<std::endl;
- std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45 log"<<std::endl;
- std::cout<<"\t ==> to process images from memorial020d.exr to memorial045d.exr"<<std::endl;
+ std::cout<<"Program init error : "<<errorMessage<<std::endl;
+ std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image sequence to process] [OPTIONNAL start frame] [OPTIONNAL end frame]"<<std::endl;
+ std::cout<<"\t[OpenEXR image sequence to process] : std::sprintf style ready prototype filename of the input HDR images to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
+ std::cout<<"\t\t => WARNING : image index number of digits cannot exceed 10"<<std::endl;
+ std::cout<<"\t[start frame] : the starting frame tat should be considered"<<std::endl;
+ std::cout<<"\t[end frame] : the ending frame tat should be considered"<<std::endl;
+ std::cout<<"\nExamples:"<<std::endl;
+ std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45"<<std::endl;
+ std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45 log"<<std::endl;
+ std::cout<<"\t ==> to process images from memorial020d.exr to memorial045d.exr"<<std::endl;
}
// simple procedure for 1D curve tracing
-void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowerLimit, const int upperLimit)
+static void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowerLimit, const int upperLimit)
{
- //std::cout<<"curve size(h,w) = "<<curve.size().height<<", "<<curve.size().width<<std::endl;
- cv::Mat displayedCurveImage = cv::Mat::ones(200, curve.size().height, CV_8U);
-
- cv::Mat windowNormalizedCurve;
- normalize(curve, windowNormalizedCurve, 0, 200, CV_MINMAX, CV_32F);
-
- displayedCurveImage = cv::Scalar::all(255); // set a white background
- int binW = cvRound((double)displayedCurveImage.cols/curve.size().height);
-
- for( int i = 0; i < curve.size().height; i++ )
- rectangle( displayedCurveImage, cv::Point(i*binW, displayedCurveImage.rows),
- cv::Point((i+1)*binW, displayedCurveImage.rows - cvRound(windowNormalizedCurve.at<float>(i))),
- cv::Scalar::all(0), -1, 8, 0 );
- rectangle( displayedCurveImage, cv::Point(0, 0),
- cv::Point((lowerLimit)*binW, 200),
- cv::Scalar::all(128), -1, 8, 0 );
- rectangle( displayedCurveImage, cv::Point(displayedCurveImage.cols, 0),
- cv::Point((upperLimit)*binW, 200),
- cv::Scalar::all(128), -1, 8, 0 );
-
- cv::imshow(figureTitle, displayedCurveImage);
+ //std::cout<<"curve size(h,w) = "<<curve.size().height<<", "<<curve.size().width<<std::endl;
+ cv::Mat displayedCurveImage = cv::Mat::ones(200, curve.size().height, CV_8U);
+
+ cv::Mat windowNormalizedCurve;
+ normalize(curve, windowNormalizedCurve, 0, 200, CV_MINMAX, CV_32F);
+
+ displayedCurveImage = cv::Scalar::all(255); // set a white background
+ int binW = cvRound((double)displayedCurveImage.cols/curve.size().height);
+
+ for( int i = 0; i < curve.size().height; i++ )
+ rectangle( displayedCurveImage, cv::Point(i*binW, displayedCurveImage.rows),
+ cv::Point((i+1)*binW, displayedCurveImage.rows - cvRound(windowNormalizedCurve.at<float>(i))),
+ cv::Scalar::all(0), -1, 8, 0 );
+ rectangle( displayedCurveImage, cv::Point(0, 0),
+ cv::Point((lowerLimit)*binW, 200),
+ cv::Scalar::all(128), -1, 8, 0 );
+ rectangle( displayedCurveImage, cv::Point(displayedCurveImage.cols, 0),
+ cv::Point((upperLimit)*binW, 200),
+ cv::Scalar::all(128), -1, 8, 0 );
+
+ cv::imshow(figureTitle, displayedCurveImage);
}
/*
* objective : get the gray level map of the input image and rescale it to the range [0-255] if rescale0_255=TRUE, simply trunks else
*/
-void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit, const bool rescale0_255)
+static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit, const bool rescale0_255)
{
- // adjust output matrix wrt the input size but single channel
- std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
- //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
- //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
-
- // get min and max values to use afterwards if no 0-255 rescaling is used
- double maxInput, minInput, histNormRescalefactor=1.f;
- double histNormOffset=0.f;
- minMaxLoc(inputMat, &minInput, &maxInput);
- histNormRescalefactor=255.f/(maxInput-minInput);
- histNormOffset=minInput;
- std::cout<<"Hist max,min = "<<maxInput<<", "<<minInput<<" => scale, offset = "<<histNormRescalefactor<<", "<<histNormOffset<<std::endl;
- // rescale between 0-255, keeping floating point values
- cv::Mat normalisedImage;
- cv::normalize(inputMat, normalisedImage, 0.f, 255.f, cv::NORM_MINMAX);
- if (rescale0_255)
- normalisedImage.copyTo(outputMat);
- // extract a 8bit image that will be used for histogram edge cut
- cv::Mat intGrayImage;
- if (inputMat.channels()==1)
- {
- normalisedImage.convertTo(intGrayImage, CV_8U);
- }else
- {
- cv::Mat rgbIntImg;
- normalisedImage.convertTo(rgbIntImg, CV_8UC3);
- cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
- }
-
- // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation
- cv::Mat dst, hist;
- int histSize = 256;
- calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
- cv::Mat normalizedHist;
-
- normalize(hist, normalizedHist, 1.f, 0.f, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
-
- // compute density probability
- cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
- denseProb.at<float>(0)=normalizedHist.at<float>(0);
- int histLowerLimit=0, histUpperLimit=0;
- for (int i=1;i<normalizedHist.size().height;++i)
- {
- denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
- //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
- if ( denseProb.at<float>(i)<histogramClippingLimit)
- histLowerLimit=i;
- if ( denseProb.at<float>(i)<1.f-histogramClippingLimit)
- histUpperLimit=i;
- }
- // deduce min and max admitted gray levels
- float minInputValue = (float)histLowerLimit/histSize*255.f;
- float maxInputValue = (float)histUpperLimit/histSize*255.f;
-
- std::cout<<"=> Histogram limits "
- <<"\n\t"<<histogramClippingLimit*100.f<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
- <<"\n\t"<<(1.f-histogramClippingLimit)*100.f<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
- <<std::endl;
- //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
- drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
-
- if(rescale0_255) // rescale between 0-255 if asked to
- {
- cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
- cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //THRESH_TOZERO, clips values under minInputValue
- // rescale image range [minInputValue-maxInputValue] to [0-255]
- outputMat-=minInputValue;
- outputMat*=255.f/(maxInputValue-minInputValue);
- }else
- {
- inputMat.copyTo(outputMat);
- // update threshold in the initial input image range
- maxInputValue=(float)((maxInputValue-255.f)/histNormRescalefactor+maxInput);
- minInputValue=(float)(minInputValue/histNormRescalefactor+minInput);
- std::cout<<"===> Input Hist clipping values (max,min) = "<<maxInputValue<<", "<<minInputValue<<std::endl;
- cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
- cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //
- }
+ // adjust output matrix wrt the input size but single channel
+ std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
+ //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
+ //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
+
+ // get min and max values to use afterwards if no 0-255 rescaling is used
+ double maxInput, minInput, histNormRescalefactor=1.f;
+ double histNormOffset=0.f;
+ minMaxLoc(inputMat, &minInput, &maxInput);
+ histNormRescalefactor=255.f/(maxInput-minInput);
+ histNormOffset=minInput;
+ std::cout<<"Hist max,min = "<<maxInput<<", "<<minInput<<" => scale, offset = "<<histNormRescalefactor<<", "<<histNormOffset<<std::endl;
+ // rescale between 0-255, keeping floating point values
+ cv::Mat normalisedImage;
+ cv::normalize(inputMat, normalisedImage, 0.f, 255.f, cv::NORM_MINMAX);
+ if (rescale0_255)
+ normalisedImage.copyTo(outputMat);
+ // extract a 8bit image that will be used for histogram edge cut
+ cv::Mat intGrayImage;
+ if (inputMat.channels()==1)
+ {
+ normalisedImage.convertTo(intGrayImage, CV_8U);
+ }else
+ {
+ cv::Mat rgbIntImg;
+ normalisedImage.convertTo(rgbIntImg, CV_8UC3);
+ cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
+ }
+
+ // get the histogram density probability in order to cut values below/above the edge limits (here 5-95%)... useful for cancelling HDR pixel errors
+ cv::Mat dst, hist;
+ int histSize = 256;
+ calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
+ cv::Mat normalizedHist;
+
+ normalize(hist, normalizedHist, 1.f, 0.f, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
+
+ // compute density probability
+ cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
+ denseProb.at<float>(0)=normalizedHist.at<float>(0);
+ int histLowerLimit=0, histUpperLimit=0;
+ for (int i=1;i<normalizedHist.size().height;++i)
+ {
+ denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
+ //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
+ if ( denseProb.at<float>(i)<histogramClippingLimit)
+ histLowerLimit=i;
+ if ( denseProb.at<float>(i)<1.f-histogramClippingLimit)
+ histUpperLimit=i;
+ }
+ // deduce min and max admitted gray levels
+ float minInputValue = (float)histLowerLimit/histSize*255.f;
+ float maxInputValue = (float)histUpperLimit/histSize*255.f;
+
+ std::cout<<"=> Histogram limits "
+ <<"\n\t"<<histogramClippingLimit*100.f<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
+ <<"\n\t"<<(1.f-histogramClippingLimit)*100.f<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
+ <<std::endl;
+ //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
+ drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
+
+ if(rescale0_255) // rescale between 0-255 if asked to
+ {
+ cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
+ cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //THRESH_TOZERO, clips values under minInputValue
+ // rescale image range [minInputValue-maxInputValue] to [0-255]
+ outputMat-=minInputValue;
+ outputMat*=255.f/(maxInputValue-minInputValue);
+ }else
+ {
+ inputMat.copyTo(outputMat);
+ // update threshold in the initial input image range
+ maxInputValue=(float)((maxInputValue-255.f)/histNormRescalefactor+maxInput);
+ minInputValue=(float)(minInputValue/histNormRescalefactor+minInput);
+ std::cout<<"===> Input Hist clipping values (max,min) = "<<maxInputValue<<", "<<minInputValue<<std::endl;
+ cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
+ cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //THRESH_TOZERO, clips values under minInputValue
+ }
}
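The helper above (rescaleGrayLevelMat) mixes the 0-255 normalisation, the histogram clipping and the final thresholding in one function. As a reading aid only, here is a hedged, self-contained sketch of just the clipping step; the function name clipLimitsFromHistogram and its calling convention are illustrative assumptions, not part of the sample or of this patch.

// Hedged sketch: derive the admitted gray-level range from an 8-bit single-channel
// image so that clippingLimit of the pixel mass is cut on each side, mirroring the
// cumulative-density loop in rescaleGrayLevelMat above.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

static void clipLimitsFromHistogram(const cv::Mat &gray8u, float clippingLimit,
                                    float &minGrayLevel, float &maxGrayLevel)
{
    // 256-bin histogram, normalized so that its sum equals 1 (a density)
    int histSize = 256;
    cv::Mat hist;
    cv::calcHist(&gray8u, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
    cv::normalize(hist, hist, 1.f, 0.f, cv::NORM_L1, CV_32F);

    // accumulate the density and keep the last bin still below each tail
    float cumulated = 0.f;
    int histLowerLimit = 0, histUpperLimit = 0;
    for (int i = 0; i < histSize; ++i)
    {
        cumulated += hist.at<float>(i);
        if (cumulated < clippingLimit)
            histLowerLimit = i;
        if (cumulated < 1.f - clippingLimit)
            histUpperLimit = i;
    }

    // convert bin indexes back to gray levels, as done before the drawPlot call above
    minGrayLevel = (float)histLowerLimit / histSize * 255.f;
    maxGrayLevel = (float)histUpperLimit / histSize * 255.f;
}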
// basic callback method for interface management
float globalRescalefactor=1;
cv::Scalar globalOffset=0;
int histogramClippingValue;
- void callBack_rescaleGrayLevelMat(int, void*)
+ static void callBack_rescaleGrayLevelMat(int, void*)
{
- std::cout<<"Histogram clipping value changed, current value = "<<histogramClippingValue<<std::endl;
- // rescale and process
- inputImage+=globalOffset;
- inputImage*=globalRescalefactor;
- inputImage+=cv::Scalar(50, 50, 50, 50); // WARNING value linked to the hardcoded value (200.0) used in the globalRescalefactor in order to center on the 128 mean value... experimental but... basic compromise
- rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100.f, true);
+ std::cout<<"Histogram clipping value changed, current value = "<<histogramClippingValue<<std::endl;
+ // rescale and process
+ inputImage+=globalOffset;
+ inputImage*=globalRescalefactor;
+ inputImage+=cv::Scalar(50, 50, 50, 50); // WARNING value linked to the hardcoded value (200.0) used in the globalRescalefactor in order to center on the 128 mean value... experimental but... basic compromise
+ rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100.f, true);
}
cv::Ptr<cv::Retina> retina;
int retinaHcellsGain;
int localAdaptation_photoreceptors, localAdaptation_Gcells;
- void callBack_updateRetinaParams(int, void*)
+ static void callBack_updateRetinaParams(int, void*)
{
- retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
+ retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
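+ // the nine arguments above are believed to follow the cv::Retina documentation: colorMode, normaliseOutput, photoreceptors local adaptation sensitivity, photoreceptors temporal constant, photoreceptors spatial constant, horizontal cells gain, H-cells temporal constant, H-cells spatial constant, ganglion cells sensitivity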
}
int colorSaturationFactor;
- void callback_saturateColors(int, void*)
+ static void callback_saturateColors(int, void*)
{
- retina->setColorSaturation(true, (float)colorSaturationFactor);
+ retina->setColorSaturation(true, (float)colorSaturationFactor);
}
// loadNewFrame : loads an image wrt the filename parameters. It also manages image rescaling/histogram edge cutting (acts differently on the first image, i.e. if firstTimeread=true)
-void loadNewFrame(const std::string filenamePrototype, const int currentFileIndex, const bool firstTimeread)
+static void loadNewFrame(const std::string filenamePrototype, const int currentFileIndex, const bool firstTimeread)
{
- char *currentImageName=NULL;
- currentImageName = (char*)malloc(sizeof(char)*filenamePrototype.size()+10);
-
- // grab the first frame
- sprintf(currentImageName, filenamePrototype.c_str(), currentFileIndex);
-
- //////////////////////////////////////////////////////////////////////////////
- // checking input media type (still image, video file, live video acquisition)
- std::cout<<"RetinaDemo: reading image : "<<currentImageName<<std::endl;
- // image processing case
- // declare the retina input buffer... that will be fed differently in regard of the input media
- inputImage = cv::imread(currentImageName, -1); // load image in RGB mode
- std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
- if (inputImage.empty())
- {
- help("could not load image, program end");
- return;;
+ char *currentImageName=NULL;
+ currentImageName = (char*)malloc(sizeof(char)*filenamePrototype.size()+10);
+
+ // grab the first frame
+ sprintf(currentImageName, filenamePrototype.c_str(), currentFileIndex);
+
+ //////////////////////////////////////////////////////////////////////////////
+ // checking input media type (still image, video file, live video acquisition)
+ std::cout<<"RetinaDemo: reading image : "<<currentImageName<<std::endl;
+ // image processing case
+ // declare the retina input buffer... that will be fed differently in regard of the input media
+ inputImage = cv::imread(currentImageName, -1); // load the image unchanged (flag -1 keeps the original depth and channel count)
+ std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
+ if (inputImage.empty())
+ {
+ help("could not load image, program end");
+ return;
}
- // rescaling/histogram clipping stage
- // rescale between 0 and 1
- // TODO : take care of this step !!! maybe disable of do this in a nicer way ... each successive image should get the same transformation... but it depends on the initial image format
- double maxInput, minInput;
- minMaxLoc(inputImage, &minInput, &maxInput);
- std::cout<<"ORIGINAL IMAGE pixels values range (max,min) : "<<maxInput<<", "<<minInput<<std::endl
-;if (firstTimeread)
- {
- /* the first time, get the pixel values range and rougthly update scaling value
- in order to center values around 128 and getting a range close to [0-255],
- => actually using a little less in order to let some more flexibility in range evolves...
- */
- double maxInput, minInput;
- minMaxLoc(inputImage, &minInput, &maxInput);
- std::cout<<"FIRST IMAGE pixels values range (max,min) : "<<maxInput<<", "<<minInput<<std::endl;
- globalRescalefactor=(float)(50.0/(maxInput-minInput)); // less than 255 for flexibility... experimental value to be carefull about
- double channelOffset = -1.5*minInput;
- globalOffset= cv::Scalar(channelOffset, channelOffset, channelOffset, channelOffset);
- }
- // call the generic input image rescaling callback
- callBack_rescaleGrayLevelMat(1,NULL);
+ // rescaling/histogram clipping stage
+ // rescale between 0 and 1
+ // TODO : take care of this step !!! maybe disable or do this in a nicer way ... each successive image should get the same transformation... but it depends on the initial image format
+ double maxInput, minInput;
+ minMaxLoc(inputImage, &minInput, &maxInput);
+ std::cout<<"ORIGINAL IMAGE pixels values range (max,min) : "<<maxInput<<", "<<minInput<<std::endl;
+
+ if (firstTimeread)
+ {
+ /* the first time, get the pixel value range and roughly update the scaling value
+ in order to center values around 128 and get a range close to [0-255],
+ => actually using a little less in order to leave some flexibility as the range evolves...
+ */
+ double maxInput1, minInput1;
+ minMaxLoc(inputImage, &minInput1, &maxInput1);
+ std::cout<<"FIRST IMAGE pixels values range (max,min) : "<<maxInput1<<", "<<minInput1<<std::endl;
+ globalRescalefactor=(float)(50.0/(maxInput1-minInput1)); // less than 255 for flexibility... experimental value to be careful about
+ double channelOffset = -1.5*minInput;
+ globalOffset= cv::Scalar(channelOffset, channelOffset, channelOffset, channelOffset);
+ }
+ // call the generic input image rescaling callback
+ callBack_rescaleGrayLevelMat(1,NULL);
}
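As a side note to loadNewFrame above: the malloc'd currentImageName buffer is never freed and the raw sprintf has no bound. A hedged, self-contained alternative (not part of the patch; the helper name and the 32-byte slack are assumptions) could look like this:

// Hedged sketch: expand the printf-style filename prototype (e.g. a "%d"-based pattern) safely.
#include <cstdio>
#include <string>
#include <vector>

static std::string formatFrameName(const std::string &filenamePrototype, int frameIndex)
{
    std::vector<char> buffer(filenamePrototype.size() + 32);   // room for the expanded index
    snprintf(&buffer[0], buffer.size(), filenamePrototype.c_str(), frameIndex);
    return std::string(&buffer[0]);
}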
int main(int argc, char* argv[]) {
- // welcome message
- std::cout<<"*********************************************************************************"<<std::endl;
- std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
- std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
- std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
- std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
- std::cout<<"* The retina model still have the following properties:"<<std::endl;
- std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
- std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
- std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
- std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
- std::cout<<"* for more information, reer to the following papers :"<<std::endl;
- std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
- std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
- std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
- std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
- std::cout<<"*********************************************************************************"<<std::endl;
- std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
- std::cout<<"*********************************************************************************"<<std::endl;
- std::cout<<"*** You can use free tools to generate OpenEXR images from images sets : ***"<<std::endl;
- std::cout<<"*** => 1. take a set of photos from the same viewpoint using bracketing ***"<<std::endl;
- std::cout<<"*** => 2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
- std::cout<<"*** => 3. apply tone mapping with this program ***"<<std::endl;
- std::cout<<"*********************************************************************************"<<std::endl;
-
- // basic input arguments checking
- if (argc<4)
- {
- help("bad number of parameter");
- return -1;
- }
-
- bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
-
- int startFrameIndex=0, endFrameIndex=0, currentFrameIndex=0;
- sscanf(argv[2], "%d", &startFrameIndex);
- sscanf(argv[3], "%d", &endFrameIndex);
- std::string inputImageNamePrototype(argv[1]);
-
- //////////////////////////////////////////////////////////////////////////////
- // checking input media type (still image, video file, live video acquisition)
- std::cout<<"RetinaDemo: setting up system with first image..."<<std::endl;
- loadNewFrame(inputImageNamePrototype, startFrameIndex, true);
-
- if (inputImage.empty())
- {
- help("could not load image, program end");
- return -1;
+ // welcome message
+ std::cout<<"*********************************************************************************"<<std::endl;
+ std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
+ std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
+ std::cout<<"* This demo focuses on demonstrating the dynamic compression capabilities of the model"<<std::endl;
+ std::cout<<"* => the main application is tone mapping of HDR images (i.e. viewing, on an 8bit display, an image coded with more than 8bits (up to 16bits) while keeping details in both high and low luminance ranges)"<<std::endl;
+ std::cout<<"* The retina model also has the following properties:"<<std::endl;
+ std::cout<<"* => It applies a spectral whitening (mid-frequency details enhancement)"<<std::endl;
+ std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
+ std::cout<<"* => low frequency luminance reduction (luminance range compression)"<<std::endl;
+ std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
+ std::cout<<"* for more information, refer to the following papers :"<<std::endl;
+ std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
+ std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
+ std::cout<<"* => report comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
+ std::cout<<"* => more information and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
+ std::cout<<"*********************************************************************************"<<std::endl;
+ std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
+ std::cout<<"*********************************************************************************"<<std::endl;
+ std::cout<<"*** You can use free tools to generate OpenEXR images from image sets :  ***"<<std::endl;
+ std::cout<<"*** => 1. take a set of photos from the same viewpoint using bracketing ***"<<std::endl;
+ std::cout<<"*** => 2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
+ std::cout<<"*** => 3. apply tone mapping with this program ***"<<std::endl;
+ std::cout<<"*********************************************************************************"<<std::endl;
+
+ // basic input arguments checking
+ if (argc<4)
+ {
+ help("bad number of parameters");
+ return -1;
+ }
+
+ bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
+
+ int startFrameIndex=0, endFrameIndex=0, currentFrameIndex=0;
+ sscanf(argv[2], "%d", &startFrameIndex);
+ sscanf(argv[3], "%d", &endFrameIndex);
+ std::string inputImageNamePrototype(argv[1]);
+
+ //////////////////////////////////////////////////////////////////////////////
+ // checking input media type (still image, video file, live video acquisition)
+ std::cout<<"RetinaDemo: setting up system with first image..."<<std::endl;
+ loadNewFrame(inputImageNamePrototype, startFrameIndex, true);
+
+ if (inputImage.empty())
+ {
+ help("could not load image, program end");
+ return -1;
}
- //////////////////////////////////////////////////////////////////////////////
- // Program start in a try/catch safety context (Retina may throw errors)
- try
- {
- /* create a retina instance with default parameters setup, uncomment the initialisation you wanna test
- * -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
- */
- if (useLogSampling)
+ //////////////////////////////////////////////////////////////////////////////
+ // Program start in a try/catch safety context (Retina may throw errors)
+ try
+ {
+ /* create a retina instance with default parameters setup, uncomment the initialisation you want to test
+ * -> if the last parameter is 'log', then activate log sampling (favours foveal vision and subsamples peripheral vision)
+ */
+ if (useLogSampling)
{
retina = new cv::Retina(inputImage.size(),true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
- else// -> else allocate "classical" retina :
- retina = new cv::Retina(inputImage.size());
-
- // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
- retina->write("RetinaDefaultParameters.xml");
+ else// -> else allocate "classical" retina :
+ retina = new cv::Retina(inputImage.size());
+
+ // save the default retina parameters file so that you can inspect it, modify it and reload it using the "setup" method
+ retina->write("RetinaDefaultParameters.xml");
// deactivate Magnocellular pathway processing (motion information extraction) since it is not useful here
retina->activateMovingContoursProcessing(false);
- // declare retina output buffers
- cv::Mat retinaOutput_parvo;
-
- /////////////////////////////////////////////
- // prepare displays and interactions
- histogramClippingValue=0; // default value... updated with interface slider
-
- std::string retinaInputCorrected("Retina input image (with cut edges histogram for basic pixels error avoidance)");
- cv::namedWindow(retinaInputCorrected,1);
- cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
-
- std::string RetinaParvoWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping");
- cv::namedWindow(RetinaParvoWindow, 1);
- colorSaturationFactor=3;
- cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
-
- retinaHcellsGain=40;
- cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
-
- localAdaptation_photoreceptors=197;
- localAdaptation_Gcells=190;
- cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
- cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
-
- std::string powerTransformedInput("EXR image with basic processing : 16bits=>8bits with gamma correction");
-
- /////////////////////////////////////////////
- // apply default parameters of user interaction variables
- callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
- callback_saturateColors(1, NULL);
-
- // processing loop with stop condition
- currentFrameIndex=startFrameIndex;
- while(currentFrameIndex <= endFrameIndex)
- {
- loadNewFrame(inputImageNamePrototype, currentFrameIndex, false);
-
- if (inputImage.empty())
- {
- std::cout<<"Could not load new image (index = "<<currentFrameIndex<<"), program end"<<std::endl;
- return -1;
- }
- // display input & process standard power transformation
- imshow("EXR image original image, 16bits=>8bits linear rescaling ", imageInputRescaled);
- cv::Mat gammaTransformedImage;
- cv::pow(imageInputRescaled, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
- imshow(powerTransformedInput, gammaTransformedImage);
- // run retina filter
- retina->run(imageInputRescaled);
- // Retrieve and display retina output
- retina->getParvo(retinaOutput_parvo);
- cv::imshow(retinaInputCorrected, imageInputRescaled/255.f);
- cv::imshow(RetinaParvoWindow, retinaOutput_parvo);
- cv::waitKey(4);
- // jump to next frame
- ++currentFrameIndex;
- }
- }catch(cv::Exception e)
- {
- std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
- }
-
- // Program end message
- std::cout<<"Retina demo end"<<std::endl;
-
- return 0;
+ // declare retina output buffers
+ cv::Mat retinaOutput_parvo;
+
+ /////////////////////////////////////////////
+ // prepare displays and interactions
+ histogramClippingValue=0; // default value... updated with interface slider
+
+ std::string retinaInputCorrected("Retina input image (with cut edges histogram for basic pixels error avoidance)");
+ cv::namedWindow(retinaInputCorrected,1);
+ cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
+
+ std::string RetinaParvoWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping");
+ cv::namedWindow(RetinaParvoWindow, 1);
+ colorSaturationFactor=3;
+ cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
+
+ retinaHcellsGain=40;
+ cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
+
+ localAdaptation_photoreceptors=197;
+ localAdaptation_Gcells=190;
+ cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
+ cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
+
+ std::string powerTransformedInput("EXR image with basic processing : 16bits=>8bits with gamma correction");
+
+ /////////////////////////////////////////////
+ // apply default parameters of user interaction variables
+ callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
+ callback_saturateColors(1, NULL);
+
+ // processing loop with stop condition
+ currentFrameIndex=startFrameIndex;
+ while(currentFrameIndex <= endFrameIndex)
+ {
+ loadNewFrame(inputImageNamePrototype, currentFrameIndex, false);
+
+ if (inputImage.empty())
+ {
+ std::cout<<"Could not load new image (index = "<<currentFrameIndex<<"), program end"<<std::endl;
+ return -1;
+ }
+ // display input & process standard power transformation
+ imshow("EXR original image, 16bits=>8bits linear rescaling ", imageInputRescaled);
+ cv::Mat gammaTransformedImage;
+ cv::pow(imageInputRescaled, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
+ imshow(powerTransformedInput, gammaTransformedImage);
+ // run retina filter
+ retina->run(imageInputRescaled);
+ // Retrieve and display retina output
+ retina->getParvo(retinaOutput_parvo);
+ cv::imshow(retinaInputCorrected, imageInputRescaled/255.f);
+ cv::imshow(RetinaParvoWindow, retinaOutput_parvo);
+ cv::waitKey(4);
+ // jump to next frame
+ ++currentFrameIndex;
+ }
+ }catch(const cv::Exception& e)
+ {
+ std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
+ }
+
+ // Program end message
+ std::cout<<"Retina demo end"<<std::endl;
+
+ return 0;
}
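Following the "reload it using the setup method" comment earlier in main, a hand-edited RetinaDefaultParameters.xml could be re-applied with a call along these lines; this is a hedged one-liner sketched from the cv::Retina interface, not something the patch adds:

    retina->setup("RetinaDefaultParameters.xml"); // re-read the parvo/magno settings from the edited file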
const string svmsDir = "/svms";
const string plotsDir = "/plots";
-void help(char** argv)
+static void help(char** argv)
{
cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Classes Challenge) data. \n"
<< "It shows how to use detectors, descriptors and recognition methods \n"
-void makeDir( const string& dir )
+static void makeDir( const string& dir )
{
#if defined WIN32 || defined _WIN32
CreateDirectory( dir.c_str(), 0 );
#endif
}
-void makeUsedDirs( const string& rootPath )
+static void makeUsedDirs( const string& rootPath )
{
makeDir(rootPath + bowImageDescriptorsDir);
makeDir(rootPath + svmsDir);
std::sort(order.begin(),order.end(),orderingSorter());
/* 2. save ranking results to text file */
- string input_file_std = checkFilenamePathsep(input_file);
- size_t fnamestart = input_file_std.rfind("/");
- string scoregt_file_str = input_file_std.substr(0,fnamestart+1) + "scoregt_" + class_name + ".txt";
+ string input_file_std1 = checkFilenamePathsep(input_file);
+ size_t fnamestart = input_file_std1.rfind("/");
+ string scoregt_file_str = input_file_std1.substr(0,fnamestart+1) + "scoregt_" + class_name + ".txt";
std::ofstream scoregt_file(scoregt_file_str.c_str());
if (scoregt_file.is_open())
{
// Protected Functions ------------------------------------
//---------------------------------------------------------
-string getVocName( const string& vocPath )
+static string getVocName( const string& vocPath )
{
size_t found = vocPath.rfind( '/' );
if( found == string::npos )
bool balanceClasses; // Balance class weights by number of samples in each (if true cSvmTrainTargetRatio is ignored).
};
-void readUsedParams( const FileNode& fn, string& vocName, DDMParams& ddmParams, VocabTrainParams& vocabTrainParams, SVMTrainParamsExt& svmTrainParamsExt )
+static void readUsedParams( const FileNode& fn, string& vocName, DDMParams& ddmParams, VocabTrainParams& vocabTrainParams, SVMTrainParamsExt& svmTrainParamsExt )
{
fn["vocName"] >> vocName;
svmTrainParamsExt.read( currFn );
}
-void writeUsedParams( FileStorage& fs, const string& vocName, const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams, const SVMTrainParamsExt& svmTrainParamsExt )
+static void writeUsedParams( FileStorage& fs, const string& vocName, const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams, const SVMTrainParamsExt& svmTrainParamsExt )
{
fs << "vocName" << vocName;
fs << "}";
}
-void printUsedParams( const string& vocPath, const string& resDir,
+static void printUsedParams( const string& vocPath, const string& resDir,
const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams,
const SVMTrainParamsExt& svmTrainParamsExt )
{
cout << "----------------------------------------------------------------" << endl << endl;
}
-bool readVocabulary( const string& filename, Mat& vocabulary )
+static bool readVocabulary( const string& filename, Mat& vocabulary )
{
cout << "Reading vocabulary...";
FileStorage fs( filename, FileStorage::READ );
return false;
}
-bool writeVocabulary( const string& filename, const Mat& vocabulary )
+static bool writeVocabulary( const string& filename, const Mat& vocabulary )
{
cout << "Saving vocabulary..." << endl;
FileStorage fs( filename, FileStorage::WRITE );
return false;
}
-Mat trainVocabulary( const string& filename, VocData& vocData, const VocabTrainParams& trainParams,
+static Mat trainVocabulary( const string& filename, VocData& vocData, const VocabTrainParams& trainParams,
const Ptr<FeatureDetector>& fdetector, const Ptr<DescriptorExtractor>& dextractor )
{
Mat vocabulary;
return vocabulary;
}
-bool readBowImageDescriptor( const string& file, Mat& bowImageDescriptor )
+static bool readBowImageDescriptor( const string& file, Mat& bowImageDescriptor )
{
FileStorage fs( file, FileStorage::READ );
if( fs.isOpened() )
return false;
}
-bool writeBowImageDescriptor( const string& file, const Mat& bowImageDescriptor )
+static bool writeBowImageDescriptor( const string& file, const Mat& bowImageDescriptor )
{
FileStorage fs( file, FileStorage::WRITE );
if( fs.isOpened() )
}
// Load in the bag of words vectors for a set of images, from file if possible
-void calculateImageDescriptors( const vector<ObdImage>& images, vector<Mat>& imageDescriptors,
+static void calculateImageDescriptors( const vector<ObdImage>& images, vector<Mat>& imageDescriptors,
Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
const string& resPath )
{
}
}
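For readers following the bag-of-words flow, here is a hedged, self-contained sketch of how one image's BOW vector is produced with the detector and BOWImgDescriptorExtractor objects these functions pass around; the helper name and the image path are illustrative assumptions only.

// Hedged sketch: compute a single image's bag-of-words descriptor.
// Assumes bowExtractor->setVocabulary(...) has already been called.
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <string>
#include <vector>

static cv::Mat computeSingleBowDescriptor(const cv::Ptr<cv::FeatureDetector> &fdetector,
                                          cv::Ptr<cv::BOWImgDescriptorExtractor> &bowExtractor,
                                          const std::string &imagePath)
{
    cv::Mat img = cv::imread(imagePath, 0);        // grayscale, as the samples do
    std::vector<cv::KeyPoint> keypoints;
    fdetector->detect(img, keypoints);
    cv::Mat bowDescriptor;                         // 1 x vocabularySize, CV_32F
    bowExtractor->compute(img, keypoints, bowDescriptor);
    return bowDescriptor;
}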
-void removeEmptyBowImageDescriptors( vector<ObdImage>& images, vector<Mat>& bowImageDescriptors,
+static void removeEmptyBowImageDescriptors( vector<ObdImage>& images, vector<Mat>& bowImageDescriptors,
vector<char>& objectPresent )
{
CV_Assert( !images.empty() );
}
}
-void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<Mat> bowImageDescriptors, vector<char> objectPresent,
+static void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<Mat> bowImageDescriptors, vector<char> objectPresent,
const SVMTrainParamsExt& svmParamsExt, int descsToDelete )
{
RNG& rng = theRNG();
CV_Assert( bowImageDescriptors.size() == objectPresent.size() );
}
-void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat& responses, bool balanceClasses )
+static void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat& responses, bool balanceClasses )
{
int pos_ex = countNonZero(responses == 1);
int neg_ex = countNonZero(responses == -1);
}
}
-void setSVMTrainAutoParams( CvParamGrid& c_grid, CvParamGrid& gamma_grid,
+static void setSVMTrainAutoParams( CvParamGrid& c_grid, CvParamGrid& gamma_grid,
CvParamGrid& p_grid, CvParamGrid& nu_grid,
CvParamGrid& coef_grid, CvParamGrid& degree_grid )
{
degree_grid.step = 0;
}
-void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
+static void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
const string& resPath )
{
}
}
-void computeConfidences( CvSVM& svm, const string& objClassName, VocData& vocData,
+static void computeConfidences( CvSVM& svm, const string& objClassName, VocData& vocData,
Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
const string& resPath )
{
cout << "---------------------------------------------------------------" << endl;
}
-void computeGnuPlotOutput( const string& resPath, const string& objClassName, VocData& vocData )
+static void computeGnuPlotOutput( const string& resPath, const string& objClassName, VocData& vocData )
{
vector<float> precision, recall;
float ap;
using namespace std;
using namespace cv;
-void help()
+static void help()
{
printf("\nDo background segmentation, especially demonstrating the use of cvUpdateBGStatModel().\n"
"Learns the background at the start and then segments.\n"
using namespace std;
//Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors
-void matches2points(const vector<DMatch>& matches, const vector<KeyPoint>& kpts_train,
+static void matches2points(const vector<DMatch>& matches, const vector<KeyPoint>& kpts_train,
const vector<KeyPoint>& kpts_query, vector<Point2f>& pts_train, vector<Point2f>& pts_query)
{
pts_train.clear();
}
-double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*kpts_query*/, DescriptorMatcher& matcher,
+static double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*kpts_query*/, DescriptorMatcher& matcher,
const Mat& train, const Mat& query, vector<DMatch>& matches)
{
return ((double)getTickCount() - t) / getTickFrequency();
}
-void help()
+static void help()
{
cout << "This program shows how to use BRIEF descriptor to match points in features2d" << endl <<
"It takes in two images, finds keypoints and matches them displaying matches and final homography warped results" << endl <<
vector<Point2f> mpts_1, mpts_2;
matches2points(matches_popcount, kpts_1, kpts_2, mpts_1, mpts_2); //Extract a list of the (x,y) location of the matches
- vector<uchar> outlier_mask;
+ vector<char> outlier_mask;
Mat H = findHomography(mpts_2, mpts_1, RANSAC, 1, outlier_mask);
Mat outimg;
- drawMatches(im2, kpts_2, im1, kpts_1, matches_popcount, outimg, Scalar::all(-1), Scalar::all(-1),
- *(const vector<char>*)(void*)(&outlier_mask));
+ drawMatches(im2, kpts_2, im1, kpts_1, matches_popcount, outimg, Scalar::all(-1), Scalar::all(-1), outlier_mask);
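+ // note: findHomography writes one 0/1 mask entry per point pair and drawMatches expects its matchesMask as vector<char>, so the mask is now passed directly instead of being cast from vector<uchar>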
imshow("matches - popcount - outliers removed", outimg);
Mat warped;
using namespace cv;
using namespace std;
-void help()
+static void help()
{
printf("\nSigh: This program is not complete/will be replaced. \n"
"So: Use this just to see hints of how to use things like Rodrigues\n"
roiList.resize(0);
poseList.resize(0);
box.resize(0);
-
+
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
fs["box"] >> box;
-
+
FileNode all = fs["views"];
if( all.type() != FileNode::SEQ )
return false;
FileNodeIterator it = all.begin(), it_end = all.end();
-
+
for(; it != it_end; ++it)
{
FileNode n = *it;
poseList.push_back(Vec6f((float)np[0], (float)np[1], (float)np[2],
(float)np[3], (float)np[4], (float)np[5]));
}
-
+
return true;
}
const PointModel& model)
{
FileStorage fs(modelFileName, FileStorage::WRITE);
-
+
fs << modelname << "{" <<
"points" << "[:" << model.points << "]" <<
"idx" << "[:";
-
+
for( size_t i = 0; i < model.didx.size(); i++ )
fs << "[:" << model.didx[i] << "]";
fs << "]" << "descriptors" << model.descriptors;
{
Point2f p1 = keypoints1[i].pt;
double bestDist1 = DBL_MAX, bestDist2 = DBL_MAX;
- int bestIdx1 = -1, bestIdx2 = -1;
+ int bestIdx1 = -1;//, bestIdx2 = -1;
const float* d1 = descriptors1.ptr<float>(i);
for( int j = 0; j < (int)keypoints2.size(); j++ )
if( dist < bestDist1 )
{
bestDist2 = bestDist1;
- bestIdx2 = bestIdx1;
+ //bestIdx2 = bestIdx1;
bestDist1 = dist;
bestIdx1 = (int)j;
}
else if( dist < bestDist2 )
{
bestDist2 = dist;
- bestIdx2 = (int)j;
+ //bestIdx2 = (int)j;
}
}
}
{
if( i1 == i )
continue;
- Point2f p1 = keypoints1[i1].pt;
+ Point2f pt1 = keypoints1[i1].pt;
const float* d11 = descriptors1.ptr<float>(i1);
double dist = 0;
- e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
- p2.y*(F[3]*p1.x + F[4]*p1.y + F[5]) +
- F[6]*p1.x + F[7]*p1.y + F[8];
+ e = p2.x*(F[0]*pt1.x + F[1]*pt1.y + F[2]) +
+ p2.y*(F[3]*pt1.x + F[4]*pt1.y + F[5]) +
+ F[6]*pt1.x + F[7]*pt1.y + F[8];
if( fabs(e) > eps )
continue;
static Point3f findRayIntersection(Point3f k1, Point3f b1, Point3f k2, Point3f b2)
-{
+{
float a[4], b[2], x[2];
a[0] = k1.dot(k1);
a[1] = a[2] = -k1.dot(k2);
b[1] = k2.dot(b1 - b2);
Mat_<float> A(2, 2, a), B(2, 1, b), X(2, 1, x);
solve(A, B, X);
-
+
float s1 = X.at<float>(0, 0);
float s2 = X.at<float>(1, 0);
return (k1*s1 + b1 + k2*s2 + b2)*0.5f;
const Mat& cameraMatrix)
{
Mat_<double> K(cameraMatrix);
-
+
/*if( ps.size() > 2 )
{
Mat_<double> L(ps.size()*3, 4), U, evalues;
Mat_<double> P(3,4), Rt(3,4), Rt_part1=Rt.colRange(0,3), Rt_part2=Rt.colRange(3,4);
-
+
for( size_t i = 0; i < ps.size(); i++ )
{
double x = ps[i].x, y = ps[i].y;
Rs[i].convertTo(Rt_part1, Rt_part1.type());
ts[i].convertTo(Rt_part2, Rt_part2.type());
P = K*Rt;
-
+
for( int k = 0; k < 4; k++ )
{
L(i*3, k) = x*P(2,k) - P(0,k);
L(i*3+2, k) = x*P(1,k) - y*P(0,k);
}
}
-
+
eigen(L.t()*L, evalues, U);
CV_Assert(evalues(0,0) >= evalues(3,0));
-
+
double W = fabs(U(3,3)) > FLT_EPSILON ? 1./U(3,3) : 0;
return Point3f((float)(U(3,0)*W), (float)(U(3,1)*W), (float)(U(3,2)*W));
}
}
-void triangulatePoint_test(void)
+static void triangulatePoint_test(void)
{
int i, n = 100;
vector<Point3f> objpt(n), delta1(n), delta2(n);
{
EqKeypoints(const vector<int>* _dstart, const Set2i* _pairs)
: dstart(_dstart), pairs(_pairs) {}
-
+
bool operator()(const Pair2i& a, const Pair2i& b) const
{
return pairs->find(Pair2i(dstart->at(a.first) + a.second,
dstart->at(b.first) + b.second)) != pairs->end();
}
-
+
const vector<int>* dstart;
const Set2i* pairs;
};
vector<KeyPoint> keypoints;
detector->detect(gray, keypoints);
descriptorExtractor->compute(gray, keypoints, descriptorbuf);
- Point2f roiofs = roiList[i].tl();
+ Point2f roiofs = roiList[i].tl();
for( size_t k = 0; k < keypoints.size(); k++ )
keypoints[k].pt += roiofs;
allkeypoints.push_back(keypoints);
size_t prev = alldescriptorsVec.size();
size_t delta = buf.rows*buf.cols;
- alldescriptorsVec.resize(prev + delta);
+ alldescriptorsVec.resize(prev + delta);
std::copy(buf.ptr<float>(), buf.ptr<float>() + delta,
alldescriptorsVec.begin() + prev);
dstart.push_back(dstart.back() + (int)keypoints.size());
pairsFound++;
- //model.points.push_back(objpt);
+ //model.points.push_back(objpt);
pairs[Pair2i(i1+dstart[i], i2+dstart[j])] = 1;
pairs[Pair2i(i2+dstart[j], i1+dstart[i])] = 1;
keypointsIdxMap[Pair2i((int)i,i1)] = 1;
const char* intrinsicsFilename = 0;
const char* modelName = 0;
const char* detectorName = "SURF";
- const char* descriptorExtractorName = "SURF";
+ const char* descriptorExtractorName = "SURF";
vector<Point3f> modelBox;
vector<string> imageList;
" 'g' - start capturing images\n"
" 'u' - switch undistortion on/off\n";
-void help()
+static void help()
{
printf( "This is a camera calibration sample.\n"
"Usage: calibration\n"
int i, totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
-
+
for( i = 0; i < (int)objectPoints.size(); i++ )
{
projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i],
totalErr += err*err;
totalPoints += n;
}
-
+
return std::sqrt(totalErr/totalPoints);
}
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners, Pattern patternType = CHESSBOARD)
{
corners.resize(0);
-
+
switch(patternType)
{
case CHESSBOARD:
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = aspectRatio;
-
+
distCoeffs = Mat::zeros(8, 1, CV_64F);
-
+
vector<vector<Point3f> > objectPoints(1);
calcChessboardCorners(boardSize, squareSize, objectPoints[0], patternType);
objectPoints.resize(imagePoints.size(),objectPoints[0]);
-
+
double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
///*|CV_CALIB_FIX_K3*/|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
printf("RMS error reported by calibrateCamera: %g\n", rms);
-
+
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
-
+
totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);
}
-void saveCameraParams( const string& filename,
+static void saveCameraParams( const string& filename,
Size imageSize, Size boardSize,
float squareSize, float aspectRatio, int flags,
const Mat& cameraMatrix, const Mat& distCoeffs,
double totalAvgErr )
{
FileStorage fs( filename, FileStorage::WRITE );
-
- time_t t;
- time( &t );
- struct tm *t2 = localtime( &t );
+
+ time_t tt;
+ time( &tt );
+ struct tm *t2 = localtime( &tt );
char buf[1024];
strftime( buf, sizeof(buf)-1, "%c", t2 );
fs << "calibration_time" << buf;
-
+
if( !rvecs.empty() || !reprojErrs.empty() )
fs << "nframes" << (int)std::max(rvecs.size(), reprojErrs.size());
fs << "image_width" << imageSize.width;
fs << "board_width" << boardSize.width;
fs << "board_height" << boardSize.height;
fs << "square_size" << squareSize;
-
+
if( flags & CV_CALIB_FIX_ASPECT_RATIO )
fs << "aspectRatio" << aspectRatio;
flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
cvWriteComment( *fs, buf, 0 );
}
-
+
fs << "flags" << flags;
fs << "camera_matrix" << cameraMatrix;
fs << "avg_reprojection_error" << totalAvgErr;
if( !reprojErrs.empty() )
fs << "per_view_reprojection_errors" << Mat(reprojErrs);
-
+
if( !rvecs.empty() && !tvecs.empty() )
{
CV_Assert(rvecs[0].type() == tvecs[0].type());
cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
fs << "extrinsic_parameters" << bigmat;
}
-
+
if( !imagePoints.empty() )
{
Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
}
-bool runAndSave(const string& outputFilename,
+static bool runAndSave(const string& outputFilename,
const vector<vector<Point2f> >& imagePoints,
Size imageSize, Size boardSize, Pattern patternType, float squareSize,
float aspectRatio, int flags, Mat& cameraMatrix,
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;
-
+
bool ok = runCalibration(imagePoints, imageSize, boardSize, patternType, squareSize,
aspectRatio, flags, cameraMatrix, distCoeffs,
rvecs, tvecs, reprojErrs, totalAvgErr);
printf("%s. avg reprojection error = %.2f\n",
ok ? "Calibration succeeded" : "Calibration failed",
totalAvgErr);
-
+
if( ok )
saveCameraParams( outputFilename, imageSize,
boardSize, squareSize, aspectRatio,
Mat cameraMatrix, distCoeffs;
const char* outputFilename = "out_camera_data.yml";
const char* inputFilename = 0;
-
+
int i, nframes = 10;
bool writeExtrinsics = false, writePoints = false;
bool undistortImage = false;
{
if( !videofile && readStringList(inputFilename, imageList) )
mode = CAPTURING;
- else
+ else
capture.open(inputFilename);
}
else
if( !capture.isOpened() && imageList.empty() )
return fprintf( stderr, "Could not initialize video (%d) capture\n",cameraId ), -2;
-
+
if( !imageList.empty() )
nframes = (int)imageList.size();
{
Mat view, viewGray;
bool blink = false;
-
+
if( capture.isOpened() )
{
Mat view0;
}
else if( i < (int)imageList.size() )
view = imread(imageList[i], 1);
-
+
if(!view.data)
{
if( imagePoints.size() > 0 )
writeExtrinsics, writePoints);
break;
}
-
+
imageSize = view.size();
if( flipVertical )
flip( view, view, 0 );
vector<Point2f> pointbuf;
- cvtColor(view, viewGray, CV_BGR2GRAY);
+ cvtColor(view, viewGray, CV_BGR2GRAY);
bool found;
switch( pattern )
prevTimestamp = clock();
blink = capture.isOpened();
}
-
+
if(found)
drawChessboardCorners( view, boardSize, Mat(pointbuf), found );
string msg = mode == CAPTURING ? "100/100" :
mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
int baseLine = 0;
- Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
+ Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
if( mode == CAPTURING )
if( (key & 255) == 27 )
break;
-
+
if( key == 'u' && mode == CALIBRATED )
undistortImage = !undistortImage;
break;
}
}
-
+
if( !capture.isOpened() && showUndistorted )
{
Mat view, rview, map1, map2;
initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);
-
+
for( i = 0; i < (int)imageList.size(); i++ )
{
view = imread(imageList[i], 1);
break;
}
}
-
+
return 0;
}
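A hedged companion to the calibration sample above (not part of the patch): reading back the YAML written by saveCameraParams() and undistorting one image. Only the "camera_matrix" key is visible in the hunks above; the "distortion_coefficients" key and the input/output file names are assumptions made for illustration.

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::FileStorage fs("out_camera_data.yml", cv::FileStorage::READ);  // default output name of the sample
    cv::Mat cameraMatrix, distCoeffs;
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;                       // assumed key name
    cv::Mat view = cv::imread("view000.jpg"), undistorted;             // hypothetical image
    if (view.empty() || cameraMatrix.empty())
        return -1;
    cv::undistort(view, undistorted, cameraMatrix, distCoeffs);
    cv::imwrite("view000_undistorted.jpg", undistorted);
    return 0;
}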
using namespace cv;
using namespace std;
-void help()
+static void help()
{
printf( "\nThis code generates an artificial camera and artificial chessboard images,\n"
"and then calibrates. It is basically test code for calibration that shows\n"
class ChessBoardGenerator
{
public:
- double sensorWidth;
- double sensorHeight;
+ double sensorWidth;
+ double sensorHeight;
size_t squareEdgePointsNum;
double min_cos;
mutable double cov;
int rendererResolutionMultiplier;
ChessBoardGenerator(const Size& patternSize = Size(8, 6));
- Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;
+ Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;
Size cornersSize() const;
private:
void generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const;
- Mat generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
- const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
+ Mat generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
+ const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
float sqWidth, float sqHeight, const vector<Point3f>& whole, vector<Point2f>& corners) const;
- void generateBasis(Point3f& pb1, Point3f& pb2) const;
+ void generateBasis(Point3f& pb1, Point3f& pb2) const;
Point3f generateChessBoardCenter(const Mat& camMat, const Size& imgSize) const;
Mat rvec, tvec;
};
const size_t brds_num = 20;
template<class T> ostream& operator<<(ostream& out, const Mat_<T>& mat)
-{
+{
for(int j = 0; j < mat.rows; ++j)
for(int i = 0; i < mat.cols; ++i)
- out << mat(j, i) << " ";
+ out << mat(j, i) << " ";
return out;
}
int main()
-{
- help();
- cout << "Initializing background...";
- Mat background(imgSize, CV_8UC3);
- randu(background, Scalar::all(32), Scalar::all(255));
+{
+ help();
+ cout << "Initializing background...";
+ Mat background(imgSize, CV_8UC3);
+ randu(background, Scalar::all(32), Scalar::all(255));
GaussianBlur(background, background, Size(5, 5), 2);
cout << "Done" << endl;
- cout << "Initializing chess board generator...";
+ cout << "Initializing chess board generator...";
ChessBoardGenerator cbg(brdSize);
cbg.rendererResolutionMultiplier = 4;
cout << "Done" << endl;
/* camera params */
Mat_<double> camMat(3, 3);
camMat << 300., 0., background.cols/2., 0, 300., background.rows/2., 0., 0., 1.;
-
+
Mat_<double> distCoeffs(1, 5);
distCoeffs << 1.2, 0.2, 0., 0., 0.;
-
- cout << "Generating chessboards...";
+
+ cout << "Generating chessboards...";
vector<Mat> boards(brds_num);
vector<Point2f> tmp;
for(size_t i = 0; i < brds_num; ++i)
cout << (boards[i] = cbg(background, camMat, distCoeffs, tmp), i) << " ";
- cout << "Done" << endl;
+ cout << "Done" << endl;
vector<Point3f> chessboard3D;
for(int j = 0; j < cbg.cornersSize().height; ++j)
for(int i = 0; i < cbg.cornersSize().width; ++i)
chessboard3D.push_back(Point3i(i, j, 0));
-
+
/* init points */
- vector< vector<Point3f> > objectPoints;
+ vector< vector<Point3f> > objectPoints;
vector< vector<Point2f> > imagePoints;
cout << endl << "Finding chessboards' corners...";
if (found)
{
imagePoints.push_back(tmp);
- objectPoints.push_back(chessboard3D);
- cout<< "-found ";
+ objectPoints.push_back(chessboard3D);
+ cout<< "-found ";
}
else
- cout<< "-not-found ";
+ cout<< "-not-found ";
drawChessboardCorners(boards[i], cbg.cornersSize(), Mat(tmp), found);
imshow("Current chessboard", boards[i]); waitKey(1000);
}
cout << "Done" << endl;
cvDestroyAllWindows();
-
+
Mat camMat_est;
Mat distCoeffs_est;
vector<Mat> rvecs, tvecs;
-
+
cout << "Calibrating...";
double rep_err = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs, tvecs);
cout << "Done" << endl;
cout << "==================================" << endl;
cout << "Estimated camera matrix:\n" << (Mat_<double>&)camMat_est << endl;
cout << "Estimated distCoeffs:\n" << (Mat_<double>&)distCoeffs_est << endl;
-
+
return 0;
}
ChessBoardGenerator::ChessBoardGenerator(const Size& _patternSize) : sensorWidth(32), sensorHeight(24),
- squareEdgePointsNum(200), min_cos(sqrt(2.f)*0.5f), cov(0.5),
+ squareEdgePointsNum(200), min_cos(sqrt(2.f)*0.5f), cov(0.5),
patternSize(_patternSize), rendererResolutionMultiplier(4), tvec(Mat::zeros(1, 3, CV_32F))
-{
+{
Rodrigues(Mat::eye(3, 3, CV_32F), rvec);
}
void cv::ChessBoardGenerator::generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const
-{
- Point3f step = (p2 - p1) * (1.f/squareEdgePointsNum);
+{
+ Point3f step = (p2 - p1) * (1.f/squareEdgePointsNum);
for(size_t n = 0; n < squareEdgePointsNum; ++n)
out.push_back( p1 + step * (float)n);
-}
+}
Size cv::ChessBoardGenerator::cornersSize() const
{
{
float m;
Mult(int mult) : m((float)mult) {}
- Point2f operator()(const Point2f& p)const { return p * m; }
+ Point2f operator()(const Point2f& p)const { return p * m; }
};
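// generateBasis (below) draws a random unit normal n, rejecting candidates until |n.z| > min_cos so the
// board plane is not too oblique to the camera, then builds two in-plane directions b1 = n x (n + [100,0,0])
// and b2 = n x b1, which are normalized into pb1 and pb2 and later span the synthetic chessboard.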
void cv::ChessBoardGenerator::generateBasis(Point3f& pb1, Point3f& pb2) const
Vec3f n;
for(;;)
- {
+ {
n[0] = rng.uniform(-1.f, 1.f);
n[1] = rng.uniform(-1.f, 1.f);
- n[2] = rng.uniform(-1.f, 1.f);
- float len = (float)norm(n);
- n[0]/=len;
- n[1]/=len;
+ n[2] = rng.uniform(-1.f, 1.f);
+ float len = (float)norm(n);
+ n[0]/=len;
+ n[1]/=len;
n[2]/=len;
-
+
if (fabs(n[2]) > min_cos)
break;
}
Vec3f n_temp = n; n_temp[0] += 100;
- Vec3f b1 = n.cross(n_temp);
+ Vec3f b1 = n.cross(n_temp);
Vec3f b2 = n.cross(b1);
float len_b1 = (float)norm(b1);
- float len_b2 = (float)norm(b2);
+ float len_b2 = (float)norm(b2);
pb1 = Point3f(b1[0]/len_b1, b1[1]/len_b1, b1[2]/len_b1);
pb2 = Point3f(b2[0]/len_b1, b2[1]/len_b2, b2[2]/len_b2);
}
-Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
- const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
+Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
+ const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
float sqWidth, float sqHeight, const vector<Point3f>& whole,
vector<Point2f>& corners) const
{
- vector< vector<Point> > squares_black;
+ vector< vector<Point> > squares_black;
for(int i = 0; i < patternSize.width; ++i)
for(int j = 0; j < patternSize.height; ++j)
- if ( (i % 2 == 0 && j % 2 == 0) || (i % 2 != 0 && j % 2 != 0) )
- {
+ if ( (i % 2 == 0 && j % 2 == 0) || (i % 2 != 0 && j % 2 != 0) )
+ {
vector<Point3f> pts_square3d;
vector<Point2f> pts_square2d;
generateEdge(p1, p2, pts_square3d);
generateEdge(p2, p3, pts_square3d);
generateEdge(p3, p4, pts_square3d);
- generateEdge(p4, p1, pts_square3d);
-
+ generateEdge(p4, p1, pts_square3d);
+
projectPoints( Mat(pts_square3d), rvec, tvec, camMat, distCoeffs, pts_square2d);
- squares_black.resize(squares_black.size() + 1);
- vector<Point2f> temp;
- approxPolyDP(Mat(pts_square2d), temp, 1.0, true);
- transform(temp.begin(), temp.end(), back_inserter(squares_black.back()), Mult(rendererResolutionMultiplier));
- }
+ squares_black.resize(squares_black.size() + 1);
+ vector<Point2f> temp;
+ approxPolyDP(Mat(pts_square2d), temp, 1.0, true);
+ transform(temp.begin(), temp.end(), back_inserter(squares_black.back()), Mult(rendererResolutionMultiplier));
+ }
/* calculate corners */
- vector<Point3f> corners3d;
+ vector<Point3f> corners3d;
for(int j = 0; j < patternSize.height - 1; ++j)
for(int i = 0; i < patternSize.width - 1; ++i)
corners3d.push_back(zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2);
generateEdge(whole[2], whole[3], whole3d);
generateEdge(whole[3], whole[0], whole3d);
projectPoints( Mat(whole3d), rvec, tvec, camMat, distCoeffs, whole2d);
- vector<Point2f> temp_whole2d;
- approxPolyDP(Mat(whole2d), temp_whole2d, 1.0, true);
+ vector<Point2f> temp_whole2d;
+ approxPolyDP(Mat(whole2d), temp_whole2d, 1.0, true);
vector< vector<Point > > whole_contour(1);
- transform(temp_whole2d.begin(), temp_whole2d.end(),
- back_inserter(whole_contour.front()), Mult(rendererResolutionMultiplier));
+ transform(temp_whole2d.begin(), temp_whole2d.end(),
+ back_inserter(whole_contour.front()), Mult(rendererResolutionMultiplier));
Mat result;
if (rendererResolutionMultiplier == 1)
- {
+ {
result = bg.clone();
- drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
+ drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
drawContours(result, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
}
else
{
- Mat tmp;
+ Mat tmp;
resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
- drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
+ drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
drawContours(tmp, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
- }
+ }
return result;
}
Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const
-{
+{
cov = min(cov, 0.8);
double fovx, fovy, focalLen;
Point2d principalPoint;
double aspect;
- calibrationMatrixValues( camMat, bg.size(), sensorWidth, sensorHeight,
+ calibrationMatrixValues( camMat, bg.size(), sensorWidth, sensorHeight,
fovx, fovy, focalLen, principalPoint, aspect);
RNG& rng = theRNG();
- float d1 = static_cast<float>(rng.uniform(0.1, 10.0));
+ float d1 = static_cast<float>(rng.uniform(0.1, 10.0));
float ah = static_cast<float>(rng.uniform(-fovx/2 * cov, fovx/2 * cov) * CV_PI / 180);
- float av = static_cast<float>(rng.uniform(-fovy/2 * cov, fovy/2 * cov) * CV_PI / 180);
-
+ float av = static_cast<float>(rng.uniform(-fovy/2 * cov, fovy/2 * cov) * CV_PI / 180);
+
Point3f p;
p.z = cos(ah) * d1;
p.x = sin(ah) * d1;
- p.y = p.z * tan(av);
+ p.y = p.z * tan(av);
- Point3f pb1, pb2;
+ Point3f pb1, pb2;
generateBasis(pb1, pb2);
-
+
float cbHalfWidth = static_cast<float>(norm(p) * sin( min(fovx, fovy) * 0.5 * CV_PI / 180));
float cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
-
+
vector<Point3f> pts3d(4);
vector<Point2f> pts2d(4);
for(;;)
- {
+ {
pts3d[0] = p + pb1 * cbHalfWidth + cbHalfHeight * pb2;
pts3d[1] = p + pb1 * cbHalfWidth - cbHalfHeight * pb2;
pts3d[2] = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
pts3d[3] = p - pb1 * cbHalfWidth + cbHalfHeight * pb2;
-
+
/* can remake with better perf */
projectPoints( Mat(pts3d), rvec, tvec, camMat, distCoeffs, pts2d);
bool inrect2 = pts2d[1].x < bg.cols && pts2d[1].y < bg.rows && pts2d[1].x > 0 && pts2d[1].y > 0;
bool inrect3 = pts2d[2].x < bg.cols && pts2d[2].y < bg.rows && pts2d[2].x > 0 && pts2d[2].y > 0;
bool inrect4 = pts2d[3].x < bg.cols && pts2d[3].y < bg.rows && pts2d[3].x > 0 && pts2d[3].y > 0;
-
+
if ( inrect1 && inrect2 && inrect3 && inrect4)
break;
cbHalfWidth*=0.8f;
- cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
+ cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
}
cbHalfWidth *= static_cast<float>(patternSize.width)/(patternSize.width + 1);
Point3f zero = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
float sqWidth = 2 * cbHalfWidth/patternSize.width;
float sqHeight = 2 * cbHalfHeight/patternSize.height;
-
- return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight, pts3d, corners);
+
+ return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight, pts3d, corners);
}
Rect selection;
int vmin = 10, vmax = 256, smin = 30;
-void onMouse( int event, int x, int y, int, void* )
+static void onMouse( int event, int x, int y, int, void* )
{
if( selectObject )
{
}
}
-void help()
+static void help()
{
cout << "\nThis is a demo that shows mean-shift based tracking\n"
- "You select a color objects such as your face and it tracks it.\n"
- "This reads from video camera (0 by default, or the camera number the user enters\n"
- "Usage: \n"
- " ./camshiftdemo [camera number]\n";
+ "You select a color object such as your face and it tracks it.\n"
+ "This reads from the video camera (0 by default, or the camera number the user enters)\n"
+ "Usage: \n"
+ " ./camshiftdemo [camera number]\n";
cout << "\n\nHot keys: \n"
- "\tESC - quit the program\n"
- "\tc - stop the tracking\n"
- "\tb - switch to/from backprojection view\n"
- "\th - show/hide object histogram\n"
- "\tp - pause video\n"
+ "\tESC - quit the program\n"
+ "\tc - stop the tracking\n"
+ "\tb - switch to/from backprojection view\n"
+ "\th - show/hide object histogram\n"
+ "\tp - pause video\n"
"To initialize tracking, select the object with mouse\n";
}
-const char* keys =
+const char* keys =
{
- "{1| | 0 | camera number}"
+ "{1| | 0 | camera number}"
};
int main( int argc, const char** argv )
{
- help();
+ help();
VideoCapture cap;
Rect trackWindow;
- RotatedRect trackBox;
int hsize = 16;
float hranges[] = {0,180};
const float* phranges = hranges;
- CommandLineParser parser(argc, argv, keys);
- int camNum = parser.get<int>("1");
-
- cap.open(camNum);
+ CommandLineParser parser(argc, argv, keys);
+ int camNum = parser.get<int>("1");
+
+ cap.open(camNum);
if( !cap.isOpened() )
{
- help();
+ help();
cout << "***Could not initialize capturing...***\n";
cout << "Current parameter's value: \n";
- parser.printParams();
+ parser.printParams();
return -1;
}
Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
bool paused = false;
-
+
for(;;)
{
if( !paused )
}
frame.copyTo(image);
-
+
if( !paused )
{
cvtColor(image, hsv, CV_BGR2HSV);
Mat roi(hue, selection), maskroi(mask, selection);
calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
normalize(hist, hist, 0, 255, CV_MINMAX);
-
+
trackWindow = selection;
trackObject = 1;
for( int i = 0; i < hsize; i++ )
buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
cvtColor(buf, buf, CV_HSV2BGR);
-
+
for( int i = 0; i < hsize; i++ )
{
int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
using namespace cv;
using namespace std;
-void help()
+static void help()
{
-
+
cout << "\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
"edge template and a query edge image.\n"
"Usage: \n"
" By default the inputs are logo_in_clutter.png logo.png\n";
}
-const char* keys =
+const char* keys =
{
- "{1| |logo_in_clutter.png|image edge map }"
- "{2| |logo.png |template edge map}"
+ "{1| |logo_in_clutter.png|image edge map }"
+ "{2| |logo.png |template edge map}"
};
int main( int argc, const char** argv )
{
- help();
- CommandLineParser parser(argc, argv, keys);
+ help();
+ CommandLineParser parser(argc, argv, keys);
- string image = parser.get<string>("1");
- string templ = parser.get<string>("2");
- Mat img = imread(image.c_str(), 0);
- Mat tpl = imread(templ.c_str(), 0);
+ string image = parser.get<string>("1");
+ string templ = parser.get<string>("2");
+ Mat img = imread(image.c_str(), 0);
+ Mat tpl = imread(templ.c_str(), 0);
- if (img.empty() || tpl.empty())
+ if (img.empty() || tpl.empty())
{
cout << "Could not read image file " << image << " or " << templ << "." << endl;
return -1;
Mat img;
int threshval = 100;
-void on_trackbar(int, void*)
+static void on_trackbar(int, void*)
{
- Mat bw = threshval < 128 ? (img < threshval) : (img > threshval);
-
+ Mat bw = threshval < 128 ? (img < threshval) : (img > threshval);
+
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
-
+
findContours( bw, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
-
- Mat dst = Mat::zeros(img.size(), CV_8UC3);
+
+ Mat dst = Mat::zeros(img.size(), CV_8UC3);
if( !contours.empty() && !hierarchy.empty() )
{
}
}
- imshow( "Connected Components", dst );
+ imshow( "Connected Components", dst );
}
-void help()
+static void help()
{
cout << "\n This program demonstrates connected components and use of the trackbar\n"
- "Usage: \n"
- " ./connected_components <image(stuff.jpg as default)>\n"
- "The image is converted to grayscale and displayed, another image has a trackbar\n"
+ "Usage: \n"
+ " ./connected_components <image(stuff.jpg as default)>\n"
+ "The image is converted to grayscale and displayed, another image has a trackbar\n"
"that controls thresholding and thereby the extracted contours which are drawn in color\n";
}
-const char* keys =
+const char* keys =
{
- "{1| |stuff.jpg|image for converting to a grayscale}"
+ "{1| |stuff.jpg|image for converting to a grayscale}"
};
int main( int argc, const char** argv )
{
- help();
- CommandLineParser parser(argc, argv, keys);
- string inputImage = parser.get<string>("1");
- img = imread(inputImage.c_str(), 0);
+ help();
+ CommandLineParser parser(argc, argv, keys);
+ string inputImage = parser.get<string>("1");
+ img = imread(inputImage.c_str(), 0);
- if(img.empty())
- {
+ if(img.empty())
+ {
cout << "Could not read input image file: " << inputImage << endl;
- return -1;
- }
+ return -1;
+ }
- namedWindow( "Image", 1 );
+ namedWindow( "Image", 1 );
imshow( "Image", img );
- namedWindow( "Connected Components", 1 );
- createTrackbar( "Threshold", "Connected Components", &threshval, 255, on_trackbar );
- on_trackbar(threshval, 0);
+ namedWindow( "Connected Components", 1 );
+ createTrackbar( "Threshold", "Connected Components", &threshval, 255, on_trackbar );
+ on_trackbar(threshval, 0);
waitKey(0);
return 0;
using namespace cv;
using namespace std;
-void help()
+static void help()
{
cout
<< "\nThis program illustrates the use of findContours and drawContours\n"
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
-void on_trackbar(int, void*)
+static void on_trackbar(int, void*)
{
Mat cnt_img = Mat::zeros(w, w, CV_8UC3);
int _levels = levels - 3;
using namespace cv;
using namespace std;
-void help()
+static void help()
{
- cout << "\nThis sample program demonstrates the use of the convexHull() function\n"
- << "Call:\n"
- << "./convexhull\n" << endl;
+ cout << "\nThis sample program demonstrates the use of the convexHull() function\n"
+ << "Call:\n"
+ << "./convexhull\n" << endl;
}
int main( int /*argc*/, char** /*argv*/ )
{
char key;
int i, count = (unsigned)rng%100 + 1;
-
+
vector<Point> points;
for( i = 0; i < count; i++ )
Point pt;
pt.x = rng.uniform(img.cols/4, img.cols*3/4);
pt.y = rng.uniform(img.rows/4, img.rows*3/4);
-
+
points.push_back(pt);
}
-
+
vector<int> hull;
convexHull(Mat(points), hull, true);
-
+
img = Scalar::all(0);
for( i = 0; i < count; i++ )
circle(img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA);
-
+
int hullcount = (int)hull.size();
Point pt0 = points[hull[hullcount-1]];
-
+
for( i = 0; i < hullcount; i++ )
{
Point pt = points[hull[i]];
using namespace std;
using namespace cv;
-void help()
+static void help()
{
- cout
- << "\n------------------------------------------------------------------\n"
- << " This program shows the serial out capabilities of cv::Mat\n"
- << "That is, cv::Mat M(...); cout << M; Now works.\n"
- << "Output can be formated to OpenCV, python, numpy, csv and C styles"
- << "Usage:\n"
- << "./cvout_sample\n"
- << "------------------------------------------------------------------\n\n"
- << endl;
+ cout
+ << "\n------------------------------------------------------------------\n"
+ << " This program shows the serial out capabilities of cv::Mat\n"
+ << "That is, cv::Mat M(...); cout << M; Now works.\n"
+        << "Output can be formatted to OpenCV, python, numpy, csv and C styles\n"
+ << "Usage:\n"
+ << "./cvout_sample\n"
+ << "------------------------------------------------------------------\n\n"
+ << endl;
}
int main(int,char**)
{
- help();
- Mat i = Mat::eye(4, 4, CV_64F);
- i.at<double>(1,1) = CV_PI;
- cout << "i = " << i << ";" << endl;
+ help();
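+    // The identity matrix is named 'I' so the lowercase 'i' stays free for the loop indices used later in this sample.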
+ Mat I = Mat::eye(4, 4, CV_64F);
+ I.at<double>(1,1) = CV_PI;
+ cout << "I = " << I << ";" << endl;
Mat r = Mat(10, 3, CV_8UC3);
randu(r, Scalar::all(0), Scalar::all(255));
cout << "r (default) = " << r << ";" << endl << endl;
cout << "r (python) = " << format(r,"python") << ";" << endl << endl;
cout << "r (numpy) = " << format(r,"numpy") << ";" << endl << endl;
- cout << "r (csv) = " << format(r,"csv") << ";" << endl << endl;
+ cout << "r (csv) = " << format(r,"csv") << ";" << endl << endl;
cout << "r (c) = " << format(r,"C") << ";" << endl << endl;
Point2f p(5, 1);
v.push_back(1);
v.push_back(2);
v.push_back(3);
-
+
cout << "shortvec = " << Mat(v) << endl;
-
+
vector<Point2f> points(20);
for (size_t i = 0; i < points.size(); ++i)
points[i] = Point2f((float)(i * 5), (float)(i % 7));
static void help()
{
- cout << "\nThis program demostrates iterative construction of\n"
+    cout << "\nThis program demonstrates iterative construction of\n"
"delaunay triangulation and voronoi tesselation.\n"
"It draws a random set of points in an image and then delaunay triangulates them.\n"
"Usage: \n"
vector<Vec6f> triangleList;
subdiv.getTriangleList(triangleList);
vector<Point> pt(3);
-
+
for( size_t i = 0; i < triangleList.size(); i++ )
{
Vec6f t = triangleList[i];
static void locate_point( Mat& img, Subdiv2D& subdiv, Point2f fp, Scalar active_color )
{
int e0=0, vertex=0;
-
+
subdiv.locate(fp, e0, vertex);
-
+
if( e0 > 0 )
{
int e = e0;
Point2f org, dst;
if( subdiv.edgeOrg(e, &org) > 0 && subdiv.edgeDst(e, &dst) > 0 )
line( img, org, dst, active_color, 3, CV_AA, 0 );
-
+
e = subdiv.getEdge(e, Subdiv2D::NEXT_AROUND_LEFT);
}
while( e != e0 );
}
-
+
draw_subdiv_point( img, fp, active_color );
}
-void paint_voronoi( Mat& img, Subdiv2D& subdiv )
+static void paint_voronoi( Mat& img, Subdiv2D& subdiv )
{
vector<vector<Point2f> > facets;
vector<Point2f> centers;
subdiv.getVoronoiFacetList(vector<int>(), facets, centers);
-
+
vector<Point> ifacet;
vector<vector<Point> > ifacets(1);
-
+
for( size_t i = 0; i < facets.size(); i++ )
{
ifacet.resize(facets[i].size());
for( size_t j = 0; j < facets[i].size(); j++ )
ifacet[j] = facets[i][j];
-
+
Scalar color;
color[0] = rand() & 255;
color[1] = rand() & 255;
color[2] = rand() & 255;
fillConvexPoly(img, ifacet, color, 8, 0);
-
+
ifacets[0] = ifacet;
polylines(img, ifacets, true, Scalar(), 1, CV_AA, 0);
circle(img, centers[i], 3, Scalar(), -1, CV_AA, 0);
int main( int, char** )
{
help();
-
+
Scalar active_facet_color(0, 0, 255), delaunay_color(255,255,255);
Rect rect(0, 0, 600, 600);
-
+
Subdiv2D subdiv(rect);
Mat img(rect.size(), CV_8UC3);
-
+
img = Scalar::all(0);
string win = "Delaunay Demo";
imshow(win, img);
-
+
for( int i = 0; i < 200; i++ )
{
Point2f fp( (float)(rand()%(rect.width-10)+5),
(float)(rand()%(rect.height-10)+5));
-
+
locate_point( img, subdiv, fp, active_facet_color );
imshow( win, img );
-
+
if( waitKey( 100 ) >= 0 )
break;
-
+
subdiv.insert(fp);
-
+
img = Scalar::all(0);
draw_subdiv( img, subdiv, delaunay_color );
imshow( win, img );
-
+
if( waitKey( 100 ) >= 0 )
break;
}
-
+
img = Scalar::all(0);
paint_voronoi( img, subdiv );
imshow( win, img );
-
+
waitKey(0);
-
+
return 0;
}
Mat image;
/* brightness/contrast callback function */
-void updateBrightnessContrast( int /*arg*/, void* )
+static void updateBrightnessContrast( int /*arg*/, void* )
{
int histSize = 64;
int brightness = _brightness - 100;
calcHist(&dst, 1, 0, Mat(), hist, 1, &histSize, 0);
Mat histImage = Mat::ones(200, 320, CV_8U)*255;
-
+
normalize(hist, hist, 0, histImage.rows, CV_MINMAX, CV_32F);
histImage = Scalar::all(255);
Scalar::all(0), -1, 8, 0 );
imshow("histogram", histImage);
}
-void help()
+static void help()
{
- std::cout << "\nThis program demonstrates the use of calcHist() -- histogram creation.\n"
- << "Usage: \n" << "demhist [image_name -- Defaults to baboon.jpg]" << std::endl;
+ std::cout << "\nThis program demonstrates the use of calcHist() -- histogram creation.\n"
+ << "Usage: \n" << "demhist [image_name -- Defaults to baboon.jpg]" << std::endl;
}
-const char* keys =
+const char* keys =
{
- "{1| |baboon.jpg|input image file}"
+ "{1| |baboon.jpg|input image file}"
};
int main( int argc, const char** argv )
{
- help();
-
- CommandLineParser parser(argc, argv, keys);
- string inputImage = parser.get<string>("1");
-
- // Load the source image. HighGUI use.
- image = imread( inputImage, 0 );
- if(image.empty())
- {
- std::cerr << "Cannot read image file: " << inputImage << std::endl;
- return -1;
- }
+ help();
+
+ CommandLineParser parser(argc, argv, keys);
+ string inputImage = parser.get<string>("1");
+
+ // Load the source image. HighGUI use.
+ image = imread( inputImage, 0 );
+ if(image.empty())
+ {
+ std::cerr << "Cannot read image file: " << inputImage << std::endl;
+ return -1;
+ }
namedWindow("image", 0);
namedWindow("histogram", 0);
using namespace cv;
using namespace std;
-void help(char** argv)
+static void help(char** argv)
{
cout << "\nThis program demonstrats keypoint finding and matching between 2 images using features2d framework.\n"
<< " In one case, the 2nd image is synthesized by homography from the first, in the second case, there are 2 images\n"
enum { NONE_FILTER = 0, CROSS_CHECK_FILTER = 1 };
-int getMatcherFilterType( const string& str )
+static int getMatcherFilterType( const string& str )
{
if( str == "NoneFilter" )
return NONE_FILTER;
return -1;
}
-void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
+static void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
const Mat& descriptors1, const Mat& descriptors2,
vector<DMatch>& matches12 )
{
descriptorMatcher->match( descriptors1, descriptors2, matches12 );
}
-void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
+static void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
const Mat& descriptors1, const Mat& descriptors2,
vector<DMatch>& filteredMatches12, int knn=1 )
{
}
}
-void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
+static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
{
H.create(3, 3, CV_32FC1);
H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
warpPerspective( src, dst, H, src.size() );
}
-void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
+static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
vector<KeyPoint>& keypoints1, const Mat& descriptors1,
Ptr<FeatureDetector>& detector, Ptr<DescriptorExtractor>& descriptorExtractor,
Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilter, bool eval,
#define DEBUGLOGS 1
-#if ANDROID
+#ifdef ANDROID
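+// On Android the LOG* macros below forward to __android_log_print; elsewhere they expand to no-ops (see the #else branch).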
#include <android/log.h>
#define LOG_TAG "DETECTIONBASEDTRACKER__TEST_APPLICAT"
#define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define LOGI(_str, ...) LOGI0(_str , ## __VA_ARGS__)
#define LOGW(_str, ...) LOGW0(_str , ## __VA_ARGS__)
#define LOGE(_str, ...) LOGE0(_str , ## __VA_ARGS__)
-#else
+#else
#define LOGD(...) do{} while(0)
#define LOGI(...) do{} while(0)
#define LOGW(...) do{} while(0)
#define ORIGINAL 0
#define SHOULD_USE_EXTERNAL_BUFFERS 1
-void usage()
+static void usage()
{
LOGE0("usage: filepattern outfilepattern cascadefile");
LOGE0("\t where ");
LOGE0("\t (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" ");
}
-int test_FaceDetector(int argc, char *argv[])
+static int test_FaceDetector(int argc, char *argv[])
{
if (argc < 4) {
usage();
fd.run();
Mat gray;
- Mat m;
+ Mat m;
int64 tprev=getTickCount();
double freq=getTickFrequency();
void run();
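+    // Virtual destructor for a class with virtual members, so destroying a derived evaluator through a base pointer stays well defined.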
+ virtual ~BaseQualityEvaluator(){}
+
protected:
virtual string getRunParamsFilename() const = 0;
virtual string getResultsFilename() const = 0;
isWriteParams = false;
FileNode topfn = fs.getFirstTopLevelNode();
- FileNode fn = topfn[DEFAULT_PARAMS];
- readDefaultRunParams(fn);
+ FileNode pfn = topfn[DEFAULT_PARAMS];
+ readDefaultRunParams(pfn);
for( int i = 0; i < DATASETS_COUNT; i++ )
{
if( !fs.isOpened() )
{
cout << "filename " << dirname + filename.str() << endl;
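+            // A FileStorage named 'fs' is already in scope here (see the isOpened() check above), so this one gets a distinct name.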
- FileStorage fs( dirname + filename.str(), FileStorage::READ );
+ FileStorage fs2( dirname + filename.str(), FileStorage::READ );
return false;
}
fs.getFirstTopLevelNode() >> Hs[i];
}
}
-int update_progress( const string& /*name*/, int progress, int test_case_idx, int count, double dt )
+static int update_progress( const string& /*name*/, int progress, int test_case_idx, int count, double dt )
{
int width = 60 /*- (int)name.length()*/;
if( count > 0 )
}
}
-void testLog( bool isBadAccuracy )
-{
- if( isBadAccuracy )
- printf(" bad accuracy\n");
- else
- printf("\n");
-}
+// static void testLog( bool isBadAccuracy )
+// {
+// if( isBadAccuracy )
+// printf(" bad accuracy\n");
+// else
+// printf("\n");
+// }
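+// (testLog is apparently no longer called anywhere, so it is kept only as a commented-out reference.)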
/****************************************************************************************\
* Descriptors evaluation *
using namespace cv;
using namespace std;
-void help()
+static void help()
{
- printf("\nThis program demonstrated the use of the discrete Fourier transform (dft)\n"
- "The dft of an image is taken and it's power spectrum is displayed.\n"
- "Usage:\n"
- "./dft [image_name -- default lena.jpg]\n");
+    printf("\nThis program demonstrates the use of the discrete Fourier transform (dft)\n"
+           "The dft of an image is taken and its power spectrum is displayed.\n"
+ "Usage:\n"
+ "./dft [image_name -- default lena.jpg]\n");
}
-const char* keys =
+const char* keys =
{
- "{1| |lena.jpg|input image file}"
+ "{1| |lena.jpg|input image file}"
};
int main(int argc, const char ** argv)
{
help();
- CommandLineParser parser(argc, argv, keys);
- string filename = parser.get<string>("1");
+ CommandLineParser parser(argc, argv, keys);
+ string filename = parser.get<string>("1");
- Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
if( img.empty() )
{
help();
- printf("Cannot read image file: %s\n", filename.c_str());
+ printf("Cannot read image file: %s\n", filename.c_str());
return -1;
}
int M = getOptimalDFTSize( img.rows );
int N = getOptimalDFTSize( img.cols );
Mat padded;
copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));
-
+
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexImg;
merge(planes, 2, complexImg);
-
+
dft(complexImg, complexImg);
-
+
// compute log(1 + sqrt(Re(DFT(img))**2 + Im(DFT(img))**2))
split(complexImg, planes);
magnitude(planes[0], planes[1], planes[0]);
Mat mag = planes[0];
mag += Scalar::all(1);
log(mag, mag);
-
+
// crop the spectrum, if it has an odd number of rows or columns
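+    // ('size & -2' clears the lowest bit, i.e. rounds the dimension down to an even number)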
mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
-
+
int cx = mag.cols/2;
int cy = mag.rows/2;
-
+
// rearrange the quadrants of Fourier image
// so that the origin is at the image center
Mat tmp;
Mat q1(mag, Rect(cx, 0, cx, cy));
Mat q2(mag, Rect(0, cy, cx, cy));
Mat q3(mag, Rect(cx, cy, cx, cy));
-
+
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
-
+
q1.copyTo(tmp);
q2.copyTo(q1);
tmp.copyTo(q2);
-
+
normalize(mag, mag, 0, 1, CV_MINMAX);
-
+
imshow("spectrum magnitude", mag);
waitKey();
return 0;
Mat gray;
// threshold trackbar callback
-void onTrackbar( int, void* )
+static void onTrackbar( int, void* )
{
static const Scalar colors[] =
{
// begin "painting" the distance transform result
dist *= 5000;
pow(dist, 0.5, dist);
-
+
Mat dist32s, dist8u1, dist8u2;
-
+
dist.convertTo(dist32s, CV_32S, 1, 0.5);
dist32s &= Scalar::all(255);
-
+
dist32s.convertTo(dist8u1, CV_8U, 1, 0);
dist32s *= -1;
-
+
dist32s += Scalar::all(255);
dist32s.convertTo(dist8u2, CV_8U);
-
+
Mat planes[] = {dist8u1, dist8u2, dist8u2};
- merge(planes, 3, dist8u);
+ merge(planes, 3, dist8u);
}
else
{
imshow("Distance Map", dist8u );
}
-void help()
+static void help()
{
- printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n"
- "Usage:\n"
- "./distrans [image_name -- default image is stuff.jpg]\n"
- "\nHot keys: \n"
- "\tESC - quit the program\n"
- "\tC - use C/Inf metric\n"
- "\tL1 - use L1 metric\n"
- "\tL2 - use L2 metric\n"
- "\t3 - use 3x3 mask\n"
- "\t5 - use 5x5 mask\n"
- "\t0 - use precise distance transform\n"
- "\tv - switch to Voronoi diagram mode\n"
+ printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n"
+ "Usage:\n"
+ "./distrans [image_name -- default image is stuff.jpg]\n"
+ "\nHot keys: \n"
+ "\tESC - quit the program\n"
+ "\tC - use C/Inf metric\n"
+ "\tL1 - use L1 metric\n"
+ "\tL2 - use L2 metric\n"
+ "\t3 - use 3x3 mask\n"
+ "\t5 - use 5x5 mask\n"
+ "\t0 - use precise distance transform\n"
+ "\tv - switch to Voronoi diagram mode\n"
"\tp - switch to pixel-based Voronoi diagram mode\n"
- "\tSPACE - loop through all the modes\n\n");
+ "\tSPACE - loop through all the modes\n\n");
}
-const char* keys =
+const char* keys =
{
- "{1| |stuff.jpg|input image file}"
+ "{1| |stuff.jpg|input image file}"
};
int main( int argc, const char** argv )
{
help();
- CommandLineParser parser(argc, argv, keys);
- string filename = parser.get<string>("1");
- gray = imread(filename.c_str(), 0);
+ CommandLineParser parser(argc, argv, keys);
+ string filename = parser.get<string>("1");
+ gray = imread(filename.c_str(), 0);
if(gray.empty())
{
- printf("Cannot read image file: %s\n", filename.c_str());
- help();
- return -1;
+ printf("Cannot read image file: %s\n", filename.c_str());
+ help();
+ return -1;
}
namedWindow("Distance Map", 1);
if( c == 'c' || c == 'C' || c == '1' || c == '2' ||
c == '3' || c == '5' || c == '0' )
voronoiType = -1;
-
+
if( c == 'c' || c == 'C' )
distType0 = CV_DIST_C;
else if( c == '1' )
#include <stdio.h>
using namespace cv;
-void help()
+static void help()
{
- printf("\nThis program demonstrates OpenCV drawing and text output functions.\n"
- "Usage:\n"
- " ./drawing\n");
+ printf("\nThis program demonstrates OpenCV drawing and text output functions.\n"
+ "Usage:\n"
+ " ./drawing\n");
}
static Scalar randomColor(RNG& rng)
{
int main()
{
help();
- char wndname[] = "Drawing Demo";
+ char wndname[] = "Drawing Demo";
const int NUMBER = 100;
const int DELAY = 5;
int lineType = CV_AA; // change it to 8 to see non-antialiased graphics
pt2.y = rng.uniform(y1, y2);
line( image, pt1, pt2, randomColor(rng), rng.uniform(1,10), lineType );
-
+
imshow(wndname, image);
if(waitKey(DELAY) >= 0)
return 0;
pt2.x = rng.uniform(x1, x2);
pt2.y = rng.uniform(y1, y2);
int thickness = rng.uniform(-3, 10);
-
+
rectangle( image, pt1, pt2, randomColor(rng), MAX(thickness, -1), lineType );
-
+
imshow(wndname, image);
if(waitKey(DELAY) >= 0)
return 0;
}
-
+
for (i = 0; i < NUMBER; i++)
{
Point center;
ellipse( image, center, axes, angle, angle - 100, angle + 200,
randomColor(rng), rng.uniform(-1,9), lineType );
-
+
imshow(wndname, image);
if(waitKey(DELAY) >= 0)
return 0;
Point pt[2][3];
pt[0][0].x = rng.uniform(x1, x2);
pt[0][0].y = rng.uniform(y1, y2);
- pt[0][1].x = rng.uniform(x1, x2);
- pt[0][1].y = rng.uniform(y1, y2);
+ pt[0][1].x = rng.uniform(x1, x2);
+ pt[0][1].y = rng.uniform(y1, y2);
pt[0][2].x = rng.uniform(x1, x2);
pt[0][2].y = rng.uniform(y1, y2);
- pt[1][0].x = rng.uniform(x1, x2);
+ pt[1][0].x = rng.uniform(x1, x2);
pt[1][0].y = rng.uniform(y1, y2);
- pt[1][1].x = rng.uniform(x1, x2);
+ pt[1][1].x = rng.uniform(x1, x2);
pt[1][1].y = rng.uniform(y1, y2);
- pt[1][2].x = rng.uniform(x1, x2);
+ pt[1][2].x = rng.uniform(x1, x2);
pt[1][2].y = rng.uniform(y1, y2);
const Point* ppt[2] = {pt[0], pt[1]};
int npt[] = {3, 3};
-
+
polylines(image, ppt, npt, 2, true, randomColor(rng), rng.uniform(1,10), lineType);
-
+
imshow(wndname, image);
if(waitKey(DELAY) >= 0)
return 0;
}
-
+
for (i = 0; i< NUMBER; i++)
{
Point pt[2][3];
pt[0][0].x = rng.uniform(x1, x2);
pt[0][0].y = rng.uniform(y1, y2);
- pt[0][1].x = rng.uniform(x1, x2);
- pt[0][1].y = rng.uniform(y1, y2);
+ pt[0][1].x = rng.uniform(x1, x2);
+ pt[0][1].y = rng.uniform(y1, y2);
pt[0][2].x = rng.uniform(x1, x2);
pt[0][2].y = rng.uniform(y1, y2);
- pt[1][0].x = rng.uniform(x1, x2);
+ pt[1][0].x = rng.uniform(x1, x2);
pt[1][0].y = rng.uniform(y1, y2);
- pt[1][1].x = rng.uniform(x1, x2);
+ pt[1][1].x = rng.uniform(x1, x2);
pt[1][1].y = rng.uniform(y1, y2);
- pt[1][2].x = rng.uniform(x1, x2);
+ pt[1][2].x = rng.uniform(x1, x2);
pt[1][2].y = rng.uniform(y1, y2);
const Point* ppt[2] = {pt[0], pt[1]};
int npt[] = {3, 3};
-
+
fillPoly(image, ppt, npt, 2, randomColor(rng), lineType);
-
+
imshow(wndname, image);
if(waitKey(DELAY) >= 0)
return 0;
Point center;
center.x = rng.uniform(x1, x2);
center.y = rng.uniform(y1, y2);
-
+
circle(image, center, rng.uniform(0, 300), randomColor(rng),
rng.uniform(-1, 9), lineType);
-
+
imshow(wndname, image);
if(waitKey(DELAY) >= 0)
return 0;
putText(image, "Testing text rendering", org, rng.uniform(0,8),
rng.uniform(0,100)*0.05+0.1, randomColor(rng), rng.uniform(1, 10), lineType);
-
+
imshow(wndname, image);
if(waitKey(DELAY) >= 0)
return 0;
Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((width - textsize.width)/2, (height - textsize.height)/2);
-
+
Mat image2;
for( i = 0; i < 255; i += 2 )
{
image2 = image - Scalar::all(i);
putText(image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType);
-
+
imshow(wndname, image2);
if(waitKey(DELAY) >= 0)
return 0;
Mat image, gray, edge, cedge;
// define a trackbar callback
-void onTrackbar(int, void*)
+static void onTrackbar(int, void*)
{
blur(gray, edge, Size(3,3));
// Run the edge detector on grayscale
Canny(edge, edge, edgeThresh, edgeThresh*3, 3);
cedge = Scalar::all(0);
-
+
image.copyTo(cedge, edge);
imshow("Edge map", cedge);
}
-void help()
+static void help()
{
- printf("\nThis sample demonstrates Canny edge detection\n"
- "Call:\n"
- " /.edge [image_name -- Default is fruits.jpg]\n\n");
+ printf("\nThis sample demonstrates Canny edge detection\n"
+ "Call:\n"
+           "    ./edge [image_name -- Default is fruits.jpg]\n\n");
}
-const char* keys =
+const char* keys =
{
- "{1| |fruits.jpg|input image name}"
+ "{1| |fruits.jpg|input image name}"
};
int main( int argc, const char** argv )
{
help();
- CommandLineParser parser(argc, argv, keys);
- string filename = parser.get<string>("1");
+ CommandLineParser parser(argc, argv, keys);
+ string filename = parser.get<string>("1");
image = imread(filename, 1);
if(image.empty())
{
- printf("Cannot read image file: %s\n", filename.c_str());
- help();
+ printf("Cannot read image file: %s\n", filename.c_str());
+ help();
return -1;
}
cedge.create(image.size(), image.type());
using namespace cv;
using namespace std;
-Mat toGrayscale(InputArray _src) {
+static Mat toGrayscale(InputArray _src) {
Mat src = _src.getMat();
// only allow one channel
if(src.channels() != 1)
return dst;
}
-void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
+static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file)
throw std::exception();
using namespace cv;
using namespace std;
-void help()
+static void help()
{
- cout <<
- "\nThis program demonstrates dense optical flow algorithm by Gunnar Farneback\n"
- "Mainly the function: calcOpticalFlowFarneback()\n"
- "Call:\n"
- "./fback\n"
- "This reads from video camera 0\n" << endl;
+ cout <<
+ "\nThis program demonstrates dense optical flow algorithm by Gunnar Farneback\n"
+ "Mainly the function: calcOpticalFlowFarneback()\n"
+ "Call:\n"
+ "./fback\n"
+ "This reads from video camera 0\n" << endl;
}
-void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step,
+static void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step,
double, const Scalar& color)
{
for(int y = 0; y < cflowmap.rows; y += step)
help();
if( !cap.isOpened() )
return -1;
-
+
Mat prevgray, gray, flow, cflow, frame;
namedWindow("flow", 1);
-
+
for(;;)
{
cap >> frame;
cvtColor(frame, gray, CV_BGR2GRAY);
-
+
if( prevgray.data )
{
calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
using namespace cv;
using namespace std;
-void help()
+static void help()
{
cout << "\nThis program demonstrated the floodFill() function\n"
- "Call:\n"
- "./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl;
-
- cout << "Hot keys: \n"
- "\tESC - quit the program\n"
- "\tc - switch color/grayscale mode\n"
- "\tm - switch mask mode\n"
- "\tr - restore the original image\n"
- "\ts - use null-range floodfill\n"
- "\tf - use gradient floodfill with fixed(absolute) range\n"
- "\tg - use gradient floodfill with floating(relative) range\n"
- "\t4 - use 4-connectivity mode\n"
- "\t8 - use 8-connectivity mode\n" << endl;
+ "Call:\n"
+ "./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl;
+
+ cout << "Hot keys: \n"
+ "\tESC - quit the program\n"
+ "\tc - switch color/grayscale mode\n"
+ "\tm - switch mask mode\n"
+ "\tr - restore the original image\n"
+ "\ts - use null-range floodfill\n"
+ "\tf - use gradient floodfill with fixed(absolute) range\n"
+ "\tg - use gradient floodfill with floating(relative) range\n"
+ "\t4 - use 4-connectivity mode\n"
+ "\t8 - use 8-connectivity mode\n" << endl;
}
Mat image0, image, gray, mask;
bool useMask = false;
int newMaskVal = 255;
-void onMouse( int event, int x, int y, int, void* )
+static void onMouse( int event, int x, int y, int, void* )
{
if( event != CV_EVENT_LBUTTONDOWN )
return;
Scalar newVal = isColor ? Scalar(b, g, r) : Scalar(r*0.299 + g*0.587 + b*0.114);
Mat dst = isColor ? image : gray;
int area;
-
+
if( useMask )
{
threshold(mask, mask, 1, 128, CV_THRESH_BINARY);
area = floodFill(dst, seed, newVal, &ccomp, Scalar(lo, lo, lo),
Scalar(up, up, up), flags);
}
-
+
imshow("image", dst);
cout << area << " pixels were repainted\n";
}
{
char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
image0 = imread(filename, 1);
-
+
if( image0.empty() )
{
cout << "Image empty. Usage: ffilldemo <image_name>\n";
using std::ostream;
using namespace cv;
-void help(char** av)
+static void help(char** av)
{
cout << "\nfilestorage_sample demonstrate the usage of the opencv serialization functionality.\n"
- << "usage:\n"
- << av[0] << " outputfile.yml.gz\n"
- << "\n outputfile above can have many different extenstions, see below."
- << "\nThis program demonstrates the use of FileStorage for serialization, that is use << and >> in OpenCV\n"
- << "For example, how to create a class and have it serialize, but also how to use it to read and write matrices.\n"
- << "FileStorage allows you to serialize to various formats specified by the file end type."
+ << "usage:\n"
+ << av[0] << " outputfile.yml.gz\n"
+        << "\n outputfile above can have many different extensions, see below."
+ << "\nThis program demonstrates the use of FileStorage for serialization, that is use << and >> in OpenCV\n"
+ << "For example, how to create a class and have it serialize, but also how to use it to read and write matrices.\n"
+        << "FileStorage allows you to serialize to various formats specified by the file extension."
<< "\nYou should try using different file extensions.(e.g. yaml yml xml xml.gz yaml.gz etc...)\n" << endl;
}
};
//These write and read functions must exist as per the inline functions in operations.hpp
-void write(FileStorage& fs, const std::string&, const MyData& x){
+static void write(FileStorage& fs, const std::string&, const MyData& x){
x.write(fs);
}
-void read(const FileNode& node, MyData& x, const MyData& default_value = MyData()){
+static void read(const FileNode& node, MyData& x, const MyData& default_value = MyData()){
if(node.empty())
x = default_value;
else
x.read(node);
}
-ostream& operator<<(ostream& out, const MyData& m){
+static ostream& operator<<(ostream& out, const MyData& m){
out << "{ id = " << m.id << ", ";
out << "X = " << m.X << ", ";
out << "A = " << m.A << "}";
using namespace cv;
using namespace std;
-void help()
-{
- cout <<
- "\nThis program is demonstration for ellipse fitting. The program finds\n"
- "contours and approximate it by ellipses.\n"
- "Call:\n"
- "./fitellipse [image_name -- Default stuff.jpg]\n" << endl;
-}
+// static void help()
+// {
+// cout <<
+//     "\nThis program is a demonstration of ellipse fitting. The program finds\n"
+//     "contours and approximates them by ellipses.\n"
+// "Call:\n"
+// "./fitellipse [image_name -- Default stuff.jpg]\n" << endl;
+// }
int sliderPos = 70;
imshow("source", image);
namedWindow("result", 1);
-
+
// Create toolbars. HighGUI use.
createTrackbar( "threshold", "result", &sliderPos, 255, processImage );
processImage(0, 0);
{
vector<vector<Point> > contours;
Mat bimage = image >= sliderPos;
-
+
findContours(bimage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
Mat cimage = Mat::zeros(bimage.size(), CV_8UC3);
size_t count = contours[i].size();
if( count < 6 )
continue;
-
+
Mat pointsf;
Mat(contours[i]).convertTo(pointsf, CV_32F);
RotatedRect box = fitEllipse(pointsf);
-
+
if( MAX(box.size.width, box.size.height) > MIN(box.size.width, box.size.height)*30 )
continue;
drawContours(cimage, contours, (int)i, Scalar::all(255), 1, 8);
using namespace cv;
-void help()
+static void help()
{
printf("Use the SURF descriptor for matching keypoints between 2 images\n");
printf("Format: \n./generic_descriptor_match <image1> <image2> <algorithm> <XML params>\n");
{
if (argc != 5)
{
- help();
+ help();
return 0;
}
//printf("Reading the images...\n");
Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE);
Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE);
-
+
// extract keypoints from the first image
SURF surf_extractor(5.0e3);
vector<KeyPoint> keypoints1;
// printf("Extracting keypoints\n");
surf_extractor(img1, Mat(), keypoints1);
-
+
printf("Extracted %d keypoints from the first image\n", (int)keypoints1.size());
vector<KeyPoint> keypoints2;
using namespace std;
using namespace cv;
-void help()
+static void help()
{
cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a region\n"
"and then grabcut will attempt to segment it out.\n"
const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY;
const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY;
-void getBinMask( const Mat& comMask, Mat& binMask )
+static void getBinMask( const Mat& comMask, Mat& binMask )
{
if( comMask.empty() || comMask.type()!=CV_8UC1 )
CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );
GCApplication gcapp;
-void on_mouse( int event, int x, int y, int flags, void* param )
+static void on_mouse( int event, int x, int y, int flags, void* param )
{
gcapp.mouseClick( event, x, y, flags, param );
}
using namespace cv;
using namespace std;
-void help()
+static void help()
{
cout << "\nThis program demonstrates circle finding with the Hough transform.\n"
"Usage:\n"
Mat cimg;
medianBlur(img, img, 5);
cvtColor(img, cimg, CV_GRAY2BGR);
-
+
vector<Vec3f> circles;
HoughCircles(img, circles, CV_HOUGH_GRADIENT, 1, 10,
- 100, 30, 1, 30 // change the last two parameters
+ 100, 30, 1, 30 // change the last two parameters
// (min_radius & max_radius) to detect larger circles
);
for( size_t i = 0; i < circles.size(); i++ )
using namespace cv;
using namespace std;
-void help()
+static void help()
{
cout << "\nThis program demonstrates line finding with the Hough transform.\n"
"Usage:\n"
int trackObject = 0;
int live = 1;
-void drawRectangle(Mat* image, Rect win) {
- rectangle(*image, Point(win.x, win.y), Point(win.x + win.width, win.y
- + win.height), Scalar(0, 255, 0), 2, CV_AA);
+static void drawRectangle(Mat* img, Rect win) {
+ rectangle(*img, Point(win.x, win.y), Point(win.x + win.width, win.y
+ + win.height), Scalar(0, 255, 0), 2, CV_AA);
}
-void onMouse(int event, int x, int y, int, void*) {
- if (selectObject) {
- selection.x = MIN(x, origin.x);
- selection.y = MIN(y, origin.y);
- selection.width = std::abs(x - origin.x);
- selection.height = std::abs(y - origin.y);
- selection &= Rect(0, 0, image.cols, image.rows);
- }
-
- switch (event) {
- case CV_EVENT_LBUTTONDOWN:
- origin = Point(x, y);
- selection = Rect(x, y, 0, 0);
- selectObject = true;
- break;
- case CV_EVENT_LBUTTONUP:
- selectObject = false;
- trackObject = -1;
- break;
- }
+static void onMouse(int event, int x, int y, int, void*) {
+ if (selectObject) {
+ selection.x = MIN(x, origin.x);
+ selection.y = MIN(y, origin.y);
+ selection.width = std::abs(x - origin.x);
+ selection.height = std::abs(y - origin.y);
+ selection &= Rect(0, 0, image.cols, image.rows);
+ }
+
+ switch (event) {
+ case CV_EVENT_LBUTTONDOWN:
+ origin = Point(x, y);
+ selection = Rect(x, y, 0, 0);
+ selectObject = true;
+ break;
+ case CV_EVENT_LBUTTONUP:
+ selectObject = false;
+ trackObject = -1;
+ break;
+ }
}
-void help()
+static void help()
{
- printf("Usage: ./hytrack live or ./hytrack <test_file> \n\
+ printf("Usage: ./hytrack live or ./hytrack <test_file> \n\
For Live View or Benchmarking. Read documentation is source code.\n\n");
}
int main(int argc, char** argv)
{
- if(argc != 2) {
- help();
- return 1;
- }
-
- FILE* f = 0;
- VideoCapture cap;
- char test_file[20] = "";
-
- if (strcmp(argv[1], "live") != 0)
- {
- sprintf(test_file, "%s", argv[1]);
- f = fopen(test_file, "r");
- char vid[20];
- int values_read = fscanf(f, "%s\n", vid);
- CV_Assert(values_read == 1);
- cout << "Benchmarking against " << vid << endl;
- live = 0;
- }
- else
- {
- cap.open(0);
- if (!cap.isOpened())
- {
- cout << "Failed to open camera" << endl;
- return 0;
- }
- cout << "Opened camera" << endl;
+ if(argc != 2) {
+ help();
+ return 1;
+ }
+
+ FILE* f = 0;
+ VideoCapture cap;
+ char test_file[20] = "";
+
+ if (strcmp(argv[1], "live") != 0)
+ {
+ sprintf(test_file, "%s", argv[1]);
+ f = fopen(test_file, "r");
+ char vid[20];
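+        // Check fscanf's return value so a malformed benchmark file fails loudly (and, presumably, so unused-result warnings stay quiet).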
+ int values_read = fscanf(f, "%s\n", vid);
+ CV_Assert(values_read == 1);
+ cout << "Benchmarking against " << vid << endl;
+ live = 0;
+ }
+ else
+ {
+ cap.open(0);
+ if (!cap.isOpened())
+ {
+ cout << "Failed to open camera" << endl;
+ return 0;
+ }
+ cout << "Opened camera" << endl;
cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
- cap >> frame;
- }
-
- HybridTrackerParams params;
- // motion model params
- params.motion_model = CvMotionModel::LOW_PASS_FILTER;
- params.low_pass_gain = 0.1f;
- // mean shift params
- params.ms_tracker_weight = 0.8f;
- params.ms_params.tracking_type = CvMeanShiftTrackerParams::HS;
- // feature tracking params
- params.ft_tracker_weight = 0.2f;
- params.ft_params.feature_type = CvFeatureTrackerParams::OPTICAL_FLOW;
- params.ft_params.window_size = 0;
-
- HybridTracker tracker(params);
- char img_file[20] = "seqG/0001.png";
- char img_file_num[10];
- namedWindow("Win", 1);
-
- setMouseCallback("Win", onMouse, 0);
-
- int i = 0;
- float w[4];
- for(;;)
- {
- i++;
- if (live)
- {
- cap >> frame;
+ cap >> frame;
+ }
+
+ HybridTrackerParams params;
+ // motion model params
+ params.motion_model = CvMotionModel::LOW_PASS_FILTER;
+ params.low_pass_gain = 0.1f;
+ // mean shift params
+ params.ms_tracker_weight = 0.8f;
+ params.ms_params.tracking_type = CvMeanShiftTrackerParams::HS;
+ // feature tracking params
+ params.ft_tracker_weight = 0.2f;
+ params.ft_params.feature_type = CvFeatureTrackerParams::OPTICAL_FLOW;
+ params.ft_params.window_size = 0;
+
+ HybridTracker tracker(params);
+ char img_file[20] = "seqG/0001.png";
+ char img_file_num[10];
+ namedWindow("Win", 1);
+
+ setMouseCallback("Win", onMouse, 0);
+
+ int i = 0;
+ float w[4];
+ for(;;)
+ {
+ i++;
+ if (live)
+ {
+ cap >> frame;
if( frame.empty() )
break;
- frame.copyTo(image);
- }
- else
- {
- int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
- CV_Assert(values_read == 5);
- sprintf(img_file, "seqG/%04d.png", i);
- image = imread(img_file, CV_LOAD_IMAGE_COLOR);
+ frame.copyTo(image);
+ }
+ else
+ {
+ int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
+ CV_Assert(values_read == 5);
+ sprintf(img_file, "seqG/%04d.png", i);
+ image = imread(img_file, CV_LOAD_IMAGE_COLOR);
if (image.empty())
- break;
- selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
+ break;
+ selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
cvRound(w[2]*image.cols), cvRound(w[3]*image.rows));
- }
-
- sprintf(img_file_num, "Frame: %d", i);
- putText(image, img_file_num, Point(10, image.rows-20), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
- if (!image.empty())
- {
-
- if (trackObject < 0)
- {
- tracker.newTracker(image, selection);
- trackObject = 1;
- }
-
- if (trackObject)
- {
- tracker.updateTracker(image);
- drawRectangle(&image, tracker.getTrackingWindow());
- }
-
- if (selectObject && selection.width > 0 && selection.height > 0)
- {
- Mat roi(image, selection);
- bitwise_not(roi, roi);
- }
-
- drawRectangle(&image, Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
+ }
+
+ sprintf(img_file_num, "Frame: %d", i);
+ putText(image, img_file_num, Point(10, image.rows-20), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
+ if (!image.empty())
+ {
+
+ if (trackObject < 0)
+ {
+ tracker.newTracker(image, selection);
+ trackObject = 1;
+ }
+
+ if (trackObject)
+ {
+ tracker.updateTracker(image);
+ drawRectangle(&image, tracker.getTrackingWindow());
+ }
+
+ if (selectObject && selection.width > 0 && selection.height > 0)
+ {
+ Mat roi(image, selection);
+ bitwise_not(roi, roi);
+ }
+
+ drawRectangle(&image, Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
cvRound(w[2]*image.cols), cvRound(w[3]*image.rows)));
- imshow("Win", image);
+ imshow("Win", image);
- waitKey(100);
- }
- else
- i = 0;
- }
+ waitKey(100);
+ }
+ else
+ i = 0;
+ }
- fclose(f);
- return 0;
+ fclose(f);
+ return 0;
}
using namespace std;
using namespace cv::flann;
-void help()
+static void help()
{
- cout <<
- "\nThis program shows how to use cv::Mat and IplImages converting back and forth.\n"
- "It shows reading of images, converting to planes and merging back, color conversion\n"
- "and also iterating through pixels.\n"
- "Call:\n"
- "./image [image-name Default: lena.jpg]\n" << endl;
+ cout <<
+    "\nThis program shows how to use cv::Mat and IplImage, converting back and forth between them.\n"
+ "It shows reading of images, converting to planes and merging back, color conversion\n"
+ "and also iterating through pixels.\n"
+ "Call:\n"
+ "./image [image-name Default: lena.jpg]\n" << endl;
}
// enable/disable use of mixed API in the code below.
int main( int argc, char** argv )
{
- help();
+ help();
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#if DEMO_MIXED_API_USE
Ptr<IplImage> iplimg = cvLoadImage(imagename); // Ptr<T> is safe ref-conting pointer class
return -1;
}
#endif
-
+
if( !img.data ) // check if the image has been loaded properly
return -1;
-
+
Mat img_yuv;
cvtColor(img, img_yuv, CV_BGR2YCrCb); // convert image to YUV color space. The output image will be created automatically
-
+
vector<Mat> planes; // Vector is template vector class, similar to STL's vector. It can store matrices too.
split(img_yuv, planes); // split the image into separate color planes
-
+
#if 1
// method 1. process Y plane using an iterator
MatIterator_<uchar> it = planes[0].begin<uchar>(), it_end = planes[0].end<uchar>();
double v = *it*1.7 + rand()%21-10;
*it = saturate_cast<uchar>(v*v/255.);
}
-
+
// method 2. process the first chroma plane using pre-stored row pointer.
// method 3. process the second chroma plane using individual element access
for( int y = 0; y < img_yuv.rows; y++ )
Vxy = saturate_cast<uchar>((Vxy-128)/2 + 128);
}
}
-
+
#else
Mat noise(img.size(), CV_8U); // another Mat constructor; allocates a matrix of the specified size and type
randn(noise, Scalar::all(128), Scalar::all(20)); // fills the matrix with normally distributed random values;
// there is also randu() for uniformly distributed random number generation
GaussianBlur(noise, noise, Size(3, 3), 0.5, 0.5); // blur the noise a bit, kernel size is 3x3 and both sigma's are set to 0.5
-
+
const double brightness_gain = 0;
const double contrast_gain = 1.7;
#if DEMO_MIXED_API_USE
// alternative form of cv::convertScale if we know the datatype at compile time ("uchar" here).
// This expression will not create any temporary arrays and should be almost as fast as the above variant
planes[2] = Mat_<uchar>(planes[2]*color_scale + 128*(1-color_scale));
-
+
// Mat::mul replaces cvMul(). Again, no temporary arrays are created in case of simple expressions.
planes[0] = planes[0].mul(planes[0], 1./255);
#endif
-
+
// now merge the results back
merge(planes, img_yuv);
// and produce the output RGB image
cvtColor(img_yuv, img, CV_YCrCb2BGR);
-
+
// this is counterpart for cvNamedWindow
namedWindow("image with grain", CV_WINDOW_AUTOSIZE);
#if DEMO_MIXED_API_USE
imshow("image with grain", img);
#endif
waitKey();
-
+
return 0;
// all the memory will automatically be released by Vector<>, Mat and Ptr<> destructors.
}
using namespace cv;
-void help(char** av)
+static void help(char** av)
{
cout << "\nThis creates a yaml or xml list of files from the command line args\n"
- "usage:\n./" << av[0] << " imagelist.yaml *.png\n"
+ "usage:\n./" << av[0] << " imagelist.yaml *.png\n"
<< "Try using different extensions.(e.g. yaml yml xml xml.gz etc...)\n"
<< "This will serialize this list of images or whatever with opencv's FileStorage framework" << endl;
}
using namespace cv;
using namespace std;
-void help()
+static void help()
{
cout << "\nCool inpainging demo. Inpainting repairs damage to images by floodfilling the damage \n"
- << "with surrounding image areas.\n"
- "Using OpenCV version %s\n" << CV_VERSION << "\n"
- "Usage:\n"
- "./inpaint [image_name -- Default fruits.jpg]\n" << endl;
+ << "with surrounding image areas.\n"
+            "Using OpenCV version " << CV_VERSION << "\n"
+ "Usage:\n"
+ "./inpaint [image_name -- Default fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
"\tESC - quit the program\n"
Mat img, inpaintMask;
Point prevPt(-1,-1);
-void onMouse( int event, int x, int y, int flags, void* )
+static void onMouse( int event, int x, int y, int flags, void* )
{
if( event == CV_EVENT_LBUTTONUP || !(flags & CV_EVENT_FLAG_LBUTTON) )
prevPt = Point(-1,-1);
}
help();
-
+
namedWindow( "image", 1 );
img = img0.clone();
return center + Point2f((float)cos(angle), (float)-sin(angle))*(float)R;
}
-void help()
+static void help()
{
- printf( "\nExamle of c calls to OpenCV's Kalman filter.\n"
+    printf( "\nExample of C calls to OpenCV's Kalman filter.\n"
" Tracking of rotating point.\n"
" Rotation speed is constant.\n"
" Both state and measurements vectors are 1D (a point angle),\n"
" the real and the measured points are connected with red line segment.\n"
" (if Kalman filter works correctly,\n"
" the yellow segment should be shorter than the red one).\n"
- "\n"
+ "\n"
" Pressing any key (except ESC) will reset the tracking with a different speed.\n"
" Pressing ESC will stop the program.\n"
- );
+ );
}
int main(int, char**)
using namespace cv;
using namespace std;
-void help()
-{
- cout << "\nThis program demonstrates kmeans clustering.\n"
- "It generates an image with random points, then assigns a random number of cluster\n"
- "centers and uses kmeans to move those cluster centers to their representitive location\n"
- "Call\n"
- "./kmeans\n" << endl;
-}
+// static void help()
+// {
+// cout << "\nThis program demonstrates kmeans clustering.\n"
+//         "centers and uses kmeans to move those cluster centers to their representative location\n"
+// "centers and uses kmeans to move those cluster centers to their representitive location\n"
+// "Call\n"
+// "./kmeans\n" << endl;
+// }
int main( int /*argc*/, char** /*argv*/ )
{
Scalar(255,0,255),
Scalar(0,255,255)
};
-
+
Mat img(500, 500, CV_8UC3);
RNG rng(12345);
int k, clusterCount = rng.uniform(2, MAX_CLUSTERS+1);
int i, sampleCount = rng.uniform(1, 1001);
Mat points(sampleCount, 1, CV_32FC2), labels;
-
+
clusterCount = MIN(clusterCount, sampleCount);
Mat centers(clusterCount, 1, points.type());
randShuffle(points, 1, &rng);
- kmeans(points, clusterCount, labels,
+ kmeans(points, clusterCount, labels,
TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0),
3, KMEANS_PP_CENTERS, centers);
using namespace cv;
using namespace std;
-void help()
+static void help()
{
- cout <<
- "\nThis program demonstrates Laplace point/edge detection using OpenCV function Laplacian()\n"
- "It captures from the camera of your choice: 0, 1, ... default 0\n"
- "Call:\n"
- "./laplace [camera #, default 0]\n" << endl;
+ cout <<
+ "\nThis program demonstrates Laplace point/edge detection using OpenCV function Laplacian()\n"
+ "It captures from the camera of your choice: 0, 1, ... default 0\n"
+ "Call:\n"
+ "./laplace [camera #, default 0]\n" << endl;
}
int sigma = 3;
createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );
Mat smoothed, laplace, result;
-
+
for(;;)
{
Mat frame;
blur(frame, smoothed, Size(ksize, ksize));
else
medianBlur(frame, smoothed, ksize);
-
+
Laplacian(smoothed, laplace, CV_16S, 5);
convertScaleAbs(laplace, result, (sigma+1)*0.25);
imshow("Laplacian", result);
#include <dirent.h>
#endif
-#ifdef HAVE_CVCONFIG_H
-#include <cvconfig.h>
+#ifdef HAVE_CVCONFIG_H
+#include <cvconfig.h>
#endif
#ifdef HAVE_TBB
using namespace std;
using namespace cv;
-void help()
+static void help()
{
cout << "This program demonstrated the use of the latentSVM detector." << endl <<
"It reads in a trained object models and then uses them to detect the objects in an images." << endl <<
endl;
}
-void detectAndDrawObjects( Mat& image, LatentSvmDetector& detector, const vector<Scalar>& colors, float overlapThreshold, int numThreads )
+static void detectAndDrawObjects( Mat& image, LatentSvmDetector& detector, const vector<Scalar>& colors, float overlapThreshold, int numThreads )
{
vector<LatentSvmDetector::ObjectDetection> detections;
}
}
-void readDirectory( const string& directoryName, vector<string>& filenames, bool addDirectoryName=true )
+static void readDirectory( const string& directoryName, vector<string>& filenames, bool addDirectoryName=true )
{
filenames.clear();
struct _finddata_t s_file;
string str = directoryName + "\\*.*";
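+    // _findfirst signals failure by returning (intptr_t)-1, hence the explicit cast in the comparison below.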
- intptr_t h_file = _findfirst( str.c_str(), &s_file );
- if( h_file != static_cast<intptr_t>(-1.0) )
+ intptr_t h_file = _findfirst( str.c_str(), &s_file );
+ if( h_file != static_cast<intptr_t>(-1.0) )
{
do
{
int main(int argc, char* argv[])
{
- help();
+ help();
string images_folder, models_folder;
float overlapThreshold = 0.2f;
int numThreads = -1;
if( argc > 2 )
- {
+ {
images_folder = argv[1];
models_folder = argv[2];
if( argc > 3 ) overlapThreshold = (float)atof(argv[3]);
}
if( argc > 4 ) numThreads = atoi(argv[4]);
- }
+ }
vector<string> images_filenames, models_filenames;
readDirectory( images_folder, images_filenames );
exit(0);
}
}
-
- return 0;
+
+ return 0;
}
*/
-void help()
+static void help()
{
- printf("\nThe sample demonstrates how to train Random Trees classifier\n"
- "(or Boosting classifier, or MLP, or Knearest, or Nbayes, or Support Vector Machines - see main()) using the provided dataset.\n"
- "\n"
- "We use the sample database letter-recognition.data\n"
- "from UCI Repository, here is the link:\n"
- "\n"
- "Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).\n"
- "UCI Repository of machine learning databases\n"
- "[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n"
- "Irvine, CA: University of California, Department of Information and Computer Science.\n"
- "\n"
- "The dataset consists of 20000 feature vectors along with the\n"
- "responses - capital latin letters A..Z.\n"
- "The first 16000 (10000 for boosting)) samples are used for training\n"
- "and the remaining 4000 (10000 for boosting) - to test the classifier.\n"
- "======================================================\n");
+ printf("\nThe sample demonstrates how to train Random Trees classifier\n"
+ "(or Boosting classifier, or MLP, or Knearest, or Nbayes, or Support Vector Machines - see main()) using the provided dataset.\n"
+ "\n"
+ "We use the sample database letter-recognition.data\n"
+ "from UCI Repository, here is the link:\n"
+ "\n"
+ "Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).\n"
+ "UCI Repository of machine learning databases\n"
+ "[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n"
+ "Irvine, CA: University of California, Department of Information and Computer Science.\n"
+ "\n"
+ "The dataset consists of 20000 feature vectors along with the\n"
+           "responses - capital Latin letters A..Z.\n"
+           "The first 16000 (10000 for boosting) samples are used for training\n"
+ "and the remaining 4000 (10000 for boosting) - to test the classifier.\n"
+ "======================================================\n");
printf("\nThis is letter recognition sample.\n"
"The usage: letter_recog [-data <path to letter-recognition.data>] \\\n"
" [-save <output XML file for the classifier>] \\\n"
}
temp_sample = cvCreateMat( 1, var_count + 1, CV_32F );
- weak_responses = cvCreateMat( 1, boost.get_weak_predictors()->total, CV_32F );
+ weak_responses = cvCreateMat( 1, boost.get_weak_predictors()->total, CV_32F );
// compute prediction error on train and test data
for( i = 0; i < nsamples_all; i++ )
}
}
- printf("true_resp = %f%%\tavg accuracy = %f%%\n", (float)true_resp / (nsamples_all - ntrain_samples) * 100,
+ printf("true_resp = %f%%\tavg accuracy = %f%%\n", (float)true_resp / (nsamples_all - ntrain_samples) * 100,
(float)accuracy / (nsamples_all - ntrain_samples) / K * 100);
delete[] true_results;
for (int j = ntrain_samples; j < nsamples_all; j++)
{
float *s = data->data.fl + j * var_count;
-
+
for (int i = 0; i < var_count; i++)
- {
+ {
sample.data.fl[(j - ntrain_samples) * var_count + i] = s[i];
}
true_results[j - ntrain_samples] = responses->data.fl[j];
}
CvMat *result = cvCreateMat(1, nsamples_all - ntrain_samples, CV_32FC1);
-
+
printf("Classification (may take a few minutes)...\n");
svm.predict(&sample, result);
if (result->data.fl[i] == true_results[i])
true_resp++;
}
-
+
printf("true_resp = %f%%\n", (float)true_resp / (nsamples_all - ntrain_samples) * 100);
-
+
cvReleaseMat( &train_resp );
cvReleaseMat( &result );
cvReleaseMat( &data );
method = 2;
}
else if ( strcmp(argv[i], "-knearest") == 0)
- {
- method = 3;
- }
- else if ( strcmp(argv[i], "-nbayes") == 0)
- {
- method = 4;
- }
- else if ( strcmp(argv[i], "-svm") == 0)
- {
- method = 5;
- }
+ {
+ method = 3;
+ }
+ else if ( strcmp(argv[i], "-nbayes") == 0)
+ {
+ method = 4;
+ }
+ else if ( strcmp(argv[i], "-svm") == 0)
+ {
+ method = 5;
+ }
else
break;
}
build_svm_classifier( data_filename ):
-1) < 0)
{
- help();
+ help();
}
return 0;
}
// Function prototypes
void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f);
-std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
+std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& mask, cv::Mat& dst);
-void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
+void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& dst);
-void drawResponse(const std::vector<cv::linemod::Template>& templates,
+void drawResponse(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Mat& dst, cv::Point offset, int T);
cv::Mat displayQuantized(const cv::Mat& quantized);
m_x = a_x;
m_y = a_y;
}
-
+
static int m_event;
static int m_x;
static int m_y;
int Mouse::m_x;
int Mouse::m_y;
-void help()
+static void help()
{
printf("Usage: openni_demo [templates.yml]\n\n"
"Place your object on a planar, featureless surface. With the mouse,\n"
};
// Functions to store detector and templates in single XML/YAML file
-cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
+static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
cv::Ptr<cv::linemod::Detector> detector = new cv::linemod::Detector;
cv::FileStorage fs(filename, cv::FileStorage::READ);
return detector;
}
-void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
+static void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
{
cv::FileStorage fs(filename, cv::FileStorage::WRITE);
detector->write(fs);
capture.grab();
capture.retrieve(depth, CV_CAP_OPENNI_DEPTH_MAP);
capture.retrieve(color, CV_CAP_OPENNI_BGR_IMAGE);
-
+
std::vector<cv::Mat> sources;
sources.push_back(color);
sources.push_back(depth);
subtractPlane(depth, mask, chain, focal_length);
cv::imshow("mask", mask);
-
+
// Extract template
std::string class_id = cv::format("class%d", num_classes);
cv::Rect bb;
int classes_visited = 0;
std::set<std::string> visited;
-
+
for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)
{
cv::linemod::Match m = matches[i];
printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
}
-
+
// Draw matching template
const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
drawResponse(templates, num_modalities, display, cv::Point(m.x, m.y), detector->getT(0));
{
/// @todo Online learning possibly broken by new gradient feature extraction,
/// which assumes an accurate object outline.
-
+
// Compute masks based on convex hull of matched template
cv::Mat color_mask, depth_mask;
std::vector<CvPoint> chain = maskFromTemplate(templates, num_modalities,
return 0;
}
-void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
+static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
{
real.resize(proj.size());
double f_inv = 1.0 / f;
-
+
for (int i = 0; i < (int)proj.size(); ++i)
{
double Z = proj[i].z;
}
}
-void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
+static void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
{
const int l_num_cost_pts = 200;
filterPlane(&depth_ipl, tmp, chain, f);
}
-std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
+std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& mask, cv::Mat& dst)
{
{
const uchar* quant_r = quantized.ptr(r);
cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);
-
+
for (int c = 0; c < quantized.cols; ++c)
{
cv::Vec3b& bgr = color_r[c];
}
}
}
-
+
return color;
}
// Adapted from cv_line_template::convex_hull
-void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
+void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Point offset, cv::Size size,
cv::Mat& dst)
{
points.push_back(cv::Point(f.x, f.y) + offset);
}
}
-
+
std::vector<cv::Point> hull;
cv::convexHull(points, hull);
cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
}
-void drawResponse(const std::vector<cv::linemod::Template>& templates,
+void drawResponse(const std::vector<cv::linemod::Template>& templates,
int num_modalities, cv::Mat& dst, cv::Point offset, int T)
{
static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),
// box around it and chose the display color based on that response. Here
// the display color just depends on the modality.
cv::Scalar color = COLORS[m];
-
+
for (int i = 0; i < (int)templates[m].features.size(); ++i)
{
cv::linemod::Feature f = templates[m].features[i];
using namespace cv;
using namespace std;
-void help()
+static void help()
{
// print a welcome message, and the OpenCV version
cout << "\nThis is a demo of Lukas-Kanade optical flow lkdemo(),\n"
- "Using OpenCV version %s\n" << CV_VERSION << "\n"
- << endl;
+            "Using OpenCV version " << CV_VERSION << "\n"
+ << endl;
cout << "\nHot keys: \n"
"\tESC - quit the program\n"
"To add/remove a feature point click it\n" << endl;
}
-Point2f pt;
+Point2f point;
bool addRemovePt = false;
-void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ )
+static void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ )
{
if( event == CV_EVENT_LBUTTONDOWN )
{
- pt = Point2f((float)x,(float)y);
+ point = Point2f((float)x,(float)y);
addRemovePt = true;
}
}
VideoCapture cap;
TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);
Size subPixWinSize(10,10), winSize(31,31);
-
+
const int MAX_COUNT = 500;
bool needToInit = false;
bool nightMode = false;
-
+
if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
cap.open(argc == 2 ? argv[1][0] - '0' : 0);
else if( argc == 2 )
Mat gray, prevGray, image;
vector<Point2f> points[2];
-
+
for(;;)
{
Mat frame;
break;
frame.copyTo(image);
- cvtColor(image, gray, CV_BGR2GRAY);
+ cvtColor(image, gray, CV_BGR2GRAY);
if( nightMode )
image = Scalar::all(0);
{
if( addRemovePt )
{
- if( norm(pt - points[1][i]) <= 5 )
+ if( norm(point - points[1][i]) <= 5 )
{
addRemovePt = false;
continue;
if( addRemovePt && points[1].size() < (size_t)MAX_COUNT )
{
vector<Point2f> tmp;
- tmp.push_back(pt);
+ tmp.push_back(point);
cornerSubPix( gray, tmp, winSize, cvSize(-1,-1), termcrit);
points[1].push_back(tmp[0]);
addRemovePt = false;
default:
;
}
-
+
std::swap(points[1], points[0]);
swap(prevGray, gray);
}
using namespace cv;
using namespace std;
-void help()
+static void help()
{
cout << "LogPolar Blind Spot Model sample.\nShortcuts:"
"\n\tn for nearest pixel technique"
"\n\ta for adjacent receptive fields"
"\n\tq or ESC quit\n";
}
-
+
int main(int argc, char** argv)
{
Mat img = imread(argc > 1 ? argv[1] : "lena.jpg",1); // open the image
return 0;
}
help();
-
+
Size s=img.size();
int w=s.width, h=s.height;
int ro0=3; //radius of the blind spot
- int R=120; //number of rings
-
+ int R=120; //number of rings
+
//Creation of the four different objects that implement the four log-polar transformations
//Off-line computation
Point2i center(w/2,h/2);
Retinal=nearest.to_cartesian(Cortical);
}else if (wk=='b'){
Cortical=bilin.to_cortical(img);
- Retinal=bilin.to_cartesian(Cortical);
+ Retinal=bilin.to_cartesian(Cortical);
}else if (wk=='o'){
Cortical=overlap.to_cortical(img);
- Retinal=overlap.to_cartesian(Cortical);
+ Retinal=overlap.to_cartesian(Cortical);
}else if (wk=='a'){
Cortical=adj.to_cortical(img);
- Retinal=adj.to_cartesian(Cortical);
+ Retinal=adj.to_cartesian(Cortical);
}
imshow("Cartesian", img);
using namespace cv;
-void help()
+static void help()
{
- printf("\nThis program demonstrates using features2d detector, descriptor extractor and simple matcher\n"
- "Using the SURF desriptor:\n"
- "\n"
- "Usage:\n matcher_simple <image1> <image2>\n");
+ printf("\nThis program demonstrates using features2d detector, descriptor extractor and simple matcher\n"
+           "Using the SURF descriptor:\n"
+ "\n"
+ "Usage:\n matcher_simple <image1> <image2>\n");
}
int main(int argc, char** argv)
{
- if(argc != 3)
- {
- help();
- return -1;
- }
-
- Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
- Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
- if(img1.empty() || img2.empty())
- {
- printf("Can't read one of the images\n");
- return -1;
- }
-
- // detecting keypoints
- SurfFeatureDetector detector(400);
- vector<KeyPoint> keypoints1, keypoints2;
- detector.detect(img1, keypoints1);
- detector.detect(img2, keypoints2);
-
- // computing descriptors
- SurfDescriptorExtractor extractor;
- Mat descriptors1, descriptors2;
- extractor.compute(img1, keypoints1, descriptors1);
- extractor.compute(img2, keypoints2, descriptors2);
-
- // matching descriptors
- BFMatcher matcher(NORM_L2);
+ if(argc != 3)
+ {
+ help();
+ return -1;
+ }
+
+ Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
+ if(img1.empty() || img2.empty())
+ {
+ printf("Can't read one of the images\n");
+ return -1;
+ }
+
+ // detecting keypoints
+ SurfFeatureDetector detector(400);
+ vector<KeyPoint> keypoints1, keypoints2;
+ detector.detect(img1, keypoints1);
+ detector.detect(img2, keypoints2);
+
+ // computing descriptors
+ SurfDescriptorExtractor extractor;
+ Mat descriptors1, descriptors2;
+ extractor.compute(img1, keypoints1, descriptors1);
+ extractor.compute(img2, keypoints2, descriptors2);
+
+ // matching descriptors
+ BFMatcher matcher(NORM_L2);
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
- // drawing the results
- namedWindow("matches", 1);
- Mat img_matches;
- drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
- imshow("matches", img_matches);
- waitKey(0);
+ // drawing the results
+ namedWindow("matches", 1);
+ Mat img_matches;
+ drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
+ imshow("matches", img_matches);
+ waitKey(0);
- return 0;
+ return 0;
}
const string defaultFileWithTrainImages = "../../opencv/samples/cpp/matching_to_many_images/train/trainImages.txt";
const string defaultDirToSaveResImages = "../../opencv/samples/cpp/matching_to_many_images/results";
-void printPrompt( const string& applName )
+static void printPrompt( const string& applName )
{
cout << "/*\n"
<< " * This is a sample on matching descriptors detected on one image to descriptors detected in image set.\n"
<< defaultQueryImageName << " " << defaultFileWithTrainImages << " " << defaultDirToSaveResImages << endl;
}
-void maskMatchesByTrainImgIdx( const vector<DMatch>& matches, int trainImgIdx, vector<char>& mask )
+static void maskMatchesByTrainImgIdx( const vector<DMatch>& matches, int trainImgIdx, vector<char>& mask )
{
mask.resize( matches.size() );
fill( mask.begin(), mask.end(), 0 );
}
}
-void readTrainFilenames( const string& filename, string& dirName, vector<string>& trainFilenames )
+static void readTrainFilenames( const string& filename, string& dirName, vector<string>& trainFilenames )
{
trainFilenames.clear();
file.close();
}
-bool createDetectorDescriptorMatcher( const string& detectorType, const string& descriptorType, const string& matcherType,
+static bool createDetectorDescriptorMatcher( const string& detectorType, const string& descriptorType, const string& matcherType,
Ptr<FeatureDetector>& featureDetector,
Ptr<DescriptorExtractor>& descriptorExtractor,
Ptr<DescriptorMatcher>& descriptorMatcher )
return isCreated;
}
-bool readImages( const string& queryImageName, const string& trainFilename,
+static bool readImages( const string& queryImageName, const string& trainFilename,
Mat& queryImage, vector <Mat>& trainImages, vector<string>& trainImageNames )
{
cout << "< Reading the images..." << endl;
return true;
}
-void detectKeypoints( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+static void detectKeypoints( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
const vector<Mat>& trainImages, vector<vector<KeyPoint> >& trainKeypoints,
Ptr<FeatureDetector>& featureDetector )
{
cout << ">" << endl;
}
-void computeDescriptors( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, Mat& queryDescriptors,
+static void computeDescriptors( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, Mat& queryDescriptors,
const vector<Mat>& trainImages, vector<vector<KeyPoint> >& trainKeypoints, vector<Mat>& trainDescriptors,
Ptr<DescriptorExtractor>& descriptorExtractor )
{
cout << "< Computing descriptors for keypoints..." << endl;
descriptorExtractor->compute( queryImage, queryKeypoints, queryDescriptors );
descriptorExtractor->compute( trainImages, trainKeypoints, trainDescriptors );
-
+
int totalTrainDesc = 0;
for( vector<Mat>::const_iterator tdIter = trainDescriptors.begin(); tdIter != trainDescriptors.end(); tdIter++ )
totalTrainDesc += tdIter->rows;
cout << ">" << endl;
}
-void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
+static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
vector<DMatch>& matches, Ptr<DescriptorMatcher>& descriptorMatcher )
{
cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;
double matchTime = tm.getTimeMilli();
CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );
-
+
cout << "Number of matches: " << matches.size() << endl;
cout << "Build time: " << buildTime << " ms; Match time: " << matchTime << " ms" << endl;
cout << ">" << endl;
}
-void saveResultImages( const Mat& queryImage, const vector<KeyPoint>& queryKeypoints,
+static void saveResultImages( const Mat& queryImage, const vector<KeyPoint>& queryKeypoints,
const vector<Mat>& trainImages, const vector<vector<KeyPoint> >& trainKeypoints,
const vector<DMatch>& matches, const vector<string>& trainImagesNames, const string& resultDir )
{
using namespace cv;
using namespace std;
-void help(char** argv)
+static void help(char** argv)
{
- cout << "\nDemonstrate mean-shift based color segmentation in spatial pyramid.\n"
+ cout << "\nDemonstrate mean-shift based color segmentation in spatial pyramid.\n"
<< "Call:\n " << argv[0] << " image\n"
<< "This program allows you to set the spatial and color radius\n"
<< "of the mean shift window as well as the number of pyramid reduction levels explored\n"
}
//This colors the segmentations
-void floodFillPostprocess( Mat& img, const Scalar& colorDiff=Scalar::all(1) )
+static void floodFillPostprocess( Mat& img, const Scalar& colorDiff=Scalar::all(1) )
{
CV_Assert( !img.empty() );
RNG rng = theRNG();
int spatialRad, colorRad, maxPyrLevel;
Mat img, res;
-void meanShiftSegmentation( int, void* )
+static void meanShiftSegmentation( int, void* )
{
cout << "spatialRad=" << spatialRad << "; "
<< "colorRad=" << colorRad << "; "
{
if( argc !=2 )
{
- help(argv);
+ help(argv);
return -1;
}
using namespace cv;
using namespace std;
-void help()
+static void help()
{
- cout << "This program demonstrates finding the minimum enclosing box or circle of a set\n"
+ cout << "This program demonstrates finding the minimum enclosing box or circle of a set\n"
"of points using functions: minAreaRect() minEnclosingCircle().\n"
"Random points are generated and then enclosed.\n"
"Call:\n"
help();
Mat img(500, 500, CV_8UC3);
- RNG& rng = theRNG();
+ RNG& rng = theRNG();
for(;;)
{
Point pt;
pt.x = rng.uniform(img.cols/4, img.cols*3/4);
pt.y = rng.uniform(img.rows/4, img.rows*3/4);
-
+
points.push_back(pt);
}
-
+
RotatedRect box = minAreaRect(Mat(points));
Point2f center, vtx[4];
float radius = 0;
minEnclosingCircle(Mat(points), center, radius);
box.points(vtx);
-
+
img = Scalar::all(0);
for( i = 0; i < count; i++ )
circle( img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA );
for( i = 0; i < 4; i++ )
line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, CV_AA);
-
- circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, CV_AA);
+
+ circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, CV_AA);
imshow( "rect & circle", img );
using namespace cv;
-void help()
+static void help()
{
printf("\nShow off image morphology: erosion, dialation, open and close\n"
- "Call:\n morphology2 [image]\n"
- "This program also shows use of rect, elipse and cross kernels\n\n");
+ "Call:\n morphology2 [image]\n"
+        "This program also shows use of rect, ellipse and cross kernels\n\n");
printf( "Hot keys: \n"
"\tESC - quit the program\n"
"\tr - use rectangle structuring element\n"
int erode_dilate_pos = 0;
// callback function for open/close trackbar
-void OpenClose(int, void*)
+static void OpenClose(int, void*)
{
int n = open_close_pos - max_iters;
int an = n > 0 ? n : -n;
}
// callback function for erode/dilate trackbar
-void ErodeDilate(int, void*)
+static void ErodeDilate(int, void*)
{
int n = erode_dilate_pos - max_iters;
int an = n > 0 ? n : -n;
using namespace std;
using namespace cv;
-void help()
+static void help()
{
cout << "\nThis program demonstrates the multi cascade recognizer. It is a generalization of facedetect sample.\n\n"
"Usage: ./multicascadeclassifier \n"
using namespace cv;
using namespace std;
-void help()
+static void help()
{
cout << "\nThis program demonstrates usage of depth sensors (Kinect, XtionPRO,...).\n"
"The user gets some of the supported output images.\n"
<< endl;
}
-void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp=-1.f, float S=1.f, float V=1.f )
+static void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp=-1.f, float S=1.f, float V=1.f )
{
CV_Assert( !gray.empty() );
CV_Assert( gray.type() == CV_8UC1 );
float t = V * (1 - (1 - f) * S);
Point3f res;
-
- if( hi == 0 ) //R = V, G = t, B = p
+
+ if( hi == 0 ) //R = V, G = t, B = p
res = Point3f( p, t, V );
- if( hi == 1 ) // R = q, G = V, B = p
+ if( hi == 1 ) // R = q, G = V, B = p
res = Point3f( p, V, q );
- if( hi == 2 ) // R = p, G = V, B = t
+ if( hi == 2 ) // R = p, G = V, B = t
res = Point3f( t, V, p );
- if( hi == 3 ) // R = p, G = q, B = V
+ if( hi == 3 ) // R = p, G = q, B = V
res = Point3f( V, q, p );
- if( hi == 4 ) // R = t, G = p, B = V
+ if( hi == 4 ) // R = t, G = p, B = V
res = Point3f( V, p, t );
- if( hi == 5 ) // R = V, G = p, B = q
+ if( hi == 5 ) // R = V, G = p, B = q
res = Point3f( q, p, V );
uchar b = (uchar)(std::max(0.f, std::min (res.x, 1.f)) * 255.f);
uchar g = (uchar)(std::max(0.f, std::min (res.y, 1.f)) * 255.f);
uchar r = (uchar)(std::max(0.f, std::min (res.z, 1.f)) * 255.f);
- rgb.at<Point3_<uchar> >(y,x) = Point3_<uchar>(b, g, r);
+ rgb.at<Point3_<uchar> >(y,x) = Point3_<uchar>(b, g, r);
}
}
}
-float getMaxDisparity( VideoCapture& capture )
+static float getMaxDisparity( VideoCapture& capture )
{
const int minDistance = 400; // mm
float b = (float)capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE ); // mm
return b * F / minDistance;
}
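// Illustrative numbers for the formula above (typical device values, not queried here): with a
// baseline b of roughly 75 mm and a depth-generator focal length F of roughly 575 px, an object
// at the 400 mm minimum distance yields a disparity of about 75 * 575 / 400 ~= 108 px, which is
// the largest disparity the colorizer needs to map.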
-void printCommandLineParams()
+static void printCommandLineParams()
{
cout << "-cd Colorized disparity? (0 or 1; 1 by default) Ignored if disparity map is not selected to show." << endl;
cout << "-fmd Fixed max disparity? (0 or 1; 0 by default) Ignored if disparity map is not colorized (-cd 0)." << endl;
cout << "-r Filename of .oni video file. The data will grabbed from it." << endl ;
}
-void parseCommandLine( int argc, char* argv[], bool& isColorizeDisp, bool& isFixedMaxDisp, int& imageMode, bool retrievedImageFlags[],
+static void parseCommandLine( int argc, char* argv[], bool& isColorizeDisp, bool& isFixedMaxDisp, int& imageMode, bool retrievedImageFlags[],
string& filename, bool& isFileReading )
{
// set default values
int val = atoi(mask.c_str());
int l = 100000, r = 10000, sum = 0;
- for( int i = 0; i < 5; i++ )
+ for( int j = 0; j < 5; j++ )
{
- retrievedImageFlags[i] = ((val % l) / r ) == 0 ? false : true;
+ retrievedImageFlags[j] = ((val % l) / r ) == 0 ? false : true;
l /= 10; r /= 10;
- if( retrievedImageFlags[i] ) sum++;
+ if( retrievedImageFlags[j] ) sum++;
}
if( sum == 0 )
using namespace cv;
using namespace std;
-void help()
-{
- printf(
- "\nDemonstrate the use of the HoG descriptor using\n"
- " HOGDescriptor::hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());\n"
- "Usage:\n"
- "./peopledetect (<image_filename> | <image_list>.txt)\n\n");
-}
+// static void help()
+// {
+// printf(
+// "\nDemonstrate the use of the HoG descriptor using\n"
+// " HOGDescriptor::hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());\n"
+// "Usage:\n"
+// "./peopledetect (<image_filename> | <image_list>.txt)\n\n");
+// }
int main(int argc, char** argv)
{
if( img.data )
{
- strcpy(_filename, argv[1]);
+ strcpy(_filename, argv[1]);
}
else
{
f = fopen(argv[1], "rt");
if(!f)
{
- fprintf( stderr, "ERROR: the specified file could not be loaded\n");
- return -1;
- }
+ fprintf( stderr, "ERROR: the specified file could not be loaded\n");
+ return -1;
+ }
}
HOGDescriptor hog;
for(;;)
{
- char* filename = _filename;
- if(f)
- {
- if(!fgets(filename, (int)sizeof(_filename)-2, f))
- break;
- //while(*filename && isspace(*filename))
- // ++filename;
- if(filename[0] == '#')
- continue;
- int l = (int)strlen(filename);
- while(l > 0 && isspace(filename[l-1]))
- --l;
- filename[l] = '\0';
- img = imread(filename);
- }
- printf("%s:\n", filename);
- if(!img.data)
- continue;
-
- fflush(stdout);
- vector<Rect> found, found_filtered;
- double t = (double)getTickCount();
- // run the detector with default parameters. to get a higher hit-rate
- // (and more false alarms, respectively), decrease the hitThreshold and
- // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
- hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
- t = (double)getTickCount() - t;
- printf("tdetection time = %gms\n", t*1000./cv::getTickFrequency());
- size_t i, j;
- for( i = 0; i < found.size(); i++ )
- {
- Rect r = found[i];
- for( j = 0; j < found.size(); j++ )
- if( j != i && (r & found[j]) == r)
- break;
- if( j == found.size() )
- found_filtered.push_back(r);
- }
- for( i = 0; i < found_filtered.size(); i++ )
- {
- Rect r = found_filtered[i];
- // the HOG detector returns slightly larger rectangles than the real objects.
- // so we slightly shrink the rectangles to get a nicer output.
- r.x += cvRound(r.width*0.1);
- r.width = cvRound(r.width*0.8);
- r.y += cvRound(r.height*0.07);
- r.height = cvRound(r.height*0.8);
- rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
- }
- imshow("people detector", img);
- int c = waitKey(0) & 255;
- if( c == 'q' || c == 'Q' || !f)
+ char* filename = _filename;
+ if(f)
+ {
+ if(!fgets(filename, (int)sizeof(_filename)-2, f))
+ break;
+ //while(*filename && isspace(*filename))
+ // ++filename;
+ if(filename[0] == '#')
+ continue;
+ int l = (int)strlen(filename);
+ while(l > 0 && isspace(filename[l-1]))
+ --l;
+ filename[l] = '\0';
+ img = imread(filename);
+ }
+ printf("%s:\n", filename);
+ if(!img.data)
+ continue;
+
+ fflush(stdout);
+ vector<Rect> found, found_filtered;
+ double t = (double)getTickCount();
+ // run the detector with default parameters. to get a higher hit-rate
+ // (and more false alarms, respectively), decrease the hitThreshold and
+ // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
+ hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
+ t = (double)getTickCount() - t;
+        printf("\tdetection time = %gms\n", t*1000./cv::getTickFrequency());
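+        // Tuning sketch for the call above (values are illustrative): raising hitThreshold above 0
+        // rejects low-confidence SVM responses, and passing 0 as the last (grouping) argument turns
+        // off rectangle grouping entirely, e.g.
+        //     hog.detectMultiScale(img, found, 0.2, Size(8,8), Size(32,32), 1.05, 0);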
+ size_t i, j;
+ for( i = 0; i < found.size(); i++ )
+ {
+ Rect r = found[i];
+ for( j = 0; j < found.size(); j++ )
+ if( j != i && (r & found[j]) == r)
+ break;
+ if( j == found.size() )
+ found_filtered.push_back(r);
+ }
+ for( i = 0; i < found_filtered.size(); i++ )
+ {
+ Rect r = found_filtered[i];
+ // the HOG detector returns slightly larger rectangles than the real objects.
+ // so we slightly shrink the rectangles to get a nicer output.
+ r.x += cvRound(r.width*0.1);
+ r.width = cvRound(r.width*0.8);
+ r.y += cvRound(r.height*0.07);
+ r.height = cvRound(r.height*0.8);
+ rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
+ }
+ imshow("people detector", img);
+ int c = waitKey(0) & 255;
+ if( c == 'q' || c == 'Q' || !f)
break;
}
if(f)
\r
bool stop = false;\r
\r
-void mouseCallback(int event, int x, int y, int flags, void* userdata)\r
+static void mouseCallback(int event, int x, int y, int flags, void* userdata)\r
{\r
if (stop)\r
return;\r
renderer->onMouseEvent(event, x, y, flags);\r
}\r
\r
-void openGlDrawCallback(void* userdata)\r
+static void openGlDrawCallback(void* userdata)\r
{\r
if (stop)\r
return;\r
mouse_dy_ = clamp(mouse_dy_, -mouseClamp, mouseClamp);\r
}\r
\r
-Point3d rotate(Point3d v, double yaw, double pitch)\r
+static Point3d rotate(Point3d v, double yaw, double pitch)\r
{\r
Point3d t1;\r
t1.x = v.x * cos(-yaw / 180.0 * CV_PI) - v.z * sin(-yaw / 180.0 * CV_PI);\r
#define _ANN_ 0 // artificial neural networks
#define _EM_ 0 // expectation-maximization
-void on_mouse( int event, int x, int y, int /*flags*/, void* )
+static void on_mouse( int event, int x, int y, int /*flags*/, void* )
{
if( img.empty() )
return;
}
}
-void prepare_train_data( Mat& samples, Mat& classes )
+static void prepare_train_data( Mat& samples, Mat& classes )
{
Mat( trainedPoints ).copyTo( samples );
Mat( trainedPointsMarkers ).copyTo( classes );
}
#if _NBC_
-void find_decision_boundary_NBC()
+static void find_decision_boundary_NBC()
{
img.copyTo( imgDst );
#if _KNN_
-void find_decision_boundary_KNN( int K )
+static void find_decision_boundary_KNN( int K )
{
img.copyTo( imgDst );
#endif
#if _SVM_
-void find_decision_boundary_SVM( CvSVMParams params )
+static void find_decision_boundary_SVM( CvSVMParams params )
{
img.copyTo( imgDst );
#endif
#if _DT_
-void find_decision_boundary_DT()
+static void find_decision_boundary_DT()
{
img.copyTo( imgDst );
#include "opencv2/opencv.hpp"
-void help(std::string errorMessage)
+static void help(std::string errorMessage)
{
- std::cout<<"Program init error : "<<errorMessage<<std::endl;
- std::cout<<"\nProgram call procedure : retinaDemo [processing mode] [Optional : media target] [Optional LAST parameter: \"log\" to activate retina log sampling]"<<std::endl;
- std::cout<<"\t[processing mode] :"<<std::endl;
- std::cout<<"\t -image : for still image processing"<<std::endl;
- std::cout<<"\t -video : for video stream processing"<<std::endl;
- std::cout<<"\t[Optional : media target] :"<<std::endl;
- std::cout<<"\t if processing an image or video file, then, specify the path and filename of the target to process"<<std::endl;
- std::cout<<"\t leave empty if processing video stream coming from a connected video device"<<std::endl;
- std::cout<<"\t[Optional : activate retina log sampling] : an optional last parameter can be specified for retina spatial log sampling"<<std::endl;
- std::cout<<"\t set \"log\" without quotes to activate this sampling, output frame size will be divided by 4"<<std::endl;
- std::cout<<"\nExamples:"<<std::endl;
- std::cout<<"\t-Image processing : ./retinaDemo -image lena.jpg"<<std::endl;
- std::cout<<"\t-Image processing with log sampling : ./retinaDemo -image lena.jpg log"<<std::endl;
- std::cout<<"\t-Video processing : ./retinaDemo -video myMovie.mp4"<<std::endl;
- std::cout<<"\t-Live video processing : ./retinaDemo -video"<<std::endl;
- std::cout<<"\nPlease start again with new parameters"<<std::endl;
+ std::cout<<"Program init error : "<<errorMessage<<std::endl;
+ std::cout<<"\nProgram call procedure : retinaDemo [processing mode] [Optional : media target] [Optional LAST parameter: \"log\" to activate retina log sampling]"<<std::endl;
+ std::cout<<"\t[processing mode] :"<<std::endl;
+ std::cout<<"\t -image : for still image processing"<<std::endl;
+ std::cout<<"\t -video : for video stream processing"<<std::endl;
+ std::cout<<"\t[Optional : media target] :"<<std::endl;
+ std::cout<<"\t if processing an image or video file, then, specify the path and filename of the target to process"<<std::endl;
+ std::cout<<"\t leave empty if processing video stream coming from a connected video device"<<std::endl;
+ std::cout<<"\t[Optional : activate retina log sampling] : an optional last parameter can be specified for retina spatial log sampling"<<std::endl;
+ std::cout<<"\t set \"log\" without quotes to activate this sampling, output frame size will be divided by 4"<<std::endl;
+ std::cout<<"\nExamples:"<<std::endl;
+ std::cout<<"\t-Image processing : ./retinaDemo -image lena.jpg"<<std::endl;
+ std::cout<<"\t-Image processing with log sampling : ./retinaDemo -image lena.jpg log"<<std::endl;
+ std::cout<<"\t-Video processing : ./retinaDemo -video myMovie.mp4"<<std::endl;
+ std::cout<<"\t-Live video processing : ./retinaDemo -video"<<std::endl;
+ std::cout<<"\nPlease start again with new parameters"<<std::endl;
}
int main(int argc, char* argv[]) {
- // welcome message
- std::cout<<"****************************************************"<<std::endl;
- std::cout<<"* Retina demonstration : demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
- std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
- std::cout<<"* As a summary, these are the retina model properties:"<<std::endl;
- std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
- std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
- std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
- std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
- std::cout<<"* for more information, reer to the following papers :"<<std::endl;
- std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
- std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
- std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
- std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
- std::cout<<"****************************************************"<<std::endl;
- std::cout<<" NOTE : this program generates the default retina parameters file 'RetinaDefaultParameters.xml'"<<std::endl;
- std::cout<<" => you can use this to fine tune parameters and load them if you save to file 'RetinaSpecificParameters.xml'"<<std::endl;
-
- // basic input arguments checking
- if (argc<2)
- {
- help("bad number of parameter");
- return -1;
- }
-
- bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
-
- std::string inputMediaType=argv[1];
-
- // declare the retina input buffer... that will be fed differently in regard of the input media
- cv::Mat inputFrame;
- cv::VideoCapture videoCapture; // in case a video media is used, its manager is declared here
-
- //////////////////////////////////////////////////////////////////////////////
- // checking input media type (still image, video file, live video acquisition)
- if (!strcmp(inputMediaType.c_str(), "-image") && argc >= 3)
- {
- std::cout<<"RetinaDemo: processing image "<<argv[2]<<std::endl;
- // image processing case
- inputFrame = cv::imread(std::string(argv[2]), 1); // load image in RGB mode
- }else
- if (!strcmp(inputMediaType.c_str(), "-video"))
- {
- if (argc == 2 || (argc == 3 && useLogSampling)) // attempt to grab images from a video capture device
- {
- videoCapture.open(0);
- }else// attempt to grab images from a video filestream
- {
- std::cout<<"RetinaDemo: processing video stream "<<argv[2]<<std::endl;
- videoCapture.open(argv[2]);
- }
-
- // grab a first frame to check if everything is ok
- videoCapture>>inputFrame;
- }else
- {
- // bad command parameter
- help("bad command parameter");
- return -1;
- }
-
- if (inputFrame.empty())
- {
- help("Input media could not be loaded, aborting");
- return -1;
- }
-
-
- //////////////////////////////////////////////////////////////////////////////
- // Program start in a try/catch safety context (Retina may throw errors)
- try
- {
- // create a retina instance with default parameters setup, uncomment the initialisation you wanna test
- cv::Ptr<cv::Retina> myRetina;
-
- // if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
- if (useLogSampling)
+ // welcome message
+ std::cout<<"****************************************************"<<std::endl;
+        std::cout<<"* Retina demonstration : demonstrates the use of the Retina class, a wrapper of the Gipsa/Listic Labs retina model."<<std::endl;
+        std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
+        std::cout<<"* As a summary, these are the retina model properties:"<<std::endl;
+        std::cout<<"* => It applies a spectral whitening (mid-frequency details enhancement)"<<std::endl;
+        std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
+        std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
+        std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
+        std::cout<<"* for more information, refer to the following papers :"<<std::endl;
+        std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
+        std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing), by Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
+        std::cout<<"* => report comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
+        std::cout<<"* => more information and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
+ std::cout<<"****************************************************"<<std::endl;
+ std::cout<<" NOTE : this program generates the default retina parameters file 'RetinaDefaultParameters.xml'"<<std::endl;
+ std::cout<<" => you can use this to fine tune parameters and load them if you save to file 'RetinaSpecificParameters.xml'"<<std::endl;
+
+ // basic input arguments checking
+ if (argc<2)
+ {
+        help("bad number of parameters");
+ return -1;
+ }
+
+ bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
+
+ std::string inputMediaType=argv[1];
+
+    // declare the retina input buffer... it will be fed differently depending on the input media
+ cv::Mat inputFrame;
+ cv::VideoCapture videoCapture; // in case a video media is used, its manager is declared here
+
+ //////////////////////////////////////////////////////////////////////////////
+ // checking input media type (still image, video file, live video acquisition)
+ if (!strcmp(inputMediaType.c_str(), "-image") && argc >= 3)
+ {
+ std::cout<<"RetinaDemo: processing image "<<argv[2]<<std::endl;
+ // image processing case
+         inputFrame = cv::imread(std::string(argv[2]), 1); // load as a color (BGR) image
+ }else
+ if (!strcmp(inputMediaType.c_str(), "-video"))
+ {
+ if (argc == 2 || (argc == 3 && useLogSampling)) // attempt to grab images from a video capture device
+ {
+ videoCapture.open(0);
+ }else// attempt to grab images from a video filestream
+ {
+ std::cout<<"RetinaDemo: processing video stream "<<argv[2]<<std::endl;
+ videoCapture.open(argv[2]);
+ }
+
+ // grab a first frame to check if everything is ok
+ videoCapture>>inputFrame;
+ }else
+ {
+ // bad command parameter
+ help("bad command parameter");
+ return -1;
+ }
+
+ if (inputFrame.empty())
+ {
+ help("Input media could not be loaded, aborting");
+ return -1;
+ }
+
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Program start in a try/catch safety context (Retina may throw errors)
+ try
+ {
+ // create a retina instance with default parameters setup, uncomment the initialisation you wanna test
+ cv::Ptr<cv::Retina> myRetina;
+
+ // if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
+ if (useLogSampling)
{
myRetina = new cv::Retina(inputFrame.size(), true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
- else// -> else allocate "classical" retina :
- myRetina = new cv::Retina(inputFrame.size());
-
- // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
- myRetina->write("RetinaDefaultParameters.xml");
-
- // load parameters if file exists
- myRetina->setup("RetinaSpecificParameters.xml");
- myRetina->clearBuffers();
-
- // declare retina output buffers
- cv::Mat retinaOutput_parvo;
- cv::Mat retinaOutput_magno;
-
- // processing loop with stop condition
- bool continueProcessing=true; // FIXME : not yet managed during process...
- while(continueProcessing)
- {
- // if using video stream, then, grabbing a new frame, else, input remains the same
- if (videoCapture.isOpened())
- videoCapture>>inputFrame;
-
- // run retina filter
- myRetina->run(inputFrame);
- // Retrieve and display retina output
- myRetina->getParvo(retinaOutput_parvo);
- myRetina->getMagno(retinaOutput_magno);
- cv::imshow("retina input", inputFrame);
- cv::imshow("Retina Parvo", retinaOutput_parvo);
- cv::imshow("Retina Magno", retinaOutput_magno);
- cv::waitKey(10);
- }
- }catch(cv::Exception e)
- {
- std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
- }
-
- // Program end message
- std::cout<<"Retina demo end"<<std::endl;
-
- return 0;
+ else// -> else allocate "classical" retina :
+ myRetina = new cv::Retina(inputFrame.size());
+
+ // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
+ myRetina->write("RetinaDefaultParameters.xml");
+
+ // load parameters if file exists
+ myRetina->setup("RetinaSpecificParameters.xml");
+ myRetina->clearBuffers();
+
+ // declare retina output buffers
+ cv::Mat retinaOutput_parvo;
+ cv::Mat retinaOutput_magno;
+
+ // processing loop with stop condition
+ bool continueProcessing=true; // FIXME : not yet managed during process...
+ while(continueProcessing)
+ {
+            // if using a video stream, grab a new frame; otherwise the input remains the same
+ if (videoCapture.isOpened())
+ videoCapture>>inputFrame;
+
+ // run retina filter
+ myRetina->run(inputFrame);
+ // Retrieve and display retina output
+ myRetina->getParvo(retinaOutput_parvo);
+ myRetina->getMagno(retinaOutput_magno);
+ cv::imshow("retina input", inputFrame);
+ cv::imshow("Retina Parvo", retinaOutput_parvo);
+ cv::imshow("Retina Magno", retinaOutput_magno);
+ cv::waitKey(10);
+ }
+        }catch(const cv::Exception& e)
+ {
+ std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
+ }
+
+ // Program end message
+ std::cout<<"Retina demo end"<<std::endl;
+
+ return 0;
}
using namespace cv;
-void help()
+static void help()
{
- printf("\n"
- "This program demonstrated a simple method of connected components clean up of background subtraction\n"
- "When the program starts, it begins learning the background.\n"
- "You can toggle background learning on and off by hitting the space bar.\n"
- "Call\n"
- "./segment_objects [video file, else it reads camera 0]\n\n");
+ printf("\n"
+           "This program demonstrates a simple method of connected-component clean-up of background subtraction\n"
+ "When the program starts, it begins learning the background.\n"
+ "You can toggle background learning on and off by hitting the space bar.\n"
+ "Call\n"
+ "./segment_objects [video file, else it reads camera 0]\n\n");
}
-void refineSegments(const Mat& img, Mat& mask, Mat& dst)
+static void refineSegments(const Mat& img, Mat& mask, Mat& dst)
{
int niters = 3;
-
+
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
-
+
Mat temp;
-
+
dilate(mask, temp, Mat(), Point(-1,-1), niters);
erode(temp, temp, Mat(), Point(-1,-1), niters*2);
dilate(temp, temp, Mat(), Point(-1,-1), niters);
-
+
findContours( temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
-
- dst = Mat::zeros(img.size(), CV_8UC3);
-
+
+ dst = Mat::zeros(img.size(), CV_8UC3);
+
if( contours.size() == 0 )
return;
-
+
// iterate through all the top-level contours,
// draw each connected component with its own random color
int idx = 0, largestComp = 0;
double maxArea = 0;
-
+
for( ; idx >= 0; idx = hierarchy[idx][0] )
{
const vector<Point>& c = contours[idx];
{
VideoCapture cap;
bool update_bg_model = true;
-
+
help();
if( argc < 2 )
cap.open(0);
else
cap.open(std::string(argv[1]));
-
+
if( !cap.isOpened() )
{
printf("\nCan not open camera or video file\n");
return -1;
}
-
+
Mat tmp_frame, bgmask, out_frame;
-
+
cap >> tmp_frame;
if(!tmp_frame.data)
{
printf("can not read data from the video source\n");
return -1;
}
-
+
namedWindow("video", 1);
namedWindow("segmented", 1);
-
+
BackgroundSubtractorMOG bgsubtractor;
bgsubtractor.set("noiseSigma", 10);
-
+
for(;;)
{
cap >> tmp_frame;
printf("Learn background is in state = %d\n",update_bg_model);
}
}
-
+
return 0;
}
"select3dobj -w <board_width> -h <board_height> [-s <square_size>]\n"
" -i <camera_intrinsics_filename> -o <output_prefix> [video_filename/cameraId]\n"
"\n"
-" -w <board_width> Number of chessboard corners wide\n"
-" -h <board_height> Number of chessboard corners width\n"
-" [-s <square_size>] Optional measure of chessboard squares in meters\n"
+" -w <board_width> Number of chessboard corners wide\n"
+" -h <board_height> Number of chessboard corners width\n"
+" [-s <square_size>] Optional measure of chessboard squares in meters\n"
" -i <camera_intrinsics_filename> Camera matrix .yml file from calibration.cpp\n"
-" -o <output_prefix> Prefix the output segmentation images with this\n"
+" -o <output_prefix> Prefix the output segmentation images with this\n"
" [video_filename/cameraId] If present, read from that video file or that ID\n"
"\n"
"Using a camera's intrinsics (from calibrating a camera -- see calibration.cpp) and an\n"
" q - Exit the program\n"
"\n\n";
-void help()
-{
- puts(helphelp);
-}
+// static void help()
+// {
+// puts(helphelp);
+// }
struct MouseEvent
fs["image_height"] >> calibratedImageSize.height;
fs["distortion_coefficients"] >> distCoeffs;
fs["camera_matrix"] >> cameraMatrix;
-
+
if( distCoeffs.type() != CV_64F )
distCoeffs = Mat_<double>(distCoeffs);
if( cameraMatrix.type() != CV_64F )
cameraMatrix = Mat_<double>(cameraMatrix);
-
+
return true;
}
static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
-
+
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float(j*squareSize),
}
-static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFrame,
+static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFrame,
const Mat& cameraMatrix, const Mat& rvec, const Mat& tvec,
const vector<Point3f>& box, int nobjpt, bool runExtraSegmentation)
{
return Rect();
vector<Point3f> objpt;
vector<Point2f> imgpt;
-
+
objpt.push_back(box[0]);
if( nobjpt > 1 )
objpt.push_back(box[1]);
if( nobjpt > 3 )
for( int i = 0; i < 4; i++ )
objpt.push_back(Point3f(objpt[i].x, objpt[i].y, box[3].z));
-
+
projectPoints(Mat(objpt), rvec, tvec, cameraMatrix, Mat(), imgpt);
-
+
if( shownFrame.data )
{
if( nobjpt == 1 )
{
circle(shownFrame, imgpt[i], 3, Scalar(0,255,0), -1, CV_AA);
line(shownFrame, imgpt[i], imgpt[(i+1)%4], Scalar(0,255,0), 3, CV_AA);
- }
+ }
else
for( int i = 0; i < 8; i++ )
{
line(shownFrame, imgpt[i], imgpt[i%4], Scalar(0,255,0), 3, CV_AA);
}
}
-
+
if( nobjpt <= 2 )
return Rect();
vector<Point> hull;
Mat selectedObjMask = Mat::zeros(frame.size(), CV_8U);
fillConvexPoly(selectedObjMask, &hull[0], (int)hull.size(), Scalar::all(255), 8, 0);
Rect roi = boundingRect(Mat(hull)) & Rect(Point(), frame.size());
-
+
if( runExtraSegmentation )
{
selectedObjMask = Scalar::all(GC_BGD);
3, GC_INIT_WITH_RECT + GC_INIT_WITH_MASK);
bitwise_and(selectedObjMask, Scalar::all(1), selectedObjMask);
}
-
+
frame.copyTo(selectedObjFrame, selectedObjMask);
return roi;
}
{
const float eps = 1e-3f;
MouseEvent mouse;
-
+
setMouseCallback(windowname, onMouse, &mouse);
vector<Point3f> tempobj(8);
vector<Point2f> imgpt(4), tempimg(8);
Mat R, selectedObjMask, selectedObjFrame, shownFrame;
Rodrigues(rvec, R);
box.resize(4);
-
+
for(;;)
{
float Z = 0.f;
bool dragging = (mouse.buttonState & CV_EVENT_FLAG_LBUTTON) != 0;
int npt = nobjpt;
-
+
if( (mouse.event == CV_EVENT_LBUTTONDOWN ||
mouse.event == CV_EVENT_LBUTTONUP ||
dragging) && nobjpt < 4 )
{
Point2f m = mouse.pt;
-
+
if( nobjpt < 2 )
imgpt[npt] = m;
else
if( norm(m - imgpt[i]) < norm(m - imgpt[nearestIdx]) )
nearestIdx = i;
}
-
+
if( npt == 2 )
{
float dx = box[1].x - box[0].x, dy = box[1].y - box[0].y;
}
else
tempobj[0] = Point3f(box[nearestIdx].x, box[nearestIdx].y, 1.f);
-
+
projectPoints(Mat(tempobj), rvec, tvec, cameraMatrix, Mat(), tempimg);
-
+
Point2f a = imgpt[nearestIdx], b = tempimg[0], d1 = b - a, d2 = m - a;
float n1 = (float)norm(d1), n2 = (float)norm(d2);
if( n1*n2 < eps )
}
}
box[npt] = image2plane(imgpt[npt], R, tvec, cameraMatrix, npt<3 ? 0 : Z);
-
+
if( (npt == 0 && mouse.event == CV_EVENT_LBUTTONDOWN) ||
(npt > 0 && norm(box[npt] - box[npt-1]) > eps &&
mouse.event == CV_EVENT_LBUTTONUP) )
box[nobjpt] = box[nobjpt-1];
}
}
-
+
// reset the event
mouse.event = -1;
//mouse.buttonState = 0;
npt++;
}
-
+
frame.copyTo(shownFrame);
extract3DBox(frame, shownFrame, selectedObjFrame,
cameraMatrix, rvec, tvec, box, npt, false);
imshow(windowname, shownFrame);
imshow(selWinName, selectedObjFrame);
-
+
int c = waitKey(30);
if( (c & 255) == 27 )
{
roiList.resize(0);
poseList.resize(0);
box.resize(0);
-
+
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
fs["box"] >> box;
-
+
FileNode all = fs["views"];
if( all.type() != FileNode::SEQ )
return false;
FileNodeIterator it = all.begin(), it_end = all.end();
-
+
for(; it != it_end; ++it)
{
FileNode n = *it;
poseList.push_back(Vec6f((float)np[0], (float)np[1], (float)np[2],
(float)np[3], (float)np[4], (float)np[5]));
}
-
+
return true;
}
FileStorage fs(filename, FileStorage::WRITE);
if( !fs.isOpened() )
return false;
-
+
fs << "box" << "[:";
fs << box << "]" << "views" << "[";
-
+
size_t i, nviews = imagelist.size();
-
+
CV_Assert( nviews == roiList.size() && nviews == poseList.size() );
-
+
for( i = 0; i < nviews; i++ )
{
Rect r = roiList[i];
Vec6f p = poseList[i];
-
+
fs << "{" << "image" << imagelist[i] <<
"roi" << "[:" << r.x << r.y << r.width << r.height << "]" <<
"pose" << "[:" << p[0] << p[1] << p[2] << p[3] << p[4] << p[5] << "]" << "}";
}
fs << "]";
-
+
return true;
}
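// Written this way, the index file comes out as YAML along these lines (field names taken from
// the code, values are placeholders):
//   box: [ x1, y1, z1, x2, y2, z2, ... ]
//   views:
//     - { image: "<captured image path>", roi: [ x, y, width, height ],
//         pose: [ rx, ry, rz, tx, ty, tz ] }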
"\tSPACE - Skip the frame; move to the next frame (not in video mode)\n"
"\tENTER - Confirm the selection. Grab next object in video mode.\n"
"\tq - Exit the program\n";
-
+
if(argc < 5)
{
- puts(helphelp);
+ puts(helphelp);
puts(help);
return 0;
}
const char* intrinsicsFilename = 0;
const char* outprefix = 0;
- const char* inputName = 0;
- int cameraId = 0;
- Size boardSize;
- double squareSize = 1;
+ const char* inputName = 0;
+ int cameraId = 0;
+ Size boardSize;
+ double squareSize = 1;
vector<string> imageList;
-
+
for( int i = 1; i < argc; i++ )
{
if( strcmp(argv[i], "-i") == 0 )
- intrinsicsFilename = argv[++i];
- else if( strcmp(argv[i], "-o") == 0 )
- outprefix = argv[++i];
- else if( strcmp(argv[i], "-w") == 0 )
- {
- if(sscanf(argv[++i], "%d", &boardSize.width) != 1 || boardSize.width <= 0)
- {
- printf("Incorrect -w parameter (must be a positive integer)\n");
- puts(help);
- return 0;
- }
- }
- else if( strcmp(argv[i], "-h") == 0 )
- {
- if(sscanf(argv[++i], "%d", &boardSize.height) != 1 || boardSize.height <= 0)
- {
- printf("Incorrect -h parameter (must be a positive integer)\n");
- puts(help);
- return 0;
- }
- }
- else if( strcmp(argv[i], "-s") == 0 )
- {
- if(sscanf(argv[++i], "%lf", &squareSize) != 1 || squareSize <= 0)
- {
- printf("Incorrect -w parameter (must be a positive real number)\n");
- puts(help);
- return 0;
- }
- }
- else if( argv[i][0] != '-' )
- {
- if( isdigit(argv[i][0]))
- sscanf(argv[i], "%d", &cameraId);
- else
- inputName = argv[i];
- }
- else
- {
- printf("Incorrect option\n");
- puts(help);
- return 0;
- }
+ intrinsicsFilename = argv[++i];
+ else if( strcmp(argv[i], "-o") == 0 )
+ outprefix = argv[++i];
+ else if( strcmp(argv[i], "-w") == 0 )
+ {
+ if(sscanf(argv[++i], "%d", &boardSize.width) != 1 || boardSize.width <= 0)
+ {
+ printf("Incorrect -w parameter (must be a positive integer)\n");
+ puts(help);
+ return 0;
+ }
+ }
+ else if( strcmp(argv[i], "-h") == 0 )
+ {
+ if(sscanf(argv[++i], "%d", &boardSize.height) != 1 || boardSize.height <= 0)
+ {
+ printf("Incorrect -h parameter (must be a positive integer)\n");
+ puts(help);
+ return 0;
+ }
+ }
+ else if( strcmp(argv[i], "-s") == 0 )
+ {
+ if(sscanf(argv[++i], "%lf", &squareSize) != 1 || squareSize <= 0)
+ {
+                printf("Incorrect -s parameter (must be a positive real number)\n");
+ puts(help);
+ return 0;
+ }
+ }
+ else if( argv[i][0] != '-' )
+ {
+ if( isdigit(argv[i][0]))
+ sscanf(argv[i], "%d", &cameraId);
+ else
+ inputName = argv[i];
+ }
+ else
+ {
+ printf("Incorrect option\n");
+ puts(help);
+ return 0;
+ }
+ }
+
+ if( !intrinsicsFilename || !outprefix ||
+ boardSize.width <= 0 || boardSize.height <= 0 )
+ {
+ printf("Some of the required parameters are missing\n");
+ puts(help);
+ return 0;
}
-
- if( !intrinsicsFilename || !outprefix ||
- boardSize.width <= 0 || boardSize.height <= 0 )
- {
- printf("Some of the required parameters are missing\n");
- puts(help);
- return 0;
- }
-
+
Mat cameraMatrix, distCoeffs;
Size calibratedImageSize;
readCameraMatrix(intrinsicsFilename, cameraMatrix, distCoeffs, calibratedImageSize );
-
- VideoCapture capture;
+
+ VideoCapture capture;
if( inputName )
{
if( !readStringList(inputName, imageList) &&
}
else
capture.open(cameraId);
-
+
if( !capture.isOpened() && imageList.empty() )
return fprintf( stderr, "Could not initialize video capture\n" ), -2;
-
+
const char* outbarename = 0;
{
outbarename = strrchr(outprefix, '/');
else
outbarename = outprefix;
}
-
- Mat frame, shownFrame, selectedObjFrame, mapxy;
-
- namedWindow("View", 1);
+
+ Mat frame, shownFrame, selectedObjFrame, mapxy;
+
+ namedWindow("View", 1);
namedWindow("Selected Object", 1);
setMouseCallback("View", onMouse, 0);
bool boardFound = false;
-
+
string indexFilename = format("%s_index.yml", outprefix);
-
+
vector<string> capturedImgList;
vector<Rect> roiList;
vector<Vec6f> poseList;
vector<Point3f> box, boardPoints;
-
+
readModelViews(indexFilename, box, capturedImgList, roiList, poseList);
calcChessboardCorners(boardSize, (float)squareSize, boardPoints);
int frameIdx = 0;
bool grabNext = !imageList.empty();
-
+
puts(screen_help);
- for(int i = 0;;i++)
- {
+ for(int i = 0;;i++)
+ {
Mat frame0;
if( !imageList.empty() )
{
{
double sx = (double)frame0.cols/calibratedImageSize.width;
double sy = (double)frame0.rows/calibratedImageSize.height;
-
+
// adjust the camera matrix for the new resolution
cameraMatrix.at<double>(0,0) *= sx;
cameraMatrix.at<double>(0,2) *= sx;
remap(frame0, frame, mapxy, Mat(), INTER_LINEAR);
vector<Point2f> foundBoardCorners;
boardFound = findChessboardCorners(frame, boardSize, foundBoardCorners);
-
+
Mat rvec, tvec;
if( boardFound )
solvePnP(Mat(boardPoints), Mat(foundBoardCorners), cameraMatrix,
distCoeffs, rvec, tvec, false);
-
+
frame.copyTo(shownFrame);
drawChessboardCorners(shownFrame, boardSize, Mat(foundBoardCorners), boardFound);
selectedObjFrame = Mat::zeros(frame.size(), frame.type());
-
- if( boardFound && grabNext )
+
+ if( boardFound && grabNext )
{
if( box.empty() )
{
if( code == -100 )
break;
}
-
+
if( !box.empty() )
{
Rect r = extract3DBox(frame, shownFrame, selectedObjFrame,
break;
}
imwrite(path, selectedObjFrame(r));
-
+
capturedImgList.push_back(string(path));
roiList.push_back(r);
-
+
float p[6];
Mat RV(3, 1, CV_32F, p), TV(3, 1, CV_32F, p+3);
rvec.convertTo(RV, RV.type());
imshow("View", shownFrame);
imshow("Selected Object", selectedObjFrame);
- int c = waitKey(imageList.empty() && !box.empty() ? 30 : 300);
+ int c = waitKey(imageList.empty() && !box.empty() ? 30 : 300);
if( c == 'q' || c == 'Q' )
break;
if( c == '\r' || c == '\n' )
grabNext = true;
- }
+ }
writeModelViews(indexFilename, box, capturedImgList, roiList, poseList);
return 0;
using namespace cv;
using namespace std;
-void help()
+static void help()
{
- cout <<
- "\nA program using pyramid scaling, Canny, contours, contour simpification and\n"
- "memory storage (it's got it all folks) to find\n"
- "squares in a list of images pic1-6.png\n"
- "Returns sequence of squares detected on the image.\n"
- "the sequence is stored in the specified memory storage\n"
- "Call:\n"
- "./squares\n"
+ cout <<
+    "\nA program using pyramid scaling, Canny, contours, contour simplification and\n"
+ "memory storage (it's got it all folks) to find\n"
+ "squares in a list of images pic1-6.png\n"
+ "Returns sequence of squares detected on the image.\n"
+ "the sequence is stored in the specified memory storage\n"
+ "Call:\n"
+ "./squares\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
}
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
-double angle( Point pt1, Point pt2, Point pt0 )
+static double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
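// For reference, the cosine being returned is the usual
//   (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2))
// with a tiny epsilon typically added to the denominator to guard against zero-length edges.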
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
-void findSquares( const Mat& image, vector<vector<Point> >& squares )
+static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
-
+
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
-
+
// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;
-
+
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
-
+
// try several threshold levels
for( int l = 0; l < N; l++ )
{
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
-
+
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
-
+
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
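// A sketch of the acceptance test those three comments describe (thresholds are illustrative,
// not necessarily the exact constants of this sample):
//   if( approx.size() == 4 && fabs(contourArea(Mat(approx))) > 1000 && isContourConvex(Mat(approx)) )
//       // ...then also require every corner cosine (see angle()) to stay small, i.e. corners
//       // close to 90 degrees, before pushing the contour into squares.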
// the function draws all the squares in the image
-void drawSquares( Mat& image, const vector<vector<Point> >& squares )
+static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 0; i < squares.size(); i++ )
{
help();
namedWindow( wndname, 1 );
vector<vector<Point> > squares;
-
+
for( int i = 0; names[i] != 0; i++ )
{
Mat image = imread(names[i], 1);
cout << "Couldn't load " << names[i] << endl;
continue;
}
-
+
findSquares(image, squares);
drawSquares(image, squares);
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
-
- AVAILABLE AT:
+
+ AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
- ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
+ ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
OTHER OPENCV SITES:
* The source code is on sourceforge at:
using namespace cv;
using namespace std;
-int print_help()
+static int print_help()
{
- cout <<
- " Given a list of chessboard images, the number of corners (nx, ny)\n"
- " on the chessboards, and a flag: useCalibrated for \n"
- " calibrated (0) or\n"
- " uncalibrated \n"
- " (1: use cvStereoCalibrate(), 2: compute fundamental\n"
- " matrix separately) stereo. \n"
- " Calibrate the cameras and display the\n"
- " rectified results along with the computed disparity images. \n" << endl;
+ cout <<
+ " Given a list of chessboard images, the number of corners (nx, ny)\n"
+ " on the chessboards, and a flag: useCalibrated for \n"
+ " calibrated (0) or\n"
+ " uncalibrated \n"
+ " (1: use cvStereoCalibrate(), 2: compute fundamental\n"
+ " matrix separately) stereo. \n"
+ " Calibrate the cameras and display the\n"
+ " rectified results along with the computed disparity images. \n" << endl;
cout << "Usage:\n ./stereo_calib -w board_width -h board_height [-nr /*dot not view results*/] <image list XML/YML file>\n" << endl;
return 0;
}
cout << "Error: the image list contains odd (non-even) number of elements\n";
return;
}
-
+
bool displayCorners = false;//true;
const int maxScale = 2;
const float squareSize = 1.f; // Set this to your actual square size
// ARRAY AND VECTOR STORAGE:
-
+
vector<vector<Point2f> > imagePoints[2];
vector<vector<Point3f> > objectPoints;
Size imageSize;
-
+
int i, j, k, nimages = (int)imagelist.size()/2;
-
+
imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
vector<string> goodImageList;
-
+
for( i = j = 0; i < nimages; i++ )
{
for( k = 0; k < 2; k++ )
timg = img;
else
resize(img, timg, Size(), scale, scale);
- found = findChessboardCorners(timg, boardSize, corners,
+ found = findChessboardCorners(timg, boardSize, corners,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
if( found )
{
cout << "Error: too little pairs to run the calibration\n";
return;
}
-
+
imagePoints[0].resize(nimages);
imagePoints[1].resize(nimages);
objectPoints.resize(nimages);
-
+
for( i = 0; i < nimages; i++ )
{
for( j = 0; j < boardSize.height; j++ )
for( k = 0; k < boardSize.width; k++ )
objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
}
-
+
cout << "Running stereo calibration ...\n";
-
+
Mat cameraMatrix[2], distCoeffs[2];
cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
Mat R, T, E, F;
-
+
double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
CV_CALIB_RATIONAL_MODEL +
CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
cout << "done with RMS error=" << rms << endl;
-
+
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
npoints += npt;
}
cout << "average reprojection err = " << err/npoints << endl;
-
+
// save intrinsic parameters
FileStorage fs("intrinsics.yml", CV_STORAGE_WRITE);
if( fs.isOpened() )
}
else
cout << "Error: can not save the intrinsic parameters\n";
-
+
Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
-
+
stereoRectify(cameraMatrix[0], distCoeffs[0],
cameraMatrix[1], distCoeffs[1],
imageSize, R, T, R1, R2, P1, P2, Q,
CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
-
+
fs.open("extrinsics.yml", CV_STORAGE_WRITE);
if( fs.isOpened() )
{
}
else
cout << "Error: can not save the intrinsic parameters\n";
-
+
// OpenCV can handle left-right
// or up-down camera arrangements
bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));
-
+
// COMPUTE AND DISPLAY RECTIFICATION
if( !showRectified )
return;
-
+
Mat rmap[2][2];
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useCalibrated )
F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
Mat H1, H2;
stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);
-
+
R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
P1 = cameraMatrix[0];
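    // Uncalibrated (Hartley) path: stereoRectifyUncalibrated() yields pixel-space homographies
    // H1, H2; conjugating them with the camera matrices, Ri = Ki^-1 * Hi * Ki, produces the
    // rectification transforms expected by initUndistortRectifyMap(), while the projection
    // matrices are simply taken to be the original camera matrices.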
//Precompute maps for cv::remap()
initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
-
+
Mat canvas;
double sf;
int w, h;
h = cvRound(imageSize.height*sf);
canvas.create(h*2, w, CV_8UC3);
}
-
+
for( i = 0; i < nimages; i++ )
{
for( k = 0; k < 2; k++ )
if( useCalibrated )
{
Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
- cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
+ cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
}
}
-
+
if( !isVerticalStereo )
for( j = 0; j < canvas.rows; j += 16 )
line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
}
}
-
+
static bool readStringList( const string& filename, vector<string>& l )
{
l.resize(0);
l.push_back((string)*it);
return true;
}
-
+
int main(int argc, char** argv)
{
Size boardSize;
string imagelistfn;
bool showRectified = true;
-
+
for( int i = 1; i < argc; i++ )
{
if( string(argv[i]) == "-w" )
else
imagelistfn = argv[i];
}
-
+
if( imagelistfn == "" )
{
imagelistfn = "stereo_calib.xml";
}
else if( boardSize.width <= 0 || boardSize.height <= 0 )
{
- cout << "if you specified XML file with chessboards, you should also specify the board width and height (-w and -h options)" << endl;
+        cout << "if you specified an XML file with chessboards, you should also specify the board width and height (-w and -h options)" << endl;
return 0;
}
-
+
vector<string> imagelist;
bool ok = readStringList(imagelistfn, imagelist);
if(!ok || imagelist.empty())
cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
return print_help();
}
-
+
StereoCalib(imagelist, boardSize, true, showRectified);
return 0;
}
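The hunks above only adjust whitespace in stereo_calib.cpp, which makes the calibration flow hard to follow in excerpt form. As a reading aid, here is a minimal, hedged sketch of the calibrate, rectify, remap pipeline the sample implements (OpenCV 2.4-style API); the helper name rectifyPair and its argument layout are illustrative, not part of the sample.

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Illustrative sketch only: assumes the chessboard corners were already
// collected into objectPoints/imagePoints1/imagePoints2, as the sample does.
static void rectifyPair(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                        const std::vector<std::vector<cv::Point2f> >& imagePoints1,
                        const std::vector<std::vector<cv::Point2f> >& imagePoints2,
                        cv::Size imageSize,
                        cv::Mat& map11, cv::Mat& map12, cv::Mat& map21, cv::Mat& map22)
{
    cv::Mat M1 = cv::Mat::eye(3, 3, CV_64F), M2 = cv::Mat::eye(3, 3, CV_64F);
    cv::Mat D1, D2, R, T, E, F;

    // Jointly estimate both intrinsics and the rotation/translation between the cameras.
    cv::stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                        M1, D1, M2, D2, imageSize, R, T, E, F,
                        cv::TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5),
                        CV_CALIB_RATIONAL_MODEL);

    // Compute rectification rotations and the new projection matrices.
    cv::Mat R1, R2, P1, P2, Q;
    cv::stereoRectify(M1, D1, M2, D2, imageSize, R, T, R1, R2, P1, P2, Q,
                      cv::CALIB_ZERO_DISPARITY, 1, imageSize);

    // Precompute the per-pixel lookup tables consumed later by cv::remap().
    cv::initUndistortRectifyMap(M1, D1, R1, P1, imageSize, CV_16SC2, map11, map12);
    cv::initUndistortRectifyMap(M2, D2, R2, P2, imageSize, CV_16SC2, map21, map22);
}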
using namespace cv;
-void print_help()
+static void print_help()
{
- printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
+ printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
printf("\nUsage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh|var] [--blocksize=<block_size>]\n"
"[--max-disparity=<max_disparity>] [--scale=scale_factor>] [-i <intrinsic_filename>] [-e <extrinsic_filename>]\n"
"[--no-display] [-o <disparity_image>] [-p <point_cloud_file>]\n");
}
-void saveXYZ(const char* filename, const Mat& mat)
+static void saveXYZ(const char* filename, const Mat& mat)
{
const double max_z = 1.0e4;
FILE* fp = fopen(filename, "wt");
const char* blocksize_opt = "--blocksize=";
const char* nodisplay_opt = "--no-display=";
const char* scale_opt = "--scale=";
-
+
if(argc < 3)
{
print_help();
- return 0;
+ return 0;
}
const char* img1_filename = 0;
const char* img2_filename = 0;
const char* extrinsic_filename = 0;
const char* disparity_filename = 0;
const char* point_cloud_filename = 0;
-
+
enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3 };
int alg = STEREO_SGBM;
int SADWindowSize = 0, numberOfDisparities = 0;
bool no_display = false;
float scale = 1.f;
-
+
StereoBM bm;
StereoSGBM sgbm;
StereoVar var;
-
+
for( int i = 1; i < argc; i++ )
{
if( argv[i][0] != '-' )
return -1;
}
}
-
+
if( !img1_filename || !img2_filename )
{
printf("Command-line parameter error: both left and right images must be specified\n");
return -1;
}
-
+
if( (intrinsic_filename != 0) ^ (extrinsic_filename != 0) )
{
printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already rectified)\n");
return -1;
}
-
+
if( extrinsic_filename == 0 && point_cloud_filename )
{
printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");
return -1;
}
-
+
int color_mode = alg == STEREO_BM ? 0 : -1;
Mat img1 = imread(img1_filename, color_mode);
Mat img2 = imread(img2_filename, color_mode);
-
+
if( scale != 1.f )
{
Mat temp1, temp2;
resize(img2, temp2, Size(), scale, scale, method);
img2 = temp2;
}
-
+
Size img_size = img1.size();
-
+
Rect roi1, roi2;
Mat Q;
-
+
if( intrinsic_filename )
{
// reading intrinsic parameters
printf("Failed to open file %s\n", intrinsic_filename);
return -1;
}
-
+
Mat M1, D1, M2, D2;
fs["M1"] >> M1;
fs["D1"] >> D1;
fs["M2"] >> M2;
fs["D2"] >> D2;
-
+
fs.open(extrinsic_filename, CV_STORAGE_READ);
if(!fs.isOpened())
{
printf("Failed to open file %s\n", extrinsic_filename);
return -1;
}
-
+
Mat R, T, R1, P1, R2, P2;
fs["R"] >> R;
fs["T"] >> T;
-
+
stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );
-
+
Mat map11, map12, map21, map22;
initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
-
+
Mat img1r, img2r;
remap(img1, img1r, map11, map12, INTER_LINEAR);
remap(img2, img2r, map21, map22, INTER_LINEAR);
-
+
img1 = img1r;
img2 = img2r;
}
-
+
numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width/8) + 15) & -16;
-
+
bm.state->roi1 = roi1;
bm.state->roi2 = roi2;
bm.state->preFilterCap = 31;
bm.state->speckleWindowSize = 100;
bm.state->speckleRange = 32;
bm.state->disp12MaxDiff = 1;
-
+
sgbm.preFilterCap = 63;
sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;
-
+
int cn = img1.channels();
-
+
sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.minDisparity = 0;
sgbm.speckleRange = bm.state->speckleRange;
sgbm.disp12MaxDiff = 1;
sgbm.fullDP = alg == STEREO_HH;
-
- var.levels = 3; // ignored with USE_AUTO_PARAMS
- var.pyrScale = 0.5; // ignored with USE_AUTO_PARAMS
- var.nIt = 25;
- var.minDisp = -numberOfDisparities;
- var.maxDisp = 0;
- var.poly_n = 3;
- var.poly_sigma = 0.0;
- var.fi = 15.0f;
- var.lambda = 0.03f;
- var.penalization = var.PENALIZATION_TICHONOV; // ignored with USE_AUTO_PARAMS
- var.cycle = var.CYCLE_V; // ignored with USE_AUTO_PARAMS
- var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING ;
-
+
+ var.levels = 3; // ignored with USE_AUTO_PARAMS
+ var.pyrScale = 0.5; // ignored with USE_AUTO_PARAMS
+ var.nIt = 25;
+ var.minDisp = -numberOfDisparities;
+ var.maxDisp = 0;
+ var.poly_n = 3;
+ var.poly_sigma = 0.0;
+ var.fi = 15.0f;
+ var.lambda = 0.03f;
+ var.penalization = var.PENALIZATION_TICHONOV; // ignored with USE_AUTO_PARAMS
+ var.cycle = var.CYCLE_V; // ignored with USE_AUTO_PARAMS
+ var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING ;
+
Mat disp, disp8;
//Mat img1p, img2p, dispp;
//copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
//copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
-
+
int64 t = getTickCount();
if( alg == STEREO_BM )
bm(img1, img2, disp);
else if( alg == STEREO_VAR ) {
var(img1, img2, disp);
- }
+ }
else if( alg == STEREO_SGBM || alg == STEREO_HH )
sgbm(img1, img2, disp);
t = getTickCount() - t;
waitKey();
printf("\n");
}
-
+
if(disparity_filename)
imwrite(disparity_filename, disp8);
-
+
if(point_cloud_filename)
{
printf("storing the point cloud...");
saveXYZ(point_cloud_filename, xyz);
printf("\n");
}
-
+
return 0;
}
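In the same spirit, a hedged sketch of the SGBM branch that the stereo_match.cpp hunks above configure; the parameter values mirror the sample's defaults, and computeDisparity is an illustrative name rather than sample code.

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>

// Illustrative sketch only (OpenCV 2.4-style StereoSGBM).
static cv::Mat computeDisparity(const cv::Mat& img1, const cv::Mat& img2,
                                int numberOfDisparities, const cv::Mat& Q, cv::Mat& xyz)
{
    cv::StereoSGBM sgbm;
    sgbm.SADWindowSize = 3;
    int cn = img1.channels();
    sgbm.P1 = 8 * cn * sgbm.SADWindowSize * sgbm.SADWindowSize;   // smoothness penalties
    sgbm.P2 = 32 * cn * sgbm.SADWindowSize * sgbm.SADWindowSize;
    sgbm.minDisparity = 0;
    sgbm.numberOfDisparities = numberOfDisparities;               // must be divisible by 16
    sgbm.uniquenessRatio = 10;
    sgbm.speckleWindowSize = 100;
    sgbm.speckleRange = 32;
    sgbm.disp12MaxDiff = 1;

    cv::Mat disp, disp8;
    sgbm(img1, img2, disp);                                       // CV_16S disparity, scaled by 16
    disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities * 16.));
    cv::reprojectImageTo3D(disp, xyz, Q, true);                   // dense 3-channel point cloud
    return disp8;
}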
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
-//
+//
//
//M*/
using namespace cv;
using namespace cv::detail;
-void printUsage()
+static void printUsage()
{
cout <<
"Rotation model images stitcher.\n\n"
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
-string features = "surf";
+string features_type = "surf";
string ba_cost_func = "ray";
string ba_refine_mask = "xxxxx";
bool do_wave_correct = true;
float blend_strength = 5;
string result_name = "result.jpg";
-int parseCmdArgs(int argc, char** argv)
+static int parseCmdArgs(int argc, char** argv)
{
if (argc == 1)
{
}
else if (string(argv[i]) == "--features")
{
- features = argv[i + 1];
- if (features == "orb")
+ features_type = argv[i + 1];
+ if (features_type == "orb")
match_conf = 0.3f;
i++;
}
int64 t = getTickCount();
Ptr<FeaturesFinder> finder;
- if (features == "surf")
+ if (features_type == "surf")
{
#ifdef HAVE_OPENCV_GPU
if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
#endif
finder = new SurfFeaturesFinder();
}
- else if (features == "orb")
+ else if (features_type == "orb")
{
finder = new OrbFeaturesFinder();
}
else
{
- cout << "Unknown 2D features type: '" << features << "'.\n";
+ cout << "Unknown 2D features type: '" << features_type << "'.\n";
return -1;
}
Ptr<detail::BundleAdjusterBase> adjuster;
if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
- else
- {
- cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
- return -1;
+ else
+ {
+ cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
+ return -1;
}
adjuster->setConfThresh(conf_thresh);
Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
- else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
- else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
- else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
- else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
- else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
- else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
- else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
- else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
- else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
- else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
- else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
- else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
+ else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
+ else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
+ else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
+ else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
+ else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
+ else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
+ else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
+ else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
+ else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
+ else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
+ else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
+ else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
}
if (warper_creator.empty())
cout << "Can't create the following warper '" << warp_type << "'\n";
return 1;
}
-
+
Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
for (int i = 0; i < num_images; ++i)
Mat img_warped, img_warped_s;
Mat dilated_mask, seam_mask, mask, mask_warped;
Ptr<Blender> blender;
- double compose_seam_aspect = 1;
+ //double compose_seam_aspect = 1;
double compose_work_aspect = 1;
for (int img_idx = 0; img_idx < num_images; ++img_idx)
is_compose_scale_set = true;
// Compute relative scales
- compose_seam_aspect = compose_scale / seam_scale;
+ //compose_seam_aspect = compose_scale / seam_scale;
compose_work_aspect = compose_scale / work_scale;
// Update warped image scale
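The warper selection chain above only constructs a WarperCreator. As a hedged illustration of what the chosen warper is used for afterwards, here is a minimal sketch assuming a spherical warper, CV_32F camera matrices and a scale that plays the role of warped_image_scale; warpOne is an illustrative helper, not sample code.

#include <opencv2/stitching/warpers.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Illustrative sketch only: K and R are the estimated camera intrinsics and
// rotation for one image, both expected as CV_32F matrices.
static cv::Mat warpOne(const cv::Mat& img, const cv::Mat& K, const cv::Mat& R, float scale)
{
    cv::Ptr<cv::WarperCreator> warper_creator = new cv::SphericalWarper();
    cv::Ptr<cv::detail::RotationWarper> warper = warper_creator->create(scale);

    cv::Mat img_warped;
    warper->warp(img, K, R, cv::INTER_LINEAR, cv::BORDER_REFLECT, img_warped);
    return img_warped;
}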
using namespace std;
using namespace cv;
-void help(char **av)
+static void help(char **av)
{
cout << "\nThis program demonstrated the use of features2d with the Fast corner detector and brief descriptors\n"
<< "to track planar objects by computing their homography from the key (training) image to the query (test) image\n\n" << endl;
using namespace cv;
using namespace std;
-void help()
+static void help()
{
- cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n"
- "Usage:\n"
- "./watershed [image_name -- default is fruits.jpg]\n" << endl;
+ cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n"
+ "Usage:\n"
+ "./watershed [image_name -- default is fruits.jpg]\n" << endl;
- cout << "Hot keys: \n"
- "\tESC - quit the program\n"
- "\tr - restore the original image\n"
- "\tw or SPACE - run watershed segmentation algorithm\n"
- "\t\t(before running it, *roughly* mark the areas to segment on the image)\n"
- "\t (before that, roughly outline several markers on the image)\n";
+ cout << "Hot keys: \n"
+ "\tESC - quit the program\n"
+ "\tr - restore the original image\n"
+ "\tw or SPACE - run watershed segmentation algorithm\n"
+ "\t\t(before running it, *roughly* mark the areas to segment on the image)\n"
+ "\t (before that, roughly outline several markers on the image)\n";
}
Mat markerMask, img;
Point prevPt(-1, -1);
-void onMouse( int event, int x, int y, int flags, void* )
+static void onMouse( int event, int x, int y, int flags, void* )
{
if( x < 0 || x >= img.cols || y < 0 || y >= img.rows )
return;
{
char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
Mat img0 = imread(filename, 1), imgGray;
-
+
if( img0.empty() )
{
cout << "Couldn'g open image " << filename << ". Usage: watershed <image_name>\n";
int i, j, compCount = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
-
+
findContours(markerMask, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
-
+
if( contours.empty() )
continue;
Mat markers(markerMask.size(), CV_32S);
if( compCount == 0 )
continue;
-
+
vector<Vec3b> colorTab;
for( i = 0; i < compCount; i++ )
{
int b = theRNG().uniform(0, 255);
int g = theRNG().uniform(0, 255);
int r = theRNG().uniform(0, 255);
-
+
colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
}
printf( "execution time = %gms\n", t*1000./getTickFrequency() );
Mat wshed(markers.size(), CV_8UC3);
-
+
// paint the watershed image
for( i = 0; i < markers.rows; i++ )
for( j = 0; j < markers.cols; j++ )
{
- int idx = markers.at<int>(i,j);
- if( idx == -1 )
+ int index = markers.at<int>(i,j);
+ if( index == -1 )
wshed.at<Vec3b>(i,j) = Vec3b(255,255,255);
- else if( idx <= 0 || idx > compCount )
+ else if( index <= 0 || index > compCount )
wshed.at<Vec3b>(i,j) = Vec3b(0,0,0);
else
- wshed.at<Vec3b>(i,j) = colorTab[idx - 1];
+ wshed.at<Vec3b>(i,j) = colorTab[index - 1];
}
wshed = wshed*0.5 + imgGray*0.5;
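The watershed hunks above mostly rename idx to index, so the marker preparation that feeds the paint loop is easy to miss in excerpt form. A hedged sketch of that step follows; runWatershed is an illustrative wrapper, not code from the sample.

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <climits>

// Illustrative sketch only: each contour is rasterised with its own positive
// label, then cv::watershed() grows the labels and writes -1 on region boundaries.
static void runWatershed(const cv::Mat& img0,
                         const std::vector<std::vector<cv::Point> >& contours,
                         const std::vector<cv::Vec4i>& hierarchy, cv::Mat& markers)
{
    markers = cv::Mat::zeros(img0.size(), CV_32S);
    if (contours.empty())
        return;
    int compCount = 0;
    for (int idx = 0; idx >= 0; idx = hierarchy[idx][0], compCount++)
        cv::drawContours(markers, contours, idx, cv::Scalar::all(compCount + 1),
                         -1, 8, hierarchy, INT_MAX);
    cv::watershed(img0, markers);   // img0 must be an 8-bit 3-channel image
}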
GpuMat d_fu, d_fv;\r
\r
d_flow(d_frame0, d_frame1, d_fu, d_fv);\r
- \r
+\r
Mat flowFieldForward;\r
getFlowField(Mat(d_fu), Mat(d_fv), flowFieldForward);\r
- \r
+\r
cout << "\tBackward..." << endl;\r
\r
GpuMat d_bu, d_bv;\r
\r
d_flow(d_frame1, d_frame0, d_bu, d_bv);\r
- \r
+\r
Mat flowFieldBackward;\r
getFlowField(Mat(d_bu), Mat(d_bv), flowFieldBackward);\r
\r
#ifdef HAVE_OPENGL\r
cout << "Create Optical Flow Needle Map..." << endl;\r
- \r
+\r
GpuMat d_vertex, d_colors;\r
\r
createOpticalFlowNeedleMap(d_fu, d_fv, d_vertex, d_colors);\r
d_bt.upload(channels[0]);\r
d_gt.upload(channels[1]);\r
d_rt.upload(channels[2]);\r
- \r
+\r
// temporary buffer\r
GpuMat d_buf;\r
\r
// interpolate red channel\r
interpolateFrames(d_r, d_rt, d_fu, d_fv, d_bu, d_bv, timePos, d_rNew, d_buf);\r
\r
- GpuMat channels[] = {d_bNew, d_gNew, d_rNew};\r
- merge(channels, 3, d_newFrame);\r
+ GpuMat channels3[] = {d_bNew, d_gNew, d_rNew};\r
+ merge(channels3, 3, d_newFrame);\r
\r
frames.push_back(Mat(d_newFrame));\r
\r
return 0;\r
\r
case 'A':\r
- if (currentFrame > 0) \r
+ if (currentFrame > 0)\r
--currentFrame;\r
\r
imshow("Interpolated frame", frames[currentFrame]);\r
{\r
float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));\r
\r
- if (d > maxDisplacement) \r
+ if (d > maxDisplacement)\r
maxDisplacement = d;\r
}\r
}\r
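The excerpt above uses d_flow(...) without showing how the Brox optical flow object is built. A hedged sketch of that call pattern (OpenCV 2.4 gpu module); the parameter values are commonly used defaults, not authoritative, and broxFlow is an illustrative name.

#include <opencv2/gpu/gpu.hpp>

// Illustrative sketch only: Brox flow expects single-channel CV_32F frames
// with values scaled to [0, 1].
static void broxFlow(const cv::Mat& frame0, const cv::Mat& frame1, cv::Mat& u, cv::Mat& v)
{
    cv::gpu::GpuMat d_frame0(frame0), d_frame1(frame1);   // upload to the device
    cv::gpu::GpuMat d_u, d_v;

    cv::gpu::BroxOpticalFlow d_flow(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale factor*/,
                                    10 /*inner iterations*/, 77 /*outer iterations*/,
                                    10 /*solver iterations*/);
    d_flow(d_frame0, d_frame1, d_u, d_v);

    d_u.download(u);   // copy the dense flow components back to host memory
    d_v.download(v);
}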
using namespace cv::gpu;\r
\r
\r
-void help()\r
+static void help()\r
{\r
cout << "Usage: ./cascadeclassifier_gpu \n\t--cascade <cascade_file>\n\t(<image>|--video <video>|--camera <camera_id>)\n"\r
"Using OpenCV version " << CV_VERSION << endl << endl;\r
}\r
\r
\r
-void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss)\r
+static void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss)\r
{\r
int fontFace = FONT_HERSHEY_DUPLEX;\r
double fontScale = 0.8;\r
}\r
\r
\r
-void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)\r
+static void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)\r
{\r
Scalar fontColorRed = CV_RGB(255,0,0);\r
Scalar fontColorNV = CV_RGB(118,185,0);\r
-#if _MSC_VER >= 1400\r
+#if defined _MSC_VER && _MSC_VER >= 1400\r
#pragma warning( disable : 4201 4408 4127 4100)\r
#endif\r
\r
return c + (d-c) * (x-a) / (b-a);
}
-void colorizeFlow(const Mat &u, const Mat &v, Mat &dst)
+static void colorizeFlow(const Mat &u, const Mat &v, Mat &dst)
{
double uMin, uMax;
minMaxLoc(u, &uMin, &uMax, 0, 0);
double work_fps;\r
};\r
\r
-\r
-void printHelp()\r
+static void printHelp()\r
{\r
cout << "Histogram of Oriented Gradients descriptor and detector sample.\n"\r
<< "\nUsage: hog_gpu\n"\r
else if (string(argv[i]) == "--resize_src") args.resize_src = (string(argv[++i]) == "true");\r
else if (string(argv[i]) == "--width") args.width = atoi(argv[++i]);\r
else if (string(argv[i]) == "--height") args.height = atoi(argv[++i]);\r
- else if (string(argv[i]) == "--hit_threshold") \r
- { \r
- args.hit_threshold = atof(argv[++i]); \r
- args.hit_threshold_auto = false; \r
+ else if (string(argv[i]) == "--hit_threshold")\r
+ {\r
+ args.hit_threshold = atof(argv[++i]);\r
+ args.hit_threshold_auto = false;\r
}\r
else if (string(argv[i]) == "--scale") args.scale = atof(argv[++i]);\r
else if (string(argv[i]) == "--nlevels") args.nlevels = atoi(argv[++i]);\r
\r
// Create HOG descriptors and detectors here\r
vector<float> detector;\r
- if (win_size == Size(64, 128)) \r
+ if (win_size == Size(64, 128))\r
detector = cv::gpu::HOGDescriptor::getPeopleDetector64x128();\r
else\r
detector = cv::gpu::HOGDescriptor::getPeopleDetector48x96();\r
\r
- cv::gpu::HOGDescriptor gpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9, \r
- cv::gpu::HOGDescriptor::DEFAULT_WIN_SIGMA, 0.2, gamma_corr, \r
+ cv::gpu::HOGDescriptor gpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9,\r
+ cv::gpu::HOGDescriptor::DEFAULT_WIN_SIGMA, 0.2, gamma_corr,\r
cv::gpu::HOGDescriptor::DEFAULT_NLEVELS);\r
- cv::HOGDescriptor cpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9, 1, -1, \r
+ cv::HOGDescriptor cpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9, 1, -1,\r
HOGDescriptor::L2Hys, 0.2, gamma_corr, cv::HOGDescriptor::DEFAULT_NLEVELS);\r
gpu_hog.setSVMDetector(detector);\r
cpu_hog.setSVMDetector(detector);\r
if (use_gpu)\r
{\r
gpu_img.upload(img);\r
- gpu_hog.detectMultiScale(gpu_img, found, hit_threshold, win_stride, \r
+ gpu_hog.detectMultiScale(gpu_img, found, hit_threshold, win_stride,\r
Size(0, 0), scale, gr_threshold);\r
}\r
- else cpu_hog.detectMultiScale(img, found, hit_threshold, win_stride, \r
+ else cpu_hog.detectMultiScale(img, found, hit_threshold, win_stride,\r
Size(0, 0), scale, gr_threshold);\r
hogWorkEnd();\r
\r
{\r
if (!video_writer.isOpened())\r
{\r
- video_writer.open(args.dst_video, CV_FOURCC('x','v','i','d'), args.dst_video_fps, \r
+ video_writer.open(args.dst_video, CV_FOURCC('x','v','i','d'), args.dst_video_fps,\r
img_to_show.size(), true);\r
if (!video_writer.isOpened())\r
throw std::runtime_error("can't create video writer");\r
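A hedged sketch of the GPU HOG path that the hog_gpu hunks above reformat, reduced to the essential calls; detectPeopleGpu is an illustrative name and the default detection parameters are assumed.

#include <opencv2/gpu/gpu.hpp>
#include <vector>

// Illustrative sketch only: the gpu::HOGDescriptor accepts 8UC1 or 8UC4 input.
static void detectPeopleGpu(const cv::Mat& img, std::vector<cv::Rect>& found)
{
    cv::gpu::HOGDescriptor gpu_hog(cv::Size(64, 128));
    gpu_hog.setSVMDetector(cv::gpu::HOGDescriptor::getPeopleDetector64x128());

    cv::gpu::GpuMat gpu_img(img);                 // upload the frame
    gpu_hog.detectMultiScale(gpu_img, found);     // rectangles come back on the host
}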
using namespace cv;\r
using namespace cv::gpu;\r
\r
-void help()\r
+static void help()\r
{\r
\r
printf("\nShow off image morphology: erosion, dialation, open and close\n"\r
- "Call:\n morphology2 [image]\n"\r
- "This program also shows use of rect, elipse and cross kernels\n\n");\r
+ "Call:\n morphology2 [image]\n"\r
+ "This program also shows use of rect, elipse and cross kernels\n\n");\r
printf( "Hot keys: \n"\r
"\tESC - quit the program\n"\r
"\tr - use rectangle structuring element\n"\r
int erode_dilate_pos = 0;\r
\r
// callback function for open/close trackbar\r
-void OpenClose(int, void*)\r
+static void OpenClose(int, void*)\r
{\r
int n = open_close_pos - max_iters;\r
int an = n > 0 ? n : -n;\r
Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );\r
if( n < 0 )\r
- cv::gpu::morphologyEx(src, dst, CV_MOP_OPEN, element);\r
+ cv::gpu::morphologyEx(src, dst, CV_MOP_OPEN, element);\r
else\r
cv::gpu::morphologyEx(src, dst, CV_MOP_CLOSE, element);\r
imshow("Open/Close",(Mat)dst);\r
}\r
\r
// callback function for erode/dilate trackbar\r
-void ErodeDilate(int, void*)\r
+static void ErodeDilate(int, void*)\r
{\r
int n = erode_dilate_pos - max_iters;\r
int an = n > 0 ? n : -n;\r
\r
help();\r
\r
- \r
- if (src.channels() == 3)\r
- {\r
- // gpu support only 4th channel images\r
- GpuMat src4ch;\r
- cv::gpu::cvtColor(src, src4ch, CV_BGR2BGRA); \r
- src = src4ch;\r
- }\r
+\r
+ if (src.channels() == 3)\r
+ {\r
+ // GPU supports only 4-channel images\r
+ GpuMat src4ch;\r
+ cv::gpu::cvtColor(src, src4ch, CV_BGR2BGRA);\r
+ src = src4ch;\r
+ }\r
\r
//create windows for output images\r
namedWindow("Open/Close",1);\r
-#if _MSC_VER >= 1400\r
+#if defined _MSC_VER && _MSC_VER >= 1400\r
#pragma warning( disable : 4201 4408 4127 4100)\r
#endif\r
\r
return NCV_SUCCESS;\r
}\r
\r
-NCVStatus LoadImages (const char *frame0Name, \r
- const char *frame1Name, \r
- int &width, \r
- int &height, \r
+NCVStatus LoadImages (const char *frame0Name,\r
+ const char *frame1Name,\r
+ int &width,\r
+ int &height,\r
Ptr<NCVMatrixAlloc<Ncv32f> > &src,\r
Ptr<NCVMatrixAlloc<Ncv32f> > &dst,\r
- IplImage *&firstFrame, \r
+ IplImage *&firstFrame,\r
IplImage *&lastFrame)\r
{\r
IplImage *image;\r
std::cout << "Could not open '" << frame0Name << "'\n";\r
return NCV_FILE_ERROR;\r
}\r
- \r
+\r
firstFrame = image;\r
// copy data to src\r
ncvAssertReturnNcvStat (CopyData<RgbToMonochrome> (image, src));\r
- \r
+\r
IplImage *image2;\r
image2 = cvLoadImage (frame1Name);\r
if (image2 == 0)\r
NCVStatus ShowFlow (NCVMatrixAlloc<Ncv32f> &u, NCVMatrixAlloc<Ncv32f> &v, const char *name)\r
{\r
IplImage *flowField;\r
- \r
+\r
NCVMatrixAlloc<Ncv32f> host_u(*g_pHostMemAllocator, u.width(), u.height());\r
ncvAssertReturn(host_u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);\r
\r
ptr_u += u.stride () - u.width ();\r
ptr_v += v.stride () - v.width ();\r
}\r
- \r
+\r
cvShowImage (name, flowField);\r
\r
return NCV_SUCCESS;\r
if (image == 0) return 0;\r
\r
unsigned char *row = reinterpret_cast<unsigned char*> (image->imageData);\r
- \r
+\r
for (int i = 0; i < image->height; ++i)\r
{\r
for (int j = 0; j < image->width; ++j)\r
std::cout << "\t" << std::setw(15) << PARAM_HELP << " - display this help message\n";\r
}\r
\r
-int ProcessCommandLine(int argc, char **argv, \r
- Ncv32f &timeStep, \r
- char *&frame0Name, \r
- char *&frame1Name, \r
+int ProcessCommandLine(int argc, char **argv,\r
+ Ncv32f &timeStep,\r
+ char *&frame0Name,\r
+ char *&frame1Name,\r
NCVBroxOpticalFlowDescriptor &desc)\r
{\r
timeStep = 0.25f;\r
std::cout << "Failed\n";\r
return -1;\r
}\r
- \r
+\r
std::cout << "Backward...\n";\r
if (NCV_SUCCESS != NCVBroxOpticalFlow (desc, *g_pGPUMemAllocator, *dst, *src, uBck, vBck, 0))\r
{\r
}\r
\r
\r
-int CV_CDECL cvErrorCallback(int /*status*/, const char* /*func_name*/,\r
+static int CV_CDECL cvErrorCallback(int /*status*/, const char* /*func_name*/,\r
const char* err_msg, const char* /*file_name*/,\r
int /*line*/, void* /*userdata*/)\r
{\r
class Runnable\r
{\r
public:\r
- explicit Runnable(const std::string& name): name_(name) {} \r
+ explicit Runnable(const std::string& nameStr): name_(nameStr) {}\r
virtual ~Runnable() {}\r
- \r
- const std::string& name() const { return name_; } \r
- \r
+\r
+ const std::string& name() const { return name_; }\r
+\r
virtual void run() = 0;\r
\r
private:\r
bool stop() const { return cur_iter_idx_ >= num_iters_; }\r
\r
void cpuOn() { cpu_started_ = cv::getTickCount(); }\r
- void cpuOff() \r
+ void cpuOff()\r
{\r
int64 delta = cv::getTickCount() - cpu_started_;\r
cpu_times_.push_back(delta);\r
}\r
\r
void gpuOn() { gpu_started_ = cv::getTickCount(); }\r
- void gpuOff() \r
+ void gpuOff()\r
{\r
int64 delta = cv::getTickCount() - gpu_started_;\r
gpu_times_.push_back(delta);\r
{\r
cpu_times_.reserve(num_iters_);\r
gpu_times_.reserve(num_iters_);\r
- } \r
+ }\r
\r
void finishCurrentSubtest();\r
- void resetCurrentSubtest() \r
+ void resetCurrentSubtest()\r
{\r
cpu_elapsed_ = 0;\r
gpu_elapsed_ = 0;\r
} TestSystem::instance().gpuComplete()\r
\r
// Generates a matrix\r
-void gen(cv::Mat& mat, int rows, int cols, int type, cv::Scalar low, \r
+void gen(cv::Mat& mat, int rows, int cols, int type, cv::Scalar low,\r
cv::Scalar high);\r
\r
// Returns abs path taking into account test system working dir\r
using namespace std;\r
using namespace cv;\r
\r
-void InitMatchTemplate()\r
+static void InitMatchTemplate()\r
{\r
Mat src; gen(src, 500, 500, CV_32F, 0, 1);\r
Mat templ; gen(templ, 500, 500, CV_32F, 0, 1);\r
{\r
Mat src, dst, xmap, ymap;\r
gpu::GpuMat d_src, d_dst, d_xmap, d_ymap;\r
- \r
+\r
int interpolation = INTER_LINEAR;\r
int borderMode = BORDER_REPLICATE;\r
\r
\r
BFMatcher matcher(NORM_L2);\r
\r
- Mat query; \r
+ Mat query;\r
gen(query, 3000, desc_len, CV_32F, 0, 1);\r
- \r
- Mat train; \r
+\r
+ Mat train;\r
gen(train, 3000, desc_len, CV_32F, 0, 1);\r
\r
// Init GPU matcher\r
\r
gen(src, 4000, 4000, CV_8UC1, 0, 255);\r
d_src.upload(src);\r
- \r
+\r
SUBTEST << "4000x4000, 8UC1, CV_GRAY2BGRA";\r
- \r
+\r
cvtColor(src, dst, CV_GRAY2BGRA, 4);\r
\r
CPU_ON;\r
cvtColor(src, dst, CV_GRAY2BGRA, 4);\r
CPU_OFF;\r
- \r
+\r
gpu::cvtColor(d_src, d_dst, CV_GRAY2BGRA, 4);\r
- \r
+\r
GPU_ON;\r
gpu::cvtColor(d_src, d_dst, CV_GRAY2BGRA, 4);\r
GPU_OFF;\r
d_src.swap(d_dst);\r
\r
SUBTEST << "4000x4000, 8UC3 vs 8UC4, CV_BGR2YCrCb";\r
- \r
+\r
cvtColor(src, dst, CV_BGR2YCrCb);\r
\r
CPU_ON;\r
cvtColor(src, dst, CV_BGR2YCrCb);\r
CPU_OFF;\r
- \r
+\r
gpu::cvtColor(d_src, d_dst, CV_BGR2YCrCb, 4);\r
- \r
+\r
GPU_ON;\r
gpu::cvtColor(d_src, d_dst, CV_BGR2YCrCb, 4);\r
GPU_OFF;\r
- \r
+\r
cv::swap(src, dst);\r
d_src.swap(d_dst);\r
\r
SUBTEST << "4000x4000, 8UC4, CV_YCrCb2BGR";\r
- \r
+\r
cvtColor(src, dst, CV_YCrCb2BGR, 4);\r
\r
CPU_ON;\r
cvtColor(src, dst, CV_YCrCb2BGR, 4);\r
CPU_OFF;\r
- \r
+\r
gpu::cvtColor(d_src, d_dst, CV_YCrCb2BGR, 4);\r
- \r
+\r
GPU_ON;\r
gpu::cvtColor(d_src, d_dst, CV_YCrCb2BGR, 4);\r
GPU_OFF;\r
- \r
+\r
cv::swap(src, dst);\r
d_src.swap(d_dst);\r
\r
SUBTEST << "4000x4000, 8UC3 vs 8UC4, CV_BGR2XYZ";\r
- \r
+\r
cvtColor(src, dst, CV_BGR2XYZ);\r
\r
CPU_ON;\r
cvtColor(src, dst, CV_BGR2XYZ);\r
CPU_OFF;\r
- \r
+\r
gpu::cvtColor(d_src, d_dst, CV_BGR2XYZ, 4);\r
- \r
+\r
GPU_ON;\r
gpu::cvtColor(d_src, d_dst, CV_BGR2XYZ, 4);\r
GPU_OFF;\r
- \r
+\r
cv::swap(src, dst);\r
d_src.swap(d_dst);\r
\r
SUBTEST << "4000x4000, 8UC4, CV_XYZ2BGR";\r
- \r
+\r
cvtColor(src, dst, CV_XYZ2BGR, 4);\r
\r
CPU_ON;\r
cvtColor(src, dst, CV_XYZ2BGR, 4);\r
CPU_OFF;\r
- \r
+\r
gpu::cvtColor(d_src, d_dst, CV_XYZ2BGR, 4);\r
- \r
+\r
GPU_ON;\r
gpu::cvtColor(d_src, d_dst, CV_XYZ2BGR, 4);\r
GPU_OFF;\r
- \r
+\r
cv::swap(src, dst);\r
d_src.swap(d_dst);\r
\r
SUBTEST << "4000x4000, 8UC3 vs 8UC4, CV_BGR2HSV";\r
- \r
+\r
cvtColor(src, dst, CV_BGR2HSV);\r
\r
CPU_ON;\r
cvtColor(src, dst, CV_BGR2HSV);\r
CPU_OFF;\r
- \r
+\r
gpu::cvtColor(d_src, d_dst, CV_BGR2HSV, 4);\r
- \r
+\r
GPU_ON;\r
gpu::cvtColor(d_src, d_dst, CV_BGR2HSV, 4);\r
GPU_OFF;\r
- \r
+\r
cv::swap(src, dst);\r
d_src.swap(d_dst);\r
\r
SUBTEST << "4000x4000, 8UC4, CV_HSV2BGR";\r
- \r
+\r
cvtColor(src, dst, CV_HSV2BGR, 4);\r
\r
CPU_ON;\r
cvtColor(src, dst, CV_HSV2BGR, 4);\r
CPU_OFF;\r
- \r
+\r
gpu::cvtColor(d_src, d_dst, CV_HSV2BGR, 4);\r
- \r
+\r
GPU_ON;\r
gpu::cvtColor(d_src, d_dst, CV_HSV2BGR, 4);\r
GPU_OFF;\r
- \r
+\r
cv::swap(src, dst);\r
d_src.swap(d_dst);\r
}\r
\r
threshold(src, dst, 50.0, 0.0, THRESH_BINARY);\r
\r
- CPU_ON; \r
+ CPU_ON;\r
threshold(src, dst, 50.0, 0.0, THRESH_BINARY);\r
CPU_OFF;\r
\r
\r
threshold(src, dst, 50.0, 0.0, THRESH_TRUNC);\r
\r
- CPU_ON; \r
+ CPU_ON;\r
threshold(src, dst, 50.0, 0.0, THRESH_TRUNC);\r
CPU_OFF;\r
\r
}\r
\r
\r
-void InitSolvePnpRansac()\r
+static void InitSolvePnpRansac()\r
{\r
Mat object; gen(object, 1, 4, CV_32FC3, Scalar::all(0), Scalar::all(100));\r
Mat image; gen(image, 1, 4, CV_32FC2, Scalar::all(0), Scalar::all(100));\r
SUBTEST << size << 'x' << size << ", 8UC4";\r
\r
Mat src, dst;\r
- \r
+\r
gen(src, size, size, CV_8UC4, 0, 256);\r
\r
GaussianBlur(src, dst, Size(3, 3), 1);\r
{\r
Mat src;\r
gen(src, size, size, CV_8UC4, 0, 256);\r
- \r
+\r
for (int ksize = 3; ksize <= 16; ksize += 2)\r
- { \r
+ {\r
SUBTEST << "ksize = " << ksize << ", " << size << 'x' << size << ", 8UC4";\r
- \r
+\r
Mat kernel;\r
gen(kernel, ksize, ksize, CV_32FC1, 0.0, 1.0);\r
\r
{\r
SUBTEST << size << 'x' << size << ", 8UC4";\r
\r
- Mat src, dst; \r
+ Mat src, dst;\r
gen(src, size, size, CV_8UC4, 0, 256);\r
\r
pyrDown(src, dst);\r
{\r
SUBTEST << size << 'x' << size << ", 8UC4";\r
\r
- Mat src, dst; \r
+ Mat src, dst;\r
\r
gen(src, size, size, CV_8UC4, 0, 256);\r
\r
CPU_ON;\r
Canny(img, edges, 50.0, 100.0);\r
CPU_OFF;\r
- \r
+\r
gpu::GpuMat d_img(img);\r
gpu::GpuMat d_edges;\r
gpu::CannyBuf d_buf;\r
\r
Mat frame1 = imread(abspath("rubberwhale2.png"));\r
if (frame1.empty()) throw runtime_error("can't open rubberwhale2.png");\r
- \r
+\r
Mat gray_frame;\r
cvtColor(frame0, gray_frame, COLOR_BGR2GRAY);\r
- \r
+\r
for (int points = 1000; points <= 8000; points *= 2)\r
{\r
SUBTEST << points;\r
using namespace cv;\r
using namespace cv::gpu;\r
\r
-void download(const GpuMat& d_mat, vector<Point2f>& vec)\r
+static void download(const GpuMat& d_mat, vector<Point2f>& vec)\r
{\r
vec.resize(d_mat.cols);\r
Mat mat(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);\r
d_mat.download(mat);\r
}\r
\r
-void download(const GpuMat& d_mat, vector<uchar>& vec)\r
+static void download(const GpuMat& d_mat, vector<uchar>& vec)\r
{\r
vec.resize(d_mat.cols);\r
Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);\r
d_mat.download(mat);\r
}\r
\r
-void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status, Scalar line_color = Scalar(0, 0, 255))\r
+static void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status, Scalar line_color = Scalar(0, 0, 255))\r
{\r
for (size_t i = 0; i < prevPts.size(); ++i)\r
{\r
GlArrays arr;\r
};\r
\r
-void drawCallback(void* userdata)\r
+static void drawCallback(void* userdata)\r
{\r
DrawData* data = static_cast<DrawData*>(userdata);\r
\r
return c + (d - c) * (x - a) / (b - a);\r
}\r
\r
-void getFlowField(const Mat& u, const Mat& v, Mat& flowField)\r
+static void getFlowField(const Mat& u, const Mat& v, Mat& flowField)\r
{\r
float maxDisplacement = 1.0f;\r
\r
void printParams() const;\r
\r
void workBegin() { work_begin = getTickCount(); }\r
- void workEnd() \r
+ void workEnd()\r
{\r
int64 d = getTickCount() - work_begin;\r
double f = getTickFrequency();\r
bool running;\r
\r
Mat left_src, right_src;\r
- Mat left, right; \r
+ Mat left, right;\r
gpu::GpuMat d_left, d_right;\r
\r
gpu::StereoBM_GPU bm;\r
double work_fps;\r
};\r
\r
-void printHelp()\r
+static void printHelp()\r
{\r
cout << "Usage: stereo_match_gpu\n"\r
<< "\t--left <left_view> --right <right_view> # must be rectified\n"\r
{\r
if (string(argv[i]) == "--left") p.left = argv[++i];\r
else if (string(argv[i]) == "--right") p.right = argv[++i];\r
- else if (string(argv[i]) == "--method") \r
+ else if (string(argv[i]) == "--method")\r
{\r
if (string(argv[i + 1]) == "BM") p.method = BM;\r
else if (string(argv[i + 1]) == "BP") p.method = BP;\r
}\r
\r
\r
-App::App(const Params& p)\r
- : p(p), running(false) \r
+App::App(const Params& params)\r
+ : p(params), running(false)\r
{\r
cv::gpu::printShortCudaDeviceInfo(cv::gpu::getDevice());\r
\r
imshow("left", left);\r
imshow("right", right);\r
\r
- // Set common parameters\r
+ // Set common parameters\r
bm.ndisp = p.ndisp;\r
bp.ndisp = p.ndisp;\r
csbp.ndisp = p.ndisp;\r
workBegin();\r
switch (p.method)\r
{\r
- case Params::BM: \r
+ case Params::BM:\r
if (d_left.channels() > 1 || d_right.channels() > 1)\r
{\r
cout << "BM doesn't support color images\n";\r
imshow("left", left);\r
imshow("right", right);\r
}\r
- bm(d_left, d_right, d_disp); \r
+ bm(d_left, d_right, d_disp);\r
break;\r
case Params::BP: bp(d_left, d_right, d_disp); break;\r
case Params::CSBP: csbp(d_left, d_right, d_disp); break;\r
break;\r
case 'p': case 'P':\r
printParams();\r
- break; \r
+ break;\r
case 'g': case 'G':\r
if (left.channels() == 1 && p.method != Params::BM)\r
{\r
left = left_src;\r
right = right_src;\r
}\r
- else \r
+ else\r
{\r
cvtColor(left_src, left, CV_BGR2GRAY);\r
cvtColor(right_src, right, CV_BGR2GRAY);\r
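The BM branch above only accepts single-channel input, which is why the sample converts to grayscale here. A hedged sketch of that branch in isolation (OpenCV 2.4 gpu module; bmGpu is an illustrative helper).

#include <opencv2/gpu/gpu.hpp>

// Illustrative sketch only: left/right must be rectified CV_8UC1 images and
// ndisp a multiple of 8; the result is a CV_8U disparity map.
static void bmGpu(const cv::Mat& left, const cv::Mat& right, cv::Mat& disp, int ndisp)
{
    cv::gpu::StereoBM_GPU bm(cv::gpu::StereoBM_GPU::BASIC_PRESET, ndisp);
    cv::gpu::GpuMat d_left(left), d_right(right), d_disp;

    bm(d_left, d_right, d_disp);   // block matching on the device
    d_disp.download(disp);         // copy the disparity map back to the host
}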
using namespace cv;\r
using namespace cv::gpu;\r
\r
-void help()\r
+static void help()\r
{\r
cout << "\nThis program demonstrates using SURF_GPU features detector, descriptor extractor and BruteForceMatcher_GPU" << endl;\r
cout << "\nUsage:\n\tmatcher_simple_gpu --left <image1> --right <image2>" << endl;\r
GpuMat descriptors1GPU, descriptors2GPU;\r
surf(img1, GpuMat(), keypoints1GPU, descriptors1GPU);\r
surf(img2, GpuMat(), keypoints2GPU, descriptors2GPU);\r
- \r
+\r
cout << "FOUND " << keypoints1GPU.cols << " keypoints on first image" << endl;\r
cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;\r
\r
BruteForceMatcher_GPU< L2<float> > matcher;\r
GpuMat trainIdx, distance;\r
matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);\r
- \r
+\r
// downloading results\r
vector<KeyPoint> keypoints1, keypoints2;\r
vector<float> descriptors1, descriptors2;\r
// drawing the results\r
Mat img_matches;\r
drawMatches(Mat(img1), keypoints1, Mat(img2), keypoints2, matches, img_matches);\r
- \r
+\r
namedWindow("matches", 0);\r
imshow("matches", img_matches);\r
waitKey(0);\r