# --------------------------------------------------------------
# Top level OpenCV project
# --------------------------------------------------------------
-if(NOT IOS)
- cmake_minimum_required(VERSION 2.6.3)
+if(CMAKE_GENERATOR MATCHES Xcode AND XCODE_VERSION VERSION_GREATER 4.3)
+ cmake_minimum_required(VERSION 2.8.8)
+elseif(IOS)
+ cmake_minimum_required(VERSION 2.8.0)
else()
- cmake_minimum_required(VERSION 2.8)
+ cmake_minimum_required(VERSION 2.6.3)
endif()
set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "Configs" FORCE)
set_property( CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${CMAKE_CONFIGURATION_TYPES} )
endif()
-project(OpenCV)
+project(OpenCV CXX C)
include(cmake/OpenCVUtils.cmake REQUIRED)
OCV_OPTION(BUILD_FAT_JAVA_LIB "Create fat java wrapper containing the whole OpenCV library" ON IF ANDROID AND NOT BUILD_SHARED_LIBS AND CMAKE_COMPILER_IS_GNUCXX )
# 3rd party libs
-OCV_OPTION(BUILD_ZLIB "Build zlib from source" WIN32 OR IOS )
-OCV_OPTION(BUILD_TIFF "Build libtiff from source" WIN32 OR IOS OR ANDROID )
-OCV_OPTION(BUILD_JASPER "Build libjasper from source" WIN32 OR IOS OR ANDROID )
-OCV_OPTION(BUILD_JPEG "Build libjpeg from source" WIN32 OR IOS OR ANDROID )
-OCV_OPTION(BUILD_PNG "Build libpng from source" WIN32 OR IOS OR ANDROID )
+OCV_OPTION(BUILD_ZLIB "Build zlib from source" WIN32 OR IOS OR APPLE )
+OCV_OPTION(BUILD_TIFF "Build libtiff from source" WIN32 OR IOS OR ANDROID OR APPLE )
+OCV_OPTION(BUILD_JASPER "Build libjasper from source" WIN32 OR IOS OR ANDROID OR APPLE )
+OCV_OPTION(BUILD_JPEG "Build libjpeg from source" WIN32 OR IOS OR ANDROID OR APPLE )
+OCV_OPTION(BUILD_PNG "Build libpng from source" WIN32 OR IOS OR ANDROID OR APPLE )
# OpenCV installation options
# ===================================================
include(cmake/OpenCVCompilerOptions.cmake REQUIRED)
# In case of Makefiles if the user does not setup CMAKE_BUILD_TYPE, assume it's Release:
-if(CMAKE_GENERATOR MATCHES "Makefiles" AND "${CMAKE_BUILD_TYPE}" STREQUAL "")
+if(CMAKE_GENERATOR MATCHES "Makefiles|Ninja" AND "${CMAKE_BUILD_TYPE}" STREQUAL "")
set(CMAKE_BUILD_TYPE Release)
endif()
endif()
endif()
if(HAVE_FFMPEG_CODEC AND HAVE_FFMPEG_FORMAT AND HAVE_FFMPEG_UTIL)
- if(HAVE_FFMPEG_SWSCALE OR NOT HAVE_GENTOO_FFMPEG)
+ if(HAVE_FFMPEG_SWSCALE)
set(HAVE_FFMPEG 1)
endif()
endif()
endif()
endif()
- if(HAVE_FFMPEG)
- if(NOT ALIASOF_libavformat_VERSION VERSION_LESS "52.111.0")
- set(NEW_FFMPEG ON)
- endif()
- endif()
-
if(WITH_1394)
CHECK_MODULE(libdc1394-2 HAVE_DC1394_2)
if(NOT HAVE_DC1394_2)
"${FFMPEG_LIB_DIR}/libavformat.a" "${FFMPEG_LIB_DIR}/libavutil.a"
"${FFMPEG_LIB_DIR}/libswscale.a")
set(HAVE_FFMPEG 1)
- set(NEW_FFMPEG 1)
endif()
endif()
endif()
if(CMAKE_GENERATOR MATCHES Xcode)
status(" Xcode:" ${XCODE_VERSION})
endif()
+if(NOT CMAKE_GENERATOR MATCHES "Xcode|Visual Studio")
+ status(" Configuration:" ${CMAKE_BUILD_TYPE})
+endif()
# C/C++ options
status("")
status(" PNG:" "NO")
endif()
if(WITH_TIFF)
- status(" TIFF:" TIFF_FOUND THEN "${TIFF_LIBRARY} (ver ${TIFF_VERSION})" ELSE "build (ver ${TIFF_VERSION})")
+ if(TIFF_VERSION_STRING AND TIFF_FOUND)
+ status(" TIFF:" "${TIFF_LIBRARY} (ver ${TIFF_VERSION} - ${TIFF_VERSION_STRING})")
+ else()
+ status(" TIFF:" TIFF_FOUND THEN "${TIFF_LIBRARY} (ver ${TIFF_VERSION})" ELSE "build (ver ${TIFF_VERSION})")
+ endif()
else()
status(" TIFF:" "NO")
endif()
if(HAVE_CUDA)
status("")
- status(" NVIDIA CUDA:")
+ status(" NVIDIA CUDA:" "(ver ${CUDA_VERSION_STRING})")
status(" Use CUFFT:" HAVE_CUFFT THEN YES ELSE NO)
status(" Use CUBLAS:" HAVE_CUBLAS THEN YES ELSE NO)
status(" Python:")
status(" Interpreter:" PYTHON_EXECUTABLE THEN "${PYTHON_EXECUTABLE} (ver ${PYTHON_VERSION_FULL})" ELSE NO)
if(BUILD_opencv_python)
- status(" Libraries:" HAVE_opencv_python THEN ${PYTHON_LIBRARIES} ELSE NO)
+ if(PYTHONLIBS_VERSION_STRING)
+ status(" Libraries:" HAVE_opencv_python THEN "${PYTHON_LIBRARIES} (ver ${PYTHONLIBS_VERSION_STRING})" ELSE NO)
+ else()
+ status(" Libraries:" HAVE_opencv_python THEN ${PYTHON_LIBRARIES} ELSE NO)
+ endif()
status(" numpy:" PYTHON_USE_NUMPY THEN "${PYTHON_NUMPY_INCLUDE_DIR} (ver ${PYTHON_NUMPY_VERSION})" ELSE "NO (Python wrappers can not be generated)")
status(" packages path:" PYTHON_EXECUTABLE THEN "${PYTHON_PACKAGES_PATH}" ELSE "-")
endif()
status("-----------------------------------------------------------------")
status("")
+ocv_finalize_status()
+
# ----------------------------------------------------------------------------
# Warn in the case of in-source build
# ----------------------------------------------------------------------------
# under the ${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME}
# (depending on the target ABI). This is convenient for Android packaging.
#
+# Authors:
+# Ethan Rublee ethan.ruble@gmail.com
+# Andrey Kamaev andrey.kamaev@itseez.com
#
# Change Log:
-# - initial version December 2010 Ethan Rublee ethan.ruble@gmail.com
-# - modified April 2011 Andrey Kamaev andrey.kamaev@itseez.com
+# - initial version December 2010
+# - modified April 2011
# [+] added possibility to build with NDK (without standalone toolchain)
# [+] support cross-compilation on Windows (native, no cygwin support)
# [+] added compiler option to force "char" type to be signed
# [+] EXECUTABLE_OUTPUT_PATH is set by toolchain (required on Windows)
# [~] Fixed bug with ANDROID_API_LEVEL variable
# [~] turn off SWIG search if it is not found first time
-# - modified May 2011 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified May 2011
# [~] ANDROID_LEVEL is renamed to ANDROID_API_LEVEL
# [+] ANDROID_API_LEVEL is detected by toolchain if not specified
# [~] added guard to prevent changing of output directories on the first
# cmake pass
# [~] toolchain exits with error if ARM_TARGET is not recognized
-# - modified June 2011 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified June 2011
# [~] default NDK path is updated for version r5c
# [+] variable CMAKE_SYSTEM_PROCESSOR is set based on ARM_TARGET
# [~] toolchain install directory is added to linker paths
# [+] added macro find_host_package, find_host_program to search
# packages/programs on the host system
# [~] fixed path to STL library
-# - modified July 2011 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified July 2011
# [~] fixed options caching
# [~] search for all supported NDK versions
# [~] allowed spaces in NDK path
-# - modified September 2011 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified September 2011
# [~] updated for NDK r6b
-# - modified November 2011 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified November 2011
# [*] rewritten for NDK r7
# [+] x86 toolchain support (experimental)
# [+] added "armeabi-v6 with VFP" ABI for ARMv6 processors.
# [~] ARM_TARGET is renamed to ANDROID_ABI
# [~] ARMEABI_NDK_NAME is renamed to ANDROID_NDK_ABI_NAME
# [~] ANDROID_API_LEVEL is renamed to ANDROID_NATIVE_API_LEVEL
-# - modified January 2012 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified January 2012
# [+] added stlport_static support (experimental)
# [+] added special check for cygwin
# [+] filtered out hidden files (starting with .) while globbing inside NDK
# [+] automatically applied GLESv2 linkage fix for NDK revisions 5-6
# [+] added ANDROID_GET_ABI_RAWNAME to get NDK ABI names by CMake flags
-# - modified February 2012 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified February 2012
# [+] updated for NDK r7b
# [~] fixed cmake try_compile() command
# [~] Fix for missing install_name_tool on OS X
-# - modified March 2012 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified March 2012
# [~] fixed incorrect C compiler flags
# [~] fixed CMAKE_SYSTEM_PROCESSOR change on ANDROID_ABI change
# [+] improved toolchain loading speed
# [+] added assembler language support (.S)
# [+] allowed preset search paths and extra search suffixes
-# - modified April 2012 Andrey Kamaev andrey.kamaev@itseez.com
+# - modified April 2012
# [+] updated for NDK r7c
+# [~] fixed most of problems with compiler/linker flags and caching
+# [+] added option ANDROID_FUNCTION_LEVEL_LINKING
# ------------------------------------------------------------------------------
cmake_minimum_required( VERSION 2.6.3 )
# extra arm-specific flags
set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fsigned-char" )
elseif( X86 )
- set( CMAKE_CXX_FLAGS "-ffunction-sections -funwind-tables" )
- set( CMAKE_C_FLAGS "-ffunction-sections -funwind-tables" )
+ set( CMAKE_CXX_FLAGS "-funwind-tables" )
+ set( CMAKE_C_FLAGS "-funwind-tables" )
if( ANDROID_USE_STLPORT )
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti -fno-exceptions" )
set( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-rtti -fno-exceptions" )
#linker flags
list( APPEND ANDROID_SYSTEM_LIB_DIRS "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}" "${CMAKE_INSTALL_PREFIX}/libs/${ANDROID_NDK_ABI_NAME}" )
-set( LINKER_FLAGS "" )
+set( ANDROID_LINKER_FLAGS "" )
#STL
if( ANDROID_USE_STLPORT )
if( EXISTS "${__stlLibPath}/libstlport_static.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libstlport_static.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstlport_static.a" )
endif()
if( EXISTS "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstlport_static.a" )
- set( LINKER_FLAGS "${LINKER_FLAGS} -Wl,--start-group -lstlport_static" )
+ set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,--start-group -lstlport_static" )
endif()
else( ANDROID_USE_STLPORT )
if( EXISTS "${__stlLibPath}/libgnustl_static.a" )
__COPY_IF_DIFFERENT( "${__stlLibPath}/libstdc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" )
endif()
if( EXISTS "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libstdc++.a" )
- set( LINKER_FLAGS "${LINKER_FLAGS} -lstdc++" )
+ set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -lstdc++" )
endif()
#gcc exception & rtti support
__COPY_IF_DIFFERENT( "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}/lib/libsupc++.a" "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" )
endif()
if( EXISTS "${CMAKE_BINARY_DIR}/systemlibs/${ANDROID_NDK_ABI_NAME}/libsupc++.a" )
- set( LINKER_FLAGS "${LINKER_FLAGS} -lsupc++" )
+ set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -lsupc++" )
endif()
endif( ANDROID_USE_STLPORT )
set( ANDROID_NO_UNDEFINED ${ANDROID_NO_UNDEFINED} CACHE BOOL "Show all undefined symbols as linker errors" FORCE )
mark_as_advanced( ANDROID_NO_UNDEFINED )
if( ANDROID_NO_UNDEFINED )
- set( LINKER_FLAGS "-Wl,--no-undefined ${LINKER_FLAGS}" )
+ set( ANDROID_LINKER_FLAGS "-Wl,--no-undefined ${ANDROID_LINKER_FLAGS}" )
endif()
if (ANDROID_NDK MATCHES "-r[56].?$")
else()
__INIT_VARIABLE( ANDROID_SO_UNDEFINED VALUES OFF )
endif()
+
set( ANDROID_SO_UNDEFINED ${ANDROID_SO_UNDEFINED} CACHE BOOL "Allows or disallows undefined symbols in shared libraries" FORCE )
mark_as_advanced( ANDROID_SO_UNDEFINED )
if( ANDROID_SO_UNDEFINED )
- set( LINKER_FLAGS "${LINKER_FLAGS} -Wl,-allow-shlib-undefined" )
+ set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} -Wl,-allow-shlib-undefined" )
+endif()
+
+__INIT_VARIABLE( ANDROID_FUNCTION_LEVEL_LINKING VALUES ON )
+set( ANDROID_FUNCTION_LEVEL_LINKING ${ANDROID_FUNCTION_LEVEL_LINKING} CACHE BOOL "Allows or disallows function-level linking (-ffunction-sections, -Wl,--gc-sections)" FORCE )
+mark_as_advanced( ANDROID_FUNCTION_LEVEL_LINKING )
+if( ANDROID_FUNCTION_LEVEL_LINKING )
+ set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fdata-sections -ffunction-sections" )
+ set( ANDROID_LINKER_FLAGS "-Wl,--gc-sections ${ANDROID_LINKER_FLAGS}" )
endif()
if( ARMEABI_V7A )
# this is *required* to use the following linker flags that routes around
# a CPU bug in some Cortex-A8 implementations:
- set( LINKER_FLAGS "-Wl,--fix-cortex-a8 ${LINKER_FLAGS}" )
+ set( ANDROID_LINKER_FLAGS "-Wl,--fix-cortex-a8 ${ANDROID_LINKER_FLAGS}" )
endif()
#cache flags
set( CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}" CACHE STRING "c Release flags" )
set( CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING "c++ Debug flags" )
set( CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}" CACHE STRING "c Debug flags" )
-set( CMAKE_SHARED_LINKER_FLAGS "${LINKER_FLAGS}" CACHE STRING "linker flags" )
-set( CMAKE_MODULE_LINKER_FLAGS "${LINKER_FLAGS}" CACHE STRING "linker flags" )
-set( CMAKE_EXE_LINKER_FLAGS "-Wl,--gc-sections -Wl,-z,nocopyreloc ${LINKER_FLAGS}" CACHE STRING "linker flags" )
+set( CMAKE_SHARED_LINKER_FLAGS "" CACHE STRING "linker flags" )
+set( CMAKE_MODULE_LINKER_FLAGS "" CACHE STRING "linker flags" )
+set( CMAKE_EXE_LINKER_FLAGS "-Wl,-z,nocopyreloc" CACHE STRING "linker flags" )
include_directories( SYSTEM ${ANDROID_SYSTEM_INCLUDE_DIRS} )
link_directories( ${ANDROID_SYSTEM_LIB_DIRS} )
#finish flags
-set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS}" CACHE INTERNAL "Extra Android falgs")
-set( CMAKE_CXX_FLAGS "${ANDROID_CXX_FLAGS} ${CMAKE_CXX_FLAGS}" )
-set( CMAKE_C_FLAGS "${ANDROID_CXX_FLAGS} ${CMAKE_C_FLAGS}" )
+set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS}" CACHE INTERNAL "Extra Android compiler flags")
+set( ANDROID_LINKER_FLAGS "${ANDROID_LINKER_FLAGS}" CACHE INTERNAL "Extra Android linker flags")
+set( CMAKE_CXX_FLAGS "${ANDROID_CXX_FLAGS} ${CMAKE_CXX_FLAGS}" )
+set( CMAKE_C_FLAGS "${ANDROID_CXX_FLAGS} ${CMAKE_C_FLAGS}" )
+set( CMAKE_SHARED_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS}" )
+set( CMAKE_MODULE_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${CMAKE_MODULE_LINKER_FLAGS}" )
+set( CMAKE_EXE_LINKER_FLAGS "${ANDROID_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS}" )
#set these global flags for cmake client scripts to change behavior
set( ANDROID True )
# export toolchain settings for the try_compile() command
if( NOT PROJECT_NAME STREQUAL "CMAKE_TRY_COMPILE" )
set( __toolchain_config "")
- foreach( __var ANDROID_ABI ANDROID_FORCE_ARM_BUILD ANDROID_NATIVE_API_LEVEL ANDROID_NO_UNDEFINED ANDROID_SO_UNDEFINED ANDROID_SET_OBSOLETE_VARIABLES LIBRARY_OUTPUT_PATH_ROOT ANDROID_USE_STLPORT ANDROID_FORBID_SYGWIN ANDROID_NDK ANDROID_STANDALONE_TOOLCHAIN )
+ foreach( __var ANDROID_ABI ANDROID_FORCE_ARM_BUILD ANDROID_NATIVE_API_LEVEL ANDROID_NO_UNDEFINED ANDROID_SO_UNDEFINED ANDROID_SET_OBSOLETE_VARIABLES LIBRARY_OUTPUT_PATH_ROOT ANDROID_USE_STLPORT ANDROID_FORBID_SYGWIN ANDROID_NDK ANDROID_STANDALONE_TOOLCHAIN ANDROID_FUNCTION_LEVEL_LINKING )
if( DEFINED ${__var} )
set( __toolchain_config "${__toolchain_config}set( ${__var} \"${${__var}}\" )\n" )
endif()
# Variables controlling behavior or set by cmake toolchain:
# ANDROID_ABI : "armeabi-v7a" (default), "armeabi", "armeabi-v7a with NEON", "armeabi-v7a with VFPV3", "armeabi-v6 with VFP", "x86"
-# ANDROID_FORCE_ARM_BUILD : ON/OFF
# ANDROID_NATIVE_API_LEVEL : 3,4,5,8,9,14 (depends on NDK version)
-# ANDROID_NO_UNDEFINED : ON/OFF
-# ANDROID_SO_UNDEFINED : OFF/ON (default depends on NDK version)
# ANDROID_SET_OBSOLETE_VARIABLES : ON/OFF
-# LIBRARY_OUTPUT_PATH_ROOT : <any valid path>
# ANDROID_USE_STLPORT : OFF/ON - EXPERIMENTAL!!!
# ANDROID_FORBID_SYGWIN : ON/OFF
+# ANDROID_NO_UNDEFINED : ON/OFF
+# ANDROID_SO_UNDEFINED : OFF/ON (default depends on NDK version)
+# ANDROID_FUNCTION_LEVEL_LINKING : ON/OFF
+# Variables that take effect only at the first run:
+# ANDROID_FORCE_ARM_BUILD : ON/OFF
+# LIBRARY_OUTPUT_PATH_ROOT : <any valid path>
# Can be set only at the first run:
# ANDROID_NDK
# ANDROID_STANDALONE_TOOLCHAIN
# ANDROID_NDK_ABI_NAME : "armeabi", "armeabi-v7a" or "x86" depending on ANDROID_ABI
# ANDROID_ARCH_NAME : "arm" or "x86" depending on ANDROID_ABI
# TOOL_OS_SUFFIX : "" or ".exe" depending on host platform
+# ANDROID_SYSROOT : path to the compiler sysroot
# ANDROID_SYSTEM_INCLUDE_DIRS
# ANDROID_SYSTEM_LIB_DIRS
# Obsolete:
# ANDROID_COMPILER_VERSION : GCC version used
# ANDROID_CXX_FLAGS : C/C++ compiler flags required by Android platform
# ANDROID_SUPPORTED_ABIS : list of currently allowed values for ANDROID_ABI
-# ANDROID_SYSROOT : path to the compiler sysroot
# ANDROID_TOOLCHAIN_NAME : "standalone", "arm-linux-androideabi-4.4.3" or "x86-4.4.3" or something similar.
# ANDROID_TOOLCHAIN_MACHINE_NAME : "arm-linux-androideabi", "arm-eabi" or "i686-android-linux"
# ANDROID_TOOLCHAIN_ROOT : path to the top level of toolchain (standalone or placed inside NDK)
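#
# Example configure line (the toolchain path and source path below are placeholders):
#   cmake -DCMAKE_TOOLCHAIN_FILE=path/to/android.toolchain.cmake \
#         -DANDROID_ABI="armeabi-v7a" -DANDROID_NATIVE_API_LEVEL=8 path/to/opencv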
IF EXIST .\wincfg.cmd CALL .\wincfg.cmd\r
POPD\r
\r
+:: inherit old names\r
+IF NOT DEFINED CMAKE SET CMAKE=%CMAKE_EXE%\r
+IF NOT DEFINED MAKE SET MAKE=%MAKE_EXE%\r
+\r
:: defaults\r
IF NOT DEFINED BUILD_DIR SET BUILD_DIR=build\r
IF NOT DEFINED ANDROID_ABI SET ANDROID_ABI=armeabi-v7a\r
IF NOT DEFINED ANDROID_NDK (ECHO. & ECHO You should set an environment variable ANDROID_NDK to the full path to your copy of Android NDK & GOTO end)\r
(CD "%ANDROID_NDK%") || (ECHO. & ECHO Directory "%ANDROID_NDK%" specified by ANDROID_NDK variable does not exist & GOTO end)\r
\r
-IF NOT EXIST "%CMAKE_EXE%" (ECHO. & ECHO You should set an environment variable CMAKE_EXE to the full path to cmake executable & GOTO end)\r
-IF NOT EXIST "%MAKE_EXE%" (ECHO. & ECHO You should set an environment variable MAKE_EXE to the full path to native port of make executable & GOTO end)\r
+IF NOT EXIST "%CMAKE%" (ECHO. & ECHO You should set an environment variable CMAKE to the full path to cmake executable & GOTO end)\r
+IF NOT EXIST "%MAKE%" (ECHO. & ECHO You should set an environment variable MAKE to the full path to native port of make executable & GOTO end)\r
\r
IF NOT %BUILD_JAVA_PART%==1 GOTO required_variables_checked\r
\r
:required_variables_checked\r
POPD\r
\r
+:: check for ninja\r
+echo "%MAKE%"|findstr /i ninja >nul:\r
+IF %errorlevel%==1 (SET BUILD_WITH_NINJA=0) ELSE (SET BUILD_WITH_NINJA=1)\r
+IF %BUILD_WITH_NINJA%==1 (SET CMAKE_GENERATOR=Ninja) ELSE (SET CMAKE_GENERATOR=MinGW Makefiles)\r
+\r
:: create build dir\r
IF DEFINED REBUILD rmdir /S /Q "%BUILD_DIR%" 2>NUL\r
MKDIR "%BUILD_DIR%" 2>NUL\r
ECHO.\r
IF NOT %BUILD_OPENCV%==1 GOTO other-cmake\r
:opencv-cmake\r
-("%CMAKE_EXE%" -G"MinGW Makefiles" -DANDROID_ABI="%ANDROID_ABI%" -DCMAKE_TOOLCHAIN_FILE="%SOURCE_DIR%"\android.toolchain.cmake -DCMAKE_MAKE_PROGRAM="%MAKE_EXE%" %* "%SOURCE_DIR%\..") && GOTO cmakefin\r
+("%CMAKE%" -G"%CMAKE_GENERATOR%" -DANDROID_ABI="%ANDROID_ABI%" -DCMAKE_TOOLCHAIN_FILE="%SOURCE_DIR%"\android.toolchain.cmake -DCMAKE_MAKE_PROGRAM="%MAKE%" %* "%SOURCE_DIR%\..") && GOTO cmakefin\r
ECHO. & ECHO cmake failed & GOTO end\r
:other-cmake\r
-("%CMAKE_EXE%" -G"MinGW Makefiles" -DANDROID_ABI="%ANDROID_ABI%" -DOpenCV_DIR="%OPENCV_BUILD_DIR%" -DCMAKE_TOOLCHAIN_FILE="%OPENCV_BUILD_DIR%\..\android.toolchain.cmake" -DCMAKE_MAKE_PROGRAM="%MAKE_EXE%" %* "%SOURCE_DIR%") && GOTO cmakefin\r
+("%CMAKE%" -G"%CMAKE_GENERATOR%" -DANDROID_ABI="%ANDROID_ABI%" -DOpenCV_DIR="%OPENCV_BUILD_DIR%" -DCMAKE_TOOLCHAIN_FILE="%OPENCV_BUILD_DIR%\..\android.toolchain.cmake" -DCMAKE_MAKE_PROGRAM="%MAKE%" %* "%SOURCE_DIR%") && GOTO cmakefin\r
ECHO. & ECHO cmake failed & GOTO end\r
:cmakefin\r
\r
:: run make\r
ECHO. & ECHO Building native libs...\r
-("%MAKE_EXE%" -j %NUMBER_OF_PROCESSORS% VERBOSE=%VERBOSE%) || (ECHO. & ECHO make failed & GOTO end)\r
+IF %BUILD_WITH_NINJA%==0 ("%MAKE%" -j %NUMBER_OF_PROCESSORS% VERBOSE=%VERBOSE%) || (ECHO. & ECHO make failed & GOTO end)\r
+IF %BUILD_WITH_NINJA%==1 ("%MAKE%") || (ECHO. & ECHO ninja failed & GOTO end)\r
\r
IF NOT %BUILD_JAVA_PART%==1 GOTO end\r
POPD && PUSHD %SOURCE_DIR%\r
set(OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE "")
set(OPENCV_EXTRA_EXE_LINKER_FLAGS_DEBUG "")
-if(MSVC)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /D _CRT_SECURE_NO_DEPRECATE /D _CRT_NONSTDC_NO_DEPRECATE /D _SCL_SECURE_NO_WARNINGS")
- # 64-bit portability warnings, in MSVC80
- if(MSVC80)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Wp64")
- endif()
-
- if(BUILD_WITH_DEBUG_INFO)
- set(OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE "${OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE} /debug")
- endif()
-
- # Remove unreferenced functions: function level linking
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Gy")
- if(NOT MSVC_VERSION LESS 1400)
- set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /bigobj")
- endif()
- if(BUILD_WITH_DEBUG_INFO)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} /Zi")
- endif()
-endif()
-
if(CMAKE_COMPILER_IS_GNUCXX)
# High level of warnings.
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wall")
# Other optimizations
if(ENABLE_OMIT_FRAME_POINTER)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -fomit-frame-pointer")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -fomit-frame-pointer")
else()
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -fno-omit-frame-pointer")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -fno-omit-frame-pointer")
endif()
if(ENABLE_FAST_MATH)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -ffast-math")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -ffast-math")
endif()
if(ENABLE_POWERPC)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -mcpu=G3 -mtune=G5")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mcpu=G3 -mtune=G5")
endif()
if(ENABLE_SSE)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -msse")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse")
endif()
if(ENABLE_SSE2)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -msse2")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse2")
endif()
# SSE3 and further should be disabled under MingW because it generates compiler errors
if(NOT MINGW)
if(ENABLE_SSE3)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -msse3")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse3")
endif()
if(${CMAKE_OPENCV_GCC_VERSION_NUM} GREATER 402)
if(HAVE_GCC42_OR_NEWER OR APPLE)
if(ENABLE_SSSE3)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -mssse3")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mssse3")
endif()
if(HAVE_GCC43_OR_NEWER)
if(ENABLE_SSE41)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -msse4.1")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse4.1")
endif()
if(ENABLE_SSE42)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -msse4.2")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -msse4.2")
endif()
endif()
endif()
if(X86 OR X86_64)
if(NOT APPLE AND CMAKE_SIZEOF_VOID_P EQUAL 4)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -mfpmath=387")
+ if(ENABLE_SSE2)
+      set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mfpmath=sse") # !! important: keep FP behavior consistent with x64 compilers
+ else()
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -mfpmath=387")
+ endif()
endif()
endif()
# Profiling?
if(ENABLE_PROFILING)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} -pg -g")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -pg -g")
+ # turn off incompatible options
+ foreach(flags CMAKE_CXX_FLAGS CMAKE_C_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG OPENCV_EXTRA_C_FLAGS_RELEASE)
+ string(REPLACE "-fomit-frame-pointer" "" ${flags} "${${flags}}")
+ string(REPLACE "-ffunction-sections" "" ${flags} "${${flags}}")
+ endforeach()
elseif(NOT APPLE AND NOT ANDROID)
# Remove unreferenced functions: function level linking
set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -ffunction-sections")
endif()
if(MSVC)
- # 64-bit MSVC compiler uses SSE/SSE2 by default
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /D _CRT_SECURE_NO_DEPRECATE /D _CRT_NONSTDC_NO_DEPRECATE /D _SCL_SECURE_NO_WARNINGS")
+ # 64-bit portability warnings, in MSVC80
+ if(MSVC80)
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Wp64")
+ endif()
+
+ if(BUILD_WITH_DEBUG_INFO)
+ set(OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE "${OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE} /debug")
+ endif()
+
+ # Remove unreferenced functions: function level linking
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Gy")
+ if(NOT MSVC_VERSION LESS 1400)
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /bigobj")
+ endif()
+ if(BUILD_WITH_DEBUG_INFO)
+ set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} /Zi")
+ endif()
+
if(NOT MSVC64)
+ # 64-bit MSVC compiler uses SSE/SSE2 by default
if(ENABLE_SSE)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} /arch:SSE")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE")
endif()
if(ENABLE_SSE2)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} /arch:SSE2")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE2")
endif()
endif()
+
if(ENABLE_SSE3)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} /arch:SSE3")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE3")
endif()
if(ENABLE_SSE4_1)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} /arch:SSE4.1")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE4.1")
endif()
+
if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1)
- set(OPENCV_EXTRA_C_FLAGS_RELEASE "${OPENCV_EXTRA_C_FLAGS_RELEASE} /Oi")
+ set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Oi")
+ endif()
+
+ if(X86 OR X86_64)
+ if(CMAKE_SIZEOF_VOID_P EQUAL 4 AND ENABLE_SSE2)
+      set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /fp:fast") # !! important: keep FP behavior consistent with x64 compilers
+ endif()
endif()
endif()
if(ANDROID_EXECUTABLE)
if(NOT ANDROID_SDK_DETECT_QUIET)
- message(STATUS " Found android tool: ${ANDROID_EXECUTABLE}")
+ message(STATUS "Found android tool: ${ANDROID_EXECUTABLE}")
endif()
get_filename_component(ANDROID_SDK_TOOLS_PATH "${ANDROID_EXECUTABLE}" PATH)
# put the final .apk to the OpenCV's bin folder
add_custom_command(TARGET ${target} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy "${android_proj_bin_dir}/bin/${target}-debug.apk" "${OpenCV_BINARY_DIR}/bin/${target}.apk")
- if(INSTALL_ANDROID_EXAMPLES AND target MATCHES "^example-")
+ if(INSTALL_ANDROID_EXAMPLES AND "${target}" MATCHES "^example-")
install(FILES "${OpenCV_BINARY_DIR}/bin/${target}.apk" DESTINATION "bin" COMPONENT main)
endif()
endif()
string(REGEX MATCH "[0-9]+.[0-9]+.[0-9]+" ANT_VERSION "${ANT_VERSION_FULL}")
set(ANT_VERSION "${ANT_VERSION}" CACHE INTERNAL "Detected ant version")
- message(STATUS " Found apache ant ${ANT_VERSION}: ${ANT_EXECUTABLE}")
+ message(STATUS "Found apache ant ${ANT_VERSION}: ${ANT_EXECUTABLE}")
endif()
set(MSVC64 1)
endif()
-if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
- set(CMAKE_COMPILER_IS_GNUCXX 1)
-endif()
-
-if(CMAKE_C_COMPILER_ID STREQUAL "Clang")
- set(CMAKE_COMPILER_IS_GNUC 1)
+if(NOT APPLE)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+ set(CMAKE_COMPILER_IS_GNUCXX 1)
+ unset(ENABLE_PRECOMPILED_HEADERS CACHE)
+ endif()
+ if(CMAKE_C_COMPILER_ID STREQUAL "Clang")
+ set(CMAKE_COMPILER_IS_GNUC 1)
+ unset(ENABLE_PRECOMPILED_HEADERS CACHE)
+ endif()
endif()
# ----------------------------------------------------------------------------
if(MSVC AND NOT PYTHON_EXECUTABLE)
# search for executable with the same bitness as resulting binaries
# standard FindPythonInterp always prefers executable from system path
- foreach(_CURRENT_VERSION ${Python_ADDITIONAL_VERSIONS} 2.7 2.6 2.5 2.4 2.3 2.2 2.1 2.0 1.6 1.5)
+ # this is really important because we are using the interpreter for numpy search and for choosing the install location
+ foreach(_CURRENT_VERSION ${Python_ADDITIONAL_VERSIONS} 2.7 2.6 2.5 2.4 2.3 2.2 2.1 2.0)
find_host_program(PYTHON_EXECUTABLE
NAMES python${_CURRENT_VERSION} python
- PATHS [HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Python\\\\PythonCore\\\\${_CURRENT_VERSION}\\\\InstallPath]
+ PATHS
+ [HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Python\\\\PythonCore\\\\${_CURRENT_VERSION}\\\\InstallPath]
+ [HKEY_CURRENT_USER\\\\SOFTWARE\\\\Python\\\\PythonCore\\\\${_CURRENT_VERSION}\\\\InstallPath]
NO_SYSTEM_ENVIRONMENT_PATH
)
endforeach()
endif()
-find_host_package(PythonInterp)
+
+find_host_package(PythonInterp 2.0)
unset(PYTHON_USE_NUMPY CACHE)
unset(HAVE_SPHINX CACHE)
if(PYTHON_EXECUTABLE)
+ if(PYTHON_VERSION_STRING)
+ set(PYTHON_VERSION_FULL "${PYTHON_VERSION_STRING}")
+ set(PYTHON_VERSION_MAJOR_MINOR "${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}")
+ else()
+ execute_process(COMMAND ${PYTHON_EXECUTABLE} --version
+ ERROR_VARIABLE PYTHON_VERSION_FULL
+ ERROR_STRIP_TRAILING_WHITESPACE)
+
+ string(REGEX MATCH "[0-9]+.[0-9]+" PYTHON_VERSION_MAJOR_MINOR "${PYTHON_VERSION_FULL}")
+ string(REGEX MATCH "[0-9]+.[0-9]+.[0-9]+" PYTHON_VERSION_FULL "${PYTHON_VERSION_FULL}")
+ endif()
+
if(NOT ANDROID AND NOT IOS)
- find_host_package(PythonLibs)
+ find_host_package(PythonLibs ${PYTHON_VERSION_FULL})
# cmake 2.4 (at least on Ubuntu 8.04 (hardy)) don't define PYTHONLIBS_FOUND
if(NOT PYTHONLIBS_FOUND AND PYTHON_INCLUDE_PATH)
set(PYTHONLIBS_FOUND ON)
endif()
endif()
- execute_process(COMMAND ${PYTHON_EXECUTABLE} --version
- ERROR_VARIABLE PYTHON_VERSION_FULL
- ERROR_STRIP_TRAILING_WHITESPACE)
-
- string(REGEX MATCH "[0-9]+.[0-9]+" PYTHON_VERSION_MAJOR_MINOR "${PYTHON_VERSION_FULL}")
- string(REGEX MATCH "[0-9]+.[0-9]+.[0-9]+" PYTHON_VERSION_FULL "${PYTHON_VERSION_FULL}")
-
if(NOT ANDROID AND NOT IOS)
if(CMAKE_HOST_UNIX)
execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "from distutils.sysconfig import *; print get_python_lib()"
if(NOT EXISTS "${PYTHON_PATH}/Lib/site-packages")
unset(PYTHON_PATH)
get_filename_component(PYTHON_PATH "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Python\\PythonCore\\${PYTHON_VERSION_MAJOR_MINOR}\\InstallPath]" ABSOLUTE)
+ if(NOT PYTHON_PATH)
+ get_filename_component(PYTHON_PATH "[HKEY_CURRENT_USER\\SOFTWARE\\Python\\PythonCore\\${PYTHON_VERSION_MAJOR_MINOR}\\InstallPath]" ABSOLUTE)
+ endif()
file(TO_CMAKE_PATH "${PYTHON_PATH}" PYTHON_PATH)
endif()
set(PYTHON_PACKAGES_PATH "${PYTHON_PATH}/Lib/site-packages")
find_host_program(SPHINX_BUILD sphinx-build)
if(SPHINX_BUILD)
set(HAVE_SPHINX 1)
- message(STATUS " Found Sphinx ${SPHINX_VERSION}: ${SPHINX_BUILD}")
+ message(STATUS "Found Sphinx ${SPHINX_VERSION}: ${SPHINX_BUILD}")
endif()
endif()
endif(BUILD_DOCS)
if(BUILD_ZLIB)
unset_all(ZLIB_FOUND)
else()
- if(ANDROID)
- set(ZLIB_FOUND TRUE)
- set(ZLIB_LIBRARY z)
- set(ZLIB_LIBRARIES ${ZLIB_LIBRARY})
- set(ZLIB_INCLUDE_DIR "")
- ocv_parse_header2(ZLIB "${ANDROID_SYSROOT}/usr/include/zlib.h" ZLIB_VERSION "")
- else()
- include(FindZLIB)
- if(NOT ZLIB_VERSION_STRING)
- ocv_parse_header2(ZLIB "${ZLIB_INCLUDE_DIR}/zlib.h" ZLIB_VERSION "")
+ include(FindZLIB)
+ if(ZLIB_FOUND AND NOT ZLIB_VERSION_STRING)
+ ocv_parse_header2(ZLIB "${ZLIB_INCLUDE_DIR}/zlib.h" ZLIB_VERSION "")
+ endif()
+ if(ZLIB_FOUND AND ANDROID)
+ if(ZLIB_LIBRARY STREQUAL "${ANDROID_SYSROOT}/usr/lib/libz.so")
+ set(ZLIB_LIBRARY z)
+ set(ZLIB_LIBRARIES z)
endif()
endif()
endif()
set(JASPER_INCLUDE_DIR "${${JASPER_LIBRARY}_SOURCE_DIR}")
endif()
-ocv_parse_header2(JASPER "${JASPER_INCLUDE_DIR}/jasper/jas_config.h" JAS_VERSION "")
+if(NOT JASPER_VERSION_STRING)
+ ocv_parse_header2(JASPER "${JASPER_INCLUDE_DIR}/jasper/jas_config.h" JAS_VERSION "")
+endif()
################### libpng - optional (should be searched after zlib)
if(WITH_PNG)
add_native_precompiled_header(${the_target} ${pch_header})
elseif(CMAKE_GENERATOR MATCHES Xcode)
add_native_precompiled_header(${the_target} ${pch_header})
- elseif(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_GENERATOR MATCHES Makefiles)
+ elseif(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_GENERATOR MATCHES "Makefiles|Ninja")
add_precompiled_header(${the_target} ${pch_header})
endif()
endif()
endif()
#added include directories in such way that directories from the OpenCV source tree go first
-macro(ocv_include_directories)
+function(ocv_include_directories)
set(__add_before "")
foreach(dir ${ARGN})
get_filename_component(__abs_dir "${dir}" ABSOLUTE)
endif()
endforeach()
include_directories(BEFORE ${__add_before})
-endmacro()
+endfunction()
# Provides an option that the user can optionally select.
file(APPEND "${OPENCV_BUILD_INFO_FILE}" "\"${msg}\\n\"\n")
endfunction()
+macro(ocv_finalize_status)
+ if(NOT OPENCV_SKIP_STATUS_FINALIZATION)
+ if(TARGET opencv_core)
+ execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different "${OPENCV_BUILD_INFO_FILE}" "${opencv_core_BINARY_DIR}/version_string.inc" OUTPUT_QUIET)
+ endif()
+ endif()
+endmacro()
+
+
# Status report function.
# Automatically align right column and selects text based on condition.
# Usage:
# Advanced variables:
# - OpenCV_SHARED
# - OpenCV_CONFIG_PATH
-# - OpenCV_INSTALL_PATH
+# - OpenCV_INSTALL_PATH (not set on Windows)
# - OpenCV_LIB_COMPONENTS
# - OpenCV_USE_MANGLED_PATHS
# - OpenCV_HAVE_ANDROID_CAMERA
set(OpenCV_USE_MANGLED_PATHS @OpenCV_USE_MANGLED_PATHS_CONFIGCMAKE@)
# Extract the directory where *this* file has been installed (determined at cmake run-time)
-get_filename_component(OpenCV_CONFIG_PATH "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(OpenCV_CONFIG_PATH "${CMAKE_CURRENT_LIST_FILE}" PATH CACHE)
-# Get the absolute path with no ../.. relative marks, to eliminate implicit linker warnings
-if(${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} VERSION_LESS 2.8)
- get_filename_component(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../.." ABSOLUTE)
-else()
- get_filename_component(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../.." REALPATH)
+if(NOT WIN32)
+ # Get the absolute path with no ../.. relative marks, to eliminate implicit linker warnings
+ if(${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} VERSION_LESS 2.8)
+ get_filename_component(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../.." ABSOLUTE)
+ else()
+ get_filename_component(OpenCV_INSTALL_PATH "${OpenCV_CONFIG_PATH}/../.." REALPATH)
+ endif()
endif()
# Presence of Android native camera wrappers
# ======================================================
# Provide the libs directories to the caller
-set(OpenCV_LIB_DIR_OPT @OpenCV_LIB_DIRS_CONFIGCMAKE@)
-set(OpenCV_LIB_DIR_DBG @OpenCV_LIB_DIRS_CONFIGCMAKE@)
-set(OpenCV_3RDPARTY_LIB_DIR_OPT @OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE@)
-set(OpenCV_3RDPARTY_LIB_DIR_DBG @OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE@)
+set(OpenCV_LIB_DIR_OPT @OpenCV_LIB_DIRS_CONFIGCMAKE@ CACHE PATH "Path where release OpenCV libraries are located")
+set(OpenCV_LIB_DIR_DBG @OpenCV_LIB_DIRS_CONFIGCMAKE@ CACHE PATH "Path where debug OpenCV libraries are located")
+set(OpenCV_3RDPARTY_LIB_DIR_OPT @OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE@ CACHE PATH "Path where release 3rdparty OpenCV dependencies are located")
+set(OpenCV_3RDPARTY_LIB_DIR_DBG @OpenCV_3RDPARTY_LIB_DIRS_CONFIGCMAKE@ CACHE PATH "Path where debug 3rdparty OpenCV dependencies are located")
+mark_as_advanced(FORCE OpenCV_LIB_DIR_OPT OpenCV_LIB_DIR_DBG OpenCV_3RDPARTY_LIB_DIR_OPT OpenCV_3RDPARTY_LIB_DIR_DBG OpenCV_CONFIG_PATH)
# ======================================================
# Version variables:
}
div.bodywrapper {
- margin: 0 0 0 230px;
+ margin: 0 0 0 270px;
}
div.body {
div.sphinxsidebar {
word-wrap: break-word;
- width: 240px;
+ width: 270px;
{%- if theme_stickysidebar|tobool %}
top: 30px;
margin: 0;
cd ~/opencv
mkdir release
cd release
- cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX= /usr/local
+ cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local ..
#. Enter the created temporary directory (<cmake_binary_dir>) and proceed with:
:alt: Add a new Property Sheet\r
:align: center\r
\r
-Use for example the *OpenCV_Debug* name. Then by selecting the sheet :menuselection:`Right Click --> Properties`. In the following I will show to set the OpenCV rules locally, as I find unnecessary to pollute projects with custom rules that I do not use it. Go the C++ groups General entry and under the *"Additional Include Directories"* add the path to your OpenCV include. \r
+Use for example the *OpenCV_Debug* name. Then select the sheet and choose :menuselection:`Right Click --> Properties`. In the following I will show how to set the OpenCV rules locally, as I find it unnecessary to pollute projects with custom rules that I do not use. Go to the C++ group's General entry and under the *"Additional Include Directories"* add the path to your OpenCV include directories. If you don't have a *"C/C++"* group, you should add any .c/.cpp file to the project.\r
\r
.. code-block:: bash\r
\r
:alt: Visual Studio Command Line Arguments\r
:align: center\r
\r
-Specify here the name of the inputs and while you start your application from the Visual Studio enviroment you have automatic argument passing. In the next introductionary tutorial you'll see an in-depth explanation of the upper source code: :ref:`Display_Image`.
\ No newline at end of file
+Specify here the name of the inputs and when you start your application from the Visual Studio environment you have automatic argument passing. In the next introductory tutorial you'll see an in-depth explanation of the above source code: :ref:`Display_Image`.\r
Mat img = imread(filename, 0);
+.. note:: The format of the file is determined by its content (the first few bytes).
+
Save an image to a file: ::
- Mat img = imwrite(filename);
+ imwrite(filename, img);
+
+.. note:: The format of the file is determined by its extension.
+
+.. note:: Use ``imdecode`` and ``imencode`` to read and write an image from/to memory rather than a file.
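+
+A minimal sketch of the in-memory variants (assuming ``img`` is a valid ``Mat`` and JPEG support is available): ::
+
+    vector<uchar> buf;
+    imencode(".jpg", img, buf);  // compress img into a JPEG-encoded byte buffer
+    Mat img2 = imdecode(buf, 0); // decode the buffer back, as a grayscale image
+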
XML/YAML
--------
bool isFound = false;
#define BE_QUIET 1
#if BE_QUIET
- redirectError(quiet_error);
+ void* oldCbkData;
+ ErrorCallback oldCbk = redirectError(quiet_error, 0, &oldCbkData);
#endif
try
{
}
#if BE_QUIET
- redirectError(0);
+ redirectError(oldCbk, oldCbkData);
#endif
if (isFound)
{
errNorm = cvNorm( err, 0, CV_L2 );
if( errNorm > prevErrNorm )
{
- lambdaLg10++;
- step();
- _param = param;
- cvZero( err );
- _err = err;
- state = CHECK_ERR;
- return true;
+ if( ++lambdaLg10 <= 16 )
+ {
+ step();
+ _param = param;
+ cvZero( err );
+ _err = err;
+ state = CHECK_ERR;
+ return true;
+ }
}
lambdaLg10 = MAX(lambdaLg10-1, -16);
assert( state == CHECK_ERR );
if( errNorm > prevErrNorm )
{
- lambdaLg10++;
- step();
- _param = param;
- errNorm = 0;
- _errNorm = &errNorm;
- state = CHECK_ERR;
- return true;
+ if( ++lambdaLg10 <= 16 )
+ {
+ step();
+ _param = param;
+ errNorm = 0;
+ _errNorm = &errNorm;
+ state = CHECK_ERR;
+ return true;
+ }
}
lambdaLg10 = MAX(lambdaLg10-1, -16);
if( tvecs )
{
src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 + 3 );
- dst = cvMat( 3, 1, CV_MAT_TYPE(tvecs->type), tvecs->rows == 1 ?
+ dst = cvMat( 3, 1, CV_MAT_DEPTH(tvecs->type), tvecs->rows == 1 ?
tvecs->data.ptr + i*CV_ELEM_SIZE(tvecs->type) :
tvecs->data.ptr + tvecs->step*i );
cvConvert( &src, &dst );
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
cv::Mat lbls;
EM em_model(1, EM::COV_MAT_SPHERICAL, TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 10000, 0.001));
- em_model.train(cvarrToMat(samples), lbls);
+ em_model.train(cvarrToMat(samples), noArray(), lbls);
if(labels)
lbls.copyTo(cvarrToMat(labels));
-----------
.. ocv:class:: RotatedRect
-Template class for rotated rectangles specified by the center, size, and the rotation angle in degrees.
+The class represents rotated (i.e. not up-right) rectangles on a plane. Each rectangle is specified by the center point (mass center), the length of each side (represented by a ``cv::Size2f`` structure) and the rotation angle in degrees.
+ .. ocv:function:: RotatedRect::RotatedRect()
+ .. ocv:function:: RotatedRect::RotatedRect(const Point2f& center, const Size2f& size, float angle)
+ .. ocv:function:: RotatedRect::RotatedRect(const CvBox2D& box)
+
+ :param center: The rectangle mass center.
+ :param size: Width and height of the rectangle.
+ :param angle: The rotation angle in a clockwise direction. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
+ :param box: The rotated rectangle parameters as the obsolete CvBox2D structure.
+
+ .. ocv:function:: void RotatedRect::points(Point2f* pts) const
+ .. ocv:function:: Rect RotatedRect::boundingRect() const
+ .. ocv:function:: RotatedRect::operator CvBox2D() const
+
+ :param pts: The points array for storing rectangle vertices.
+
+The sample below demonstrates how to use RotatedRect:
+
+::
+
+ Mat image(200, 200, CV_8UC3, Scalar(0));
+ RotatedRect rRect = RotatedRect(Point2f(100,100), Size2f(100,50), 30);
+
+ Point2f vertices[4];
+ rRect.points(vertices);
+ for (int i = 0; i < 4; i++)
+ line(image, vertices[i], vertices[(i+1)%4], Scalar(0,255,0));
+
+ Rect brect = rRect.boundingRect();
+ rectangle(image, brect, Scalar(255,0,0));
+
+ imshow("rectangles", image);
+ waitKey(0);
+
+.. image:: pics/rotatedrect.png
+
+.. seealso::
+
+ :ocv:cfunc:`CamShift`,
+ :ocv:func:`fitEllipse`,
+ :ocv:func:`minAreaRect`,
+ :ocv:struct:`CvBox2D`
TermCriteria
------------
img(i,j)[2] ^= (uchar)(i ^ j);
+InputArray
+----------
+
+This is the proxy class for passing read-only input arrays into OpenCV functions. It is defined as ::
+
+ typedef const _InputArray& InputArray;
+
+where ``_InputArray`` is a class that can be constructed from ``Mat``, ``Mat_<T>``, ``Matx<T, m, n>``, ``std::vector<T>``, ``std::vector<std::vector<T> >`` or ``std::vector<Mat>``. It can also be constructed from a matrix expression.
+
+Since this is mostly an implementation-level class, and its interface may change in future versions, we do not describe it in detail. There are a few key things, though, that should be kept in mind:
+
+ * When you see in the reference manual or in OpenCV source code a function that takes ``InputArray``, it means that you can actually pass ``Mat``, ``Matx``, ``vector<T>`` etc. (see above the complete list).
+
+ * Optional input arguments: If some of the input arrays may be empty, pass ``cv::noArray()`` (or simply ``cv::Mat()`` as you probably did before).
+
+ * The class is designed solely for passing parameters. That is, normally you *should not* declare class members, local and global variables of this type.
+
+    * If you want to design your own function or a class method that can operate on arrays of multiple types, you can use ``InputArray`` (or ``OutputArray``) for the respective parameters. Inside a function you should use the ``_InputArray::getMat()`` method to construct a matrix header for the array (without copying data). ``_InputArray::kind()`` can be used to distinguish ``Mat`` from ``vector<>`` etc., but normally it is not needed.
+
+Here is how you can use a function that takes ``InputArray`` ::
+
+ std::vector<Point2f> vec;
+    // points on a circle
+ for( int i = 0; i < 30; i++ )
+ vec.push_back(Point2f((float)(100 + 30*cos(i*CV_PI*2/5)),
+ (float)(100 - 30*sin(i*CV_PI*2/5))));
+ cv::transform(vec, vec, cv::Matx23f(0.707, -0.707, 10, 0.707, 0.707, 20));
+
+That is, we form an STL vector containing points, and apply an in-place affine transformation to the vector using the 2x3 matrix created inline as a ``Matx<float, 2, 3>`` instance.
+
+Here is how such a function can be implemented (for simplicity, we implement a very specific case of it, according to the assertion statement inside) ::
+
+ void myAffineTransform(InputArray _src, OutputArray _dst, InputArray _m)
+ {
+ // get Mat headers for input arrays. This is O(1) operation,
+ // unless _src and/or _m are matrix expressions.
+ Mat src = _src.getMat(), m = _m.getMat();
+ CV_Assert( src.type() == CV_32FC2 && m.type() == CV_32F && m.size() == Size(3, 2) );
+
+ // [re]create the output array so that it has the proper size and type.
+ // In case of Mat it calls Mat::create, in case of STL vector it calls vector::resize.
+ _dst.create(src.size(), src.type());
+ Mat dst = _dst.getMat();
+
+ for( int i = 0; i < src.rows; i++ )
+ for( int j = 0; j < src.cols; j++ )
+ {
+ Point2f pt = src.at<Point2f>(i, j);
+ dst.at<Point2f>(i, j) = Point2f(m.at<float>(0, 0)*pt.x +
+ m.at<float>(0, 1)*pt.y +
+ m.at<float>(0, 2),
+ m.at<float>(1, 0)*pt.x +
+ m.at<float>(1, 1)*pt.y +
+ m.at<float>(1, 2));
+ }
+ }
+
+There is another related type, ``InputArrayOfArrays``, which is currently defined as a synonym for ``InputArray``: ::
+
+ typedef InputArray InputArrayOfArrays;
+
+It denotes function arguments that are either vectors of vectors or vectors of matrices. A separate synonym is needed to generate Python/Java etc. wrappers properly. At the function implementation level their use is similar, but ``_InputArray::getMat(idx)`` should be used to get a header for the idx-th component of the outer vector and ``_InputArray::size().area()`` should be used to find the number of components (vectors/matrices) of the outer vector.
+
+
+OutputArray
+-----------
+
+This type is very similar to ``InputArray`` except that it is used for input/output and output function parameters. Just like with ``InputArray``, OpenCV users should not care about ``OutputArray``; they just pass ``Mat``, ``vector<T>`` etc. to the functions. The same limitation as for ``InputArray`` applies here too: **do not explicitly create OutputArray instances**.
+
+If you want to make your function polymorphic (i.e. accept different arrays as output parameters), it is also not very difficult. Take the sample above as the reference. Note that ``_OutputArray::create()`` needs to be called before ``_OutputArray::getMat()``. This way you guarantee that the output array is properly allocated.
+
+Optional output parameters: if you do not need a certain output array to be computed and returned to you, pass ``cv::noArray()``, just like you would in the case of an optional input array. At the implementation level, use ``_OutputArray::needed()`` to check whether a certain output array needs to be computed or not.
+
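+A minimal sketch of an optional output parameter (``computeStats`` and its semantics are hypothetical): ::
+
+    void computeStats(InputArray _src, OutputArray _stddev)
+    {
+        Mat src = _src.getMat();
+        // ... compute the statistics from src ...
+        if( _stddev.needed() )
+        {
+            // allocate and fill the output only when the caller asked for it
+            _stddev.create(1, 1, CV_64F);
+            Mat stddev = _stddev.getMat();
+            stddev.at<double>(0, 0) = 0.; // placeholder for the actual result
+        }
+    }
+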
+There are several synonyms for ``OutputArray`` that are used to assist automatic Python/Java/... wrapper generators: ::
+
+ typedef OutputArray OutputArrayOfArrays;
+ typedef OutputArray InputOutputArray;
+ typedef OutputArray InputOutputArrayOfArrays;
NAryMatIterator
---------------
M.ref(1, 2, 3) = M(4, 5, 6) + M(7, 8, 9);
+Algorithm
+---------
+
+This is a base class for all more or less complex algorithms in OpenCV, especially for classes of algorithms for which there can be multiple implementations. Examples include stereo correspondence (with algorithms like block matching, semi-global block matching, graph-cut etc.), background subtraction (which can be done using mixture-of-gaussians models, codebook-based algorithms etc.) and optical flow (block matching, Lucas-Kanade, Horn-Schunck etc.).
+
+The class provides the following features for all derived classes:
+
+    * so-called "virtual constructor". That is, each ``Algorithm`` derivative is registered at program start and you can get the list of registered algorithms and create an instance of a particular algorithm by its name (see ``Algorithm::create``). If you plan to add your own algorithms, it is good practice to add a unique prefix to your algorithm names to distinguish them from other algorithms.
+
+    * setting/retrieving algorithm parameters by name. If you have used the video capturing functionality from the OpenCV highgui module, you are probably familiar with ``cvSetCaptureProperty()``, ``cvGetCaptureProperty()``, ``VideoCapture::set()`` and ``VideoCapture::get()``. ``Algorithm`` provides similar methods where instead of integer ids you specify the parameter names as text strings. See ``Algorithm::set`` and ``Algorithm::get`` for details.
+
+ * reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store all its parameters and then read them back. There is no need to re-implement it each time.
+
+Here is an example of using SIFT in your application via the Algorithm interface: ::
+
+ #include "opencv2/opencv.hpp"
+
+ ...
+
+ initModule_nonfree(); // to load SURF/SIFT etc.
+
+ Ptr<Feature2D> sift = Algorithm::create<Feature2D>("Feature2D.SIFT");
+
+ FileStorage fs("sift_params.xml", FileStorage::READ);
+ if( fs.isOpened() ) // if we have file with parameters, read them
+ {
+        sift->read(fs["sift_params"]);
+ fs.release();
+ }
+ else // else modify the parameters and store them; user can later edit the file to use different parameters
+ {
+        sift->set("contrastThreshold", 0.01f); // lower the contrast threshold, compared to the default value
+
+ {
+ WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
+            sift->write(fs);
+ }
+ }
+
+ Mat image = imread("myimage.png", 0), descriptors;
+ vector<KeyPoint> keypoints;
+    (*sift)(image, noArray(), keypoints, descriptors);
+
+
+Algorithm::get
+--------------
+Returns the algorithm parameter
+
+.. ocv:function:: template<typename _Tp> typename ParamType<_Tp>::member_type get(const string& name) const
+
+ :param name: The parameter name.
+
+The method returns the value of the particular parameter. Since the compiler cannot deduce the type of the returned parameter, you should specify it explicitly in angle brackets. Here are the allowed forms of get:
+
+ * myalgo.get<int>("param_name")
+ * myalgo.get<double>("param_name")
+ * myalgo.get<bool>("param_name")
+ * myalgo.get<string>("param_name")
+ * myalgo.get<Mat>("param_name")
+ * myalgo.get<vector<Mat> >("param_name")
+ * myalgo.get<Algorithm>("param_name") (it returns Ptr<Algorithm>).
+
+In some cases the actual type of the parameter can be cast to the specified type, e.g. an integer parameter can be cast to double, ``bool`` can be cast to ``int``. But "dangerous" transformations (string<->number, double->int, 1x1 Mat<->number, ...) are not performed and the method will throw an exception. In the case of ``Mat`` or ``vector<Mat>`` parameters the method does not clone the matrix data, so do not modify the matrices. Use ``Algorithm::set`` instead - slower, but safer.
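+
+A minimal usage sketch (the algorithm name and parameter name are hypothetical): ::
+
+    Ptr<Algorithm> myalgo = Algorithm::create<Algorithm>("MyModule.MyAlgo");
+    double thresh = myalgo->get<double>("threshold");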
+
+
+Algorithm::set
+--------------
+Sets the algorithm parameter
+
+.. ocv:function:: void set(const string& name, int value)
+.. ocv:function:: void set(const string& name, double value)
+.. ocv:function:: void set(const string& name, bool value)
+.. ocv:function:: void set(const string& name, const string& value)
+.. ocv:function:: void set(const string& name, const Mat& value)
+.. ocv:function:: void set(const string& name, const vector<Mat>& value)
+.. ocv:function:: void set(const string& name, const Ptr<Algorithm>& value)
+
+ :param name: The parameter name.
+ :param value: The parameter value.
+
+The method sets the value of the particular parameter. Some of the algorithm parameters may be declared as read-only. If you try to set such a parameter, you will get an exception with the corresponding error message.
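+
+The matching sketch for ``set`` (same hypothetical instance and parameter name as above): ::
+
+    myalgo->set("threshold", 0.25); // throws if "threshold" is declared read-only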
+
+
+Algorithm::write
+----------------
+Stores algorithm parameters in a file storage
+
+.. ocv:function:: void write(FileStorage& fs) const
+
+ :param fs: File storage.
+
+The method stores all the algorithm parameters (in alphabetical order) to the file storage. The method is virtual. If you define your own ``Algorithm`` derivative, you can override the method and store some extra information. However, it's rarely needed. Here are some examples:
+
+    * SIFT feature detector (from the nonfree module). The class only stores algorithm parameters and no keypoints or their descriptors. Therefore, it's enough to store the algorithm parameters, which is what ``Algorithm::write()`` does; there is no dedicated ``SIFT::write()``.
+
+    * Background subtractor (from the video module). It has the algorithm parameters and also the current background model. However, the background model is not stored: first, it's rather big; second, if it were stored, it would likely be irrelevant on the next run (because of a shifted camera, changed background, different lighting etc.). Therefore, ``BackgroundSubtractorMOG`` and ``BackgroundSubtractorMOG2`` also rely on the standard ``Algorithm::write()`` to store just the algorithm parameters.
+
+    * Expectation Maximization (from the ml module). The algorithm finds a mixture of gaussians that best approximates the user data. In this case the model may be re-used on the next run to test new data against the trained statistical model. So EM needs to store the model. However, since the model is described by a few parameters that are available as read-only algorithm parameters (i.e. they are available via ``EM::get()``), EM also relies on ``Algorithm::write()`` to store both the EM parameters and the model (represented by read-only algorithm parameters).
+
+
+Algorithm::read
+---------------
+Reads algorithm parameters from a file storage
+
+.. ocv:function:: void read(const FileNode& fn)
+
+ :param fn: File node of the file storage.
+
+The method reads all the algorithm parameters from the specified node of a file storage. Similarly to ``Algorithm::write()``, if you implement an algorithm that needs to read some extra data and/or re-compute some internal data, you may override the method.
+
+Algorithm::getList
+------------------
+Returns the list of registered algorithms
+
+.. ocv:function:: void Algorithm::getList(vector<string>& algorithms)
+
+ :param algorithms: The output vector of algorithm names.
+
+This static method returns the list of registered algorithms in alphabetical order.
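+
+A minimal sketch that prints the names of all registered algorithms: ::
+
+    vector<string> algorithms;
+    Algorithm::getList(algorithms);
+    for (size_t i = 0; i < algorithms.size(); i++)
+        cout << algorithms[i] << endl;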
+
+
+Algorithm::create
+-----------------
+Creates algorithm instance by name
+
+.. ocv:function:: template<typename _Tp> Ptr<_Tp> create(const string& name)
+
+ :param name: The algorithm name, one of the names returned by ``Algorithm::getList()``.
+
+This static method creates a new instance of the specified algorithm. If there is no such algorithm, the method will silently return a null pointer (which can be checked with the ``Ptr::empty()`` method). Also, you should specify the particular ``Algorithm`` subclass as ``_Tp`` (or simply ``Algorithm`` if you do not know it at that point). ::
+
+ Ptr<BackgroundSubtractor> bgfg = Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
+
+.. note:: This is an important note about the seemingly mysterious behavior of ``Algorithm::create()`` when it returns NULL though it should not. The reason is simple: ``Algorithm::create()`` resides in OpenCV's core module and the algorithms are implemented in other modules. If you create algorithms dynamically, the C++ linker may decide to throw away the modules where the actual algorithms are implemented, since you do not call any functions from those modules. To avoid this problem, you need to call ``initModule_<modulename>();`` somewhere near the beginning of the program, before ``Algorithm::create()``. For example, call ``initModule_nonfree()`` in order to use SURF/SIFT, call ``initModule_ml()`` to use expectation maximization etc.
+
+Creating Own Algorithms
+-----------------------
+
+The above methods are usually enough for users. If you want to make your own algorithm, derived from ``Algorithm``, you should basically follow a few conventions and add a little semi-standard piece of code to your class:
+
+ * Make a class and specify ``Algorithm`` as its base class.
+ * The algorithm parameters should be the class members. See ``Algorithm::get()`` for the list of possible types of the parameters.
+    * Add a public virtual method ``AlgorithmInfo* info() const;`` to your class.
+    * Add a constructor function, an ``AlgorithmInfo`` instance and implement the ``info()`` method. The simplest way is to take http://code.opencv.org/svn/opencv/trunk/opencv/modules/ml/src/ml_init.cpp as the reference and modify it according to the list of your parameters.
+    * Add some public function (e.g. ``initModule_<mymodule>()``) that calls the ``info()`` of your algorithm and put it into the same source file as the ``info()`` implementation. This is to force the C++ linker to include this object file into the target application. See ``Algorithm::create()`` for details.
+
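+A minimal sketch of these conventions (the class, parameter, and module names are illustrative): ::
+
+    class MyAlgo : public Algorithm
+    {
+    public:
+        MyAlgo() : gamma(1.0) {}
+        double gamma;                         // an algorithm parameter
+        AlgorithmInfo* info() const;          // the semi-standard part
+    };
+
+    static Algorithm* createMyAlgo() { return new MyAlgo; }
+
+    AlgorithmInfo* MyAlgo::info() const
+    {
+        static AlgorithmInfo info_("MyModule.MyAlgo", createMyAlgo);
+        static volatile bool initialized = false;
+        if( !initialized )
+        {
+            initialized = true;
+            MyAlgo obj;
+            info_.addParam(obj, "gamma", obj.gamma);
+        }
+        return &info_;
+    }
+
+    // calling this somewhere in the program keeps this object file in the final binary
+    bool initModule_mymodule() { MyAlgo a; return a.info() != 0; }
+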
.. seealso:: :ocv:class:`Rect\_`
+
+CvBox2D
+-------
+
+.. ocv:struct:: CvBox2D
+
+Stores coordinates of a rotated rectangle.
+
+ .. ocv:member:: CvPoint2D32f center
+
+ Center of the box
+
+ .. ocv:member:: CvSize2D32f size
+
+ Box width and height
+
+ .. ocv:member:: float angle
+
+ Angle between the horizontal axis and the first side (i.e. length) in degrees
+
+.. seealso:: :ocv:class:`RotatedRect`
+
+
CvScalar
--------
};
+typedef void (*BinaryFunc)(const uchar* src1, size_t step1,
+ const uchar* src2, size_t step2,
+ uchar* dst, size_t step, Size sz,
+ void*);
+
+CV_EXPORTS BinaryFunc getConvertFunc(int sdepth, int ddepth);
+CV_EXPORTS BinaryFunc getConvertScaleFunc(int sdepth, int ddepth);
+CV_EXPORTS BinaryFunc getCopyMaskFunc(size_t esz);
+
//! swaps two matrices
CV_EXPORTS void swap(Mat& a, Mat& b);
CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst);
CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst);
-CV_EXPORTS_W void hconcat(InputArray src, OutputArray dst);
+CV_EXPORTS_W void hconcat(InputArrayOfArrays src, OutputArray dst);
CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst);
CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst);
-CV_EXPORTS_W void vconcat(InputArray src, OutputArray dst);
+CV_EXPORTS_W void vconcat(InputArrayOfArrays src, OutputArray dst);
//! computes bitwise conjunction of the two arrays (dst = src1 & src2)
CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2,
//! checks that each matrix element is within the specified range.
CV_EXPORTS_W bool checkRange(InputArray a, bool quiet=true, CV_OUT Point* pos=0,
double minVal=-DBL_MAX, double maxVal=DBL_MAX);
+//! converts NaNs to the given number
+CV_EXPORTS_W void patchNaNs(InputOutputArray a, double val=0);
+
//! implements generalized matrix product algorithm GEMM from BLAS
CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha,
InputArray src3, double gamma, OutputArray dst, int flags=0);
/*!
Base class for high-level OpenCV algorithms
*/
-class CV_EXPORTS Algorithm
+class CV_EXPORTS_W Algorithm
{
public:
Algorithm();
template<typename _Tp> typename ParamType<_Tp>::member_type get(const string& name) const;
template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
- void set(const string& name, int value);
- void set(const string& name, double value);
- void set(const string& name, bool value);
- void set(const string& name, const string& value);
- void set(const string& name, const Mat& value);
- void set(const string& name, const vector<Mat>& value);
- void set(const string& name, const Ptr<Algorithm>& value);
+
+ CV_WRAP int getInt(const string& name) const;
+ CV_WRAP double getDouble(const string& name) const;
+ CV_WRAP bool getBool(const string& name) const;
+ CV_WRAP string getString(const string& name) const;
+ CV_WRAP Mat getMat(const string& name) const;
+ CV_WRAP vector<Mat> getMatVector(const string& name) const;
+ CV_WRAP Ptr<Algorithm> getAlgorithm(const string& name) const;
+
+ CV_WRAP_AS(setInt) void set(const string& name, int value);
+ CV_WRAP_AS(setDouble) void set(const string& name, double value);
+ CV_WRAP_AS(setBool) void set(const string& name, bool value);
+ CV_WRAP_AS(setString) void set(const string& name, const string& value);
+ CV_WRAP_AS(setMat) void set(const string& name, const Mat& value);
+ CV_WRAP_AS(setMatVector) void set(const string& name, const vector<Mat>& value);
+ CV_WRAP_AS(setAlgorithm) void set(const string& name, const Ptr<Algorithm>& value);
void set(const char* name, int value);
void set(const char* name, double value);
void set(const char* name, const vector<Mat>& value);
void set(const char* name, const Ptr<Algorithm>& value);
- string paramHelp(const string& name) const;
+ CV_WRAP string paramHelp(const string& name) const;
int paramType(const char* name) const;
- int paramType(const string& name) const;
- void getParams(vector<string>& names) const;
+ CV_WRAP int paramType(const string& name) const;
+ CV_WRAP void getParams(CV_OUT vector<string>& names) const;
virtual void write(FileStorage& fs) const;
typedef int (Algorithm::*Getter)() const;
typedef void (Algorithm::*Setter)(int);
- static void getList(vector<string>& algorithms);
- static Ptr<Algorithm> _create(const string& name);
+ CV_WRAP static void getList(CV_OUT vector<string>& algorithms);
+ CV_WRAP static Ptr<Algorithm> _create(const string& name);
template<typename _Tp> static Ptr<_Tp> create(const string& name);
virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; }
class CV_EXPORTS AlgorithmInfo
{
public:
+ friend class Algorithm;
AlgorithmInfo(const string& name, Algorithm::Constructor create);
~AlgorithmInfo();
void get(const Algorithm* algo, const char* name, int argType, void* value) const;
- void set(Algorithm* algo, const char* name, int argType, const void* value) const;
void addParam_(Algorithm& algo, const char* name, int argType,
void* value, bool readOnly,
Algorithm::Getter getter, Algorithm::Setter setter,
const string& help=string());
protected:
AlgorithmInfoData* data;
+ void set(Algorithm* algo, const char* name, int argType,
+ const void* value, bool force=false) const;
};
-/*M///////////////////////////////////////////////////////////////////////////////////////\r
-//\r
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
-//\r
-// By downloading, copying, installing or using the software you agree to this license.\r
-// If you do not agree to this license, do not download, install,\r
-// copy or use the software.\r
-//\r
-//\r
-// License Agreement\r
-// For Open Source Computer Vision Library\r
-//\r
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
-// Third party copyrights are property of their respective owners.\r
-//\r
-// Redistribution and use in source and binary forms, with or without modification,\r
-// are permitted provided that the following conditions are met:\r
-//\r
-// * Redistribution's of source code must retain the above copyright notice,\r
-// this list of conditions and the following disclaimer.\r
-//\r
-// * Redistribution's in binary form must reproduce the above copyright notice,\r
-// this list of conditions and the following disclaimer in the documentation\r
-// and/or other GpuMaterials provided with the distribution.\r
-//\r
-// * The name of the copyright holders may not be used to endorse or promote products\r
-// derived from this software without specific prior written permission.\r
-//\r
-// This software is provided by the copyright holders and contributors "as is" and\r
-// any express or implied warranties, including, but not limited to, the implied\r
-// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
-// In no event shall the Intel Corporation or contributors be liable for any direct,\r
-// indirect, incidental, special, exemplary, or consequential damages\r
-// (including, but not limited to, procurement of substitute goods or services;\r
-// loss of use, data, or profits; or business interruption) however caused\r
-// and on any theory of liability, whether in contract, strict liability,\r
-// or tort (including negligence or otherwise) arising in any way out of\r
-// the use of this software, even if advised of the possibility of such damage.\r
-//\r
-//M*/\r
-\r
-#ifndef __OPENCV_CORE_DevMem2D_HPP__\r
-#define __OPENCV_CORE_DevMem2D_HPP__\r
-\r
-#ifdef __cplusplus\r
-\r
-#ifdef __CUDACC__ \r
- #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__ \r
-#else\r
- #define __CV_GPU_HOST_DEVICE__\r
-#endif\r
-\r
-namespace cv\r
-{ \r
- namespace gpu\r
- {\r
- // Simple lightweight structures that encapsulates information about an image on device.\r
- // It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile\r
-\r
- template <bool expr> struct StaticAssert;\r
- template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};\r
-\r
- template<typename T> struct DevPtr\r
- {\r
- typedef T elem_type;\r
- typedef int index_type;\r
-\r
- enum { elem_size = sizeof(elem_type) };\r
-\r
- T* data;\r
-\r
- __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}\r
- __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}\r
-\r
- __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }\r
- __CV_GPU_HOST_DEVICE__ operator T*() { return data; }\r
- __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }\r
- };\r
- \r
- template<typename T> struct PtrSz : public DevPtr<T>\r
- { \r
- __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}\r
- __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}\r
-\r
- size_t size;\r
- };\r
-\r
- template<typename T> struct PtrStep : public DevPtr<T>\r
- { \r
- __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}\r
- __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}\r
-\r
- /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */\r
- size_t step; \r
-\r
- __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }\r
- __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }\r
-\r
- __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }\r
- __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }\r
- };\r
-\r
- template <typename T> struct PtrStepSz : public PtrStep<T>\r
- { \r
- __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}\r
- __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_) \r
- : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}\r
-\r
- int cols;\r
- int rows; \r
- };\r
-\r
- template <typename T> struct DevMem2D_ : public PtrStepSz<T>\r
- { \r
- DevMem2D_() {}\r
- DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}\r
- \r
- template <typename U> \r
- explicit DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {} \r
- };\r
- \r
- template<typename T> struct PtrElemStep_ : public PtrStep<T>\r
- { \r
- PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) \r
- {\r
- StaticAssert<256 % sizeof(T) == 0>::check();\r
-\r
- PtrStep<T>::step /= PtrStep<T>::elem_size; \r
- }\r
- __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep<T>::data + y * PtrStep<T>::step; }\r
- __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep<T>::data + y * PtrStep<T>::step; } \r
-\r
- __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }\r
- __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; } \r
- };\r
-\r
- template<typename T> struct PtrStep_ : public PtrStep<T>\r
- { \r
- PtrStep_() {}\r
- PtrStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) {} \r
- };\r
-\r
- typedef DevMem2D_<unsigned char> DevMem2Db;\r
- typedef DevMem2Db DevMem2D;\r
- typedef DevMem2D_<float> DevMem2Df;\r
- typedef DevMem2D_<int> DevMem2Di;\r
-\r
- typedef PtrStep<unsigned char> PtrStepb;\r
- typedef PtrStep<float> PtrStepf;\r
- typedef PtrStep<int> PtrStepi;\r
-\r
- typedef PtrElemStep_<unsigned char> PtrElemStep;\r
- typedef PtrElemStep_<float> PtrElemStepf;\r
- typedef PtrElemStep_<int> PtrElemStepi; \r
- } \r
-}\r
-\r
-#endif // __cplusplus\r
-\r
-#endif /* __OPENCV_GPU_DevMem2D_HPP__ */\r
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other GpuMaterials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_DevMem2D_HPP__
+#define __OPENCV_CORE_DevMem2D_HPP__
+
+#ifdef __cplusplus
+
+#ifdef __CUDACC__
+ #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
+#else
+ #define __CV_GPU_HOST_DEVICE__
+#endif
+
+namespace cv
+{
+ namespace gpu
+ {
+        // Simple lightweight structures that encapsulate information about an image on the device.
+        // They are intended to be passed to nvcc-compiled code, since GpuMat depends on headers that nvcc can't compile.
+
+ template <bool expr> struct StaticAssert;
+ template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};
+
+ template<typename T> struct DevPtr
+ {
+ typedef T elem_type;
+ typedef int index_type;
+
+ enum { elem_size = sizeof(elem_type) };
+
+ T* data;
+
+ __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
+ __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
+
+ __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
+ __CV_GPU_HOST_DEVICE__ operator T*() { return data; }
+ __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
+ };
+
+ template<typename T> struct PtrSz : public DevPtr<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
+
+ size_t size;
+ };
+
+ template<typename T> struct PtrStep : public DevPtr<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
+
+ /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */
+ size_t step;
+
+ __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
+ __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
+
+ __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
+ __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
+ };
+
+ template <typename T> struct PtrStepSz : public PtrStep<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
+ : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
+
+ int cols;
+ int rows;
+ };
+
+ template <typename T> struct DevMem2D_ : public PtrStepSz<T>
+ {
+ DevMem2D_() {}
+ DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}
+
+ template <typename U>
+ explicit DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {}
+ };
+
+ template<typename T> struct PtrElemStep_ : public PtrStep<T>
+ {
+ PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step)
+ {
+ StaticAssert<256 % sizeof(T) == 0>::check();
+
+ PtrStep<T>::step /= PtrStep<T>::elem_size;
+ }
+ __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep<T>::data + y * PtrStep<T>::step; }
+ __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep<T>::data + y * PtrStep<T>::step; }
+
+ __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
+ __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
+ };
+
+ template<typename T> struct PtrStep_ : public PtrStep<T>
+ {
+ PtrStep_() {}
+ PtrStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) {}
+ };
+
+ typedef DevMem2D_<unsigned char> DevMem2Db;
+ typedef DevMem2Db DevMem2D;
+ typedef DevMem2D_<float> DevMem2Df;
+ typedef DevMem2D_<int> DevMem2Di;
+
+ typedef PtrStep<unsigned char> PtrStepb;
+ typedef PtrStep<float> PtrStepf;
+ typedef PtrStep<int> PtrStepi;
+
+ typedef PtrElemStep_<unsigned char> PtrElemStep;
+ typedef PtrElemStep_<float> PtrElemStepf;
+ typedef PtrElemStep_<int> PtrElemStepi;
+ }
+}
+
+#endif // __cplusplus
+
+#endif /* __OPENCV_CORE_DevMem2D_HPP__ */
template<typename _Tp> inline _Tp& Mat::at(int i0)
{
- CV_DbgAssert( dims <= 2 && data && (size.p[0] == 1 || size.p[1] == 1) &&
- (unsigned)i0 < (unsigned)(size.p[0] + size.p[1] - 1) &&
+ CV_DbgAssert( dims <= 2 && data &&
+ (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) &&
elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
- return *(_Tp*)(data + step.p[size.p[0]==1]*i0);
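+    // any continuous matrix and any single-row matrix can be indexed directly as a
+    // 1D array; a single column needs the row step; otherwise map i0 to (row, col)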
+ if( isContinuous() || size.p[0] == 1 )
+ return ((_Tp*)data)[i0];
+ if( size.p[1] == 1 )
+ return *(_Tp*)(data + step.p[0]*i0);
+ int i = i0/cols, j = i0 - i*cols;
+ return ((_Tp*)(data + step.p[0]*i))[j];
}
template<typename _Tp> inline const _Tp& Mat::at(int i0) const
{
- CV_DbgAssert( dims <= 2 && data && (size.p[0] == 1 || size.p[1] == 1) &&
- (unsigned)i0 < (unsigned)(size.p[0] + size.p[1] - 1) &&
+ CV_DbgAssert( dims <= 2 && data &&
+ (unsigned)i0 < (unsigned)(size.p[0]*size.p[1]) &&
elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) );
- return *(_Tp*)(data + step.p[size.p[0]==1]*i0);
+ if( isContinuous() || size.p[0] == 1 )
+ return ((const _Tp*)data)[i0];
+ if( size.p[1] == 1 )
+ return *(const _Tp*)(data + step.p[0]*i0);
+ int i = i0/cols, j = i0 - i*cols;
+ return ((const _Tp*)(data + step.p[0]*i))[j];
}
template<typename _Tp> inline _Tp& Mat::at(int i0, int i1, int i2)
#ifdef __cplusplus
/////// exchange-add operation for atomic operations on reference counters ///////
-#ifdef __INTEL_COMPILER // atomic increment on the Intel(tm) compiler
+#if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32) // atomic increment on the non-Windows version of the Intel(tm) compiler
#define CV_XADD(addr,delta) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
#elif defined __GNUC__
}
+template<typename _Tp, int m, int n> static inline
+Vec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b)
+{
+ Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp());
+ return reinterpret_cast<const Vec<_Tp, m>&>(c);
+}
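+// usage sketch: Vec2f y = Matx22f(1, 2, 3, 4) * Vec2f(5, 6);   // y == (17, 39)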
+
+
template<typename _Tp> static inline
Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b)
{
return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1);
}
-
+
template<typename _Tp> static inline
Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b)
{
- return Scalar(a*Matx<_Tp, 4, 1>(b[0],b[1],b[2],b[3]));
-}
+ Matx<double, 4, 1> c(Matx<double, 4, 4>(a), b, Matx_MatMulOp());
+ return reinterpret_cast<const Scalar&>(c);
+}
+
+static inline
+Scalar operator * (const Matx<double, 4, 4>& a, const Scalar& b)
+{
+ Matx<double, 4, 1> c(a, b, Matx_MatMulOp());
+ return reinterpret_cast<const Scalar&>(c);
+}
+
template<typename _Tp, int m, int n> inline
Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const
{
#endif
}
+#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
+#include "emmintrin.h"
+#endif
CV_INLINE int cvFloor( double value )
{
-#ifdef __GNUC__
- int i = (int)value;
- return i - (i > value);
-#elif defined _MSC_VER && defined _M_X64
+#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__)
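+    // _mm_cvtsd_si32 rounds to nearest; subtract 1 whenever that rounding went up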
__m128d t = _mm_set_sd( value );
int i = _mm_cvtsd_si32(t);
return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i)));
+#elif defined __GNUC__
+ int i = (int)value;
+ return i - (i > value);
#else
int i = cvRound(value);
Cv32suf diff;
CV_INLINE int cvCeil( double value )
{
-#ifdef __GNUC__
- int i = (int)value;
- return i + (i < value);
-#elif defined _MSC_VER && defined _M_X64
+#if defined _MSC_VER && defined _M_X64 || (defined __GNUC__ && defined __SSE2__ && !defined __APPLE__)
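+    // _mm_cvtsd_si32 rounds to nearest; add 1 whenever that rounding went down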
__m128d t = _mm_set_sd( value );
int i = _mm_cvtsd_si32(t);
return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t));
+#elif defined __GNUC__
+ int i = (int)value;
+ return i + (i < value);
#else
int i = cvRound(value);
Cv32suf diff;
info()->set(this, name, ParamType<Algorithm>::type, &value);
}
+int Algorithm::getInt(const string& name) const
+{
+ return get<int>(name);
+}
+
+double Algorithm::getDouble(const string& name) const
+{
+ return get<double>(name);
+}
+
+bool Algorithm::getBool(const string& name) const
+{
+ return get<bool>(name);
+}
+
+string Algorithm::getString(const string& name) const
+{
+ return get<string>(name);
+}
+
+Mat Algorithm::getMat(const string& name) const
+{
+ return get<Mat>(name);
+}
+
+vector<Mat> Algorithm::getMatVector(const string& name) const
+{
+ return get<vector<Mat> >(name);
+}
+
+Ptr<Algorithm> Algorithm::getAlgorithm(const string& name) const
+{
+ return get<Algorithm>(name);
+}
+
string Algorithm::paramHelp(const string& name) const
{
return info()->paramHelp(name.c_str());
void AlgorithmInfo::write(const Algorithm* algo, FileStorage& fs) const
{
- size_t i = 0, n = data->params.vec.size();
+ size_t i = 0, nparams = data->params.vec.size();
cv::write(fs, "name", algo->name());
- for( i = 0; i < n; i++ )
+ for( i = 0; i < nparams; i++ )
{
const Param& p = data->params.vec[i].second;
const string& pname = data->params.vec[i].first;
void AlgorithmInfo::read(Algorithm* algo, const FileNode& fn) const
{
- size_t i = 0, n = data->params.vec.size();
+ size_t i = 0, nparams = data->params.vec.size();
+ AlgorithmInfo* info = algo->info();
- for( i = 0; i < n; i++ )
+ for( i = 0; i < nparams; i++ )
{
const Param& p = data->params.vec[i].second;
const string& pname = data->params.vec[i].first;
if( n.empty() )
continue;
if( p.type == Param::INT )
- algo->set(pname, (int)n);
+ {
+ int val = (int)n;
+ info->set(algo, pname.c_str(), p.type, &val, true);
+ }
else if( p.type == Param::BOOLEAN )
- algo->set(pname, (int)n != 0);
+ {
+ bool val = (int)n != 0;
+ info->set(algo, pname.c_str(), p.type, &val, true);
+ }
else if( p.type == Param::REAL )
- algo->set(pname, (double)n);
+ {
+ double val = (double)n;
+ info->set(algo, pname.c_str(), p.type, &val, true);
+ }
else if( p.type == Param::STRING )
- algo->set(pname, (string)n);
+ {
+ string val = (string)n;
+ info->set(algo, pname.c_str(), p.type, &val, true);
+ }
else if( p.type == Param::MAT )
{
Mat m;
cv::read(n, m);
- algo->set(pname, m);
+ info->set(algo, pname.c_str(), p.type, &m, true);
}
else if( p.type == Param::MAT_VECTOR )
{
vector<Mat> mv;
cv::read(n, mv);
- algo->set(pname, mv);
+ info->set(algo, pname.c_str(), p.type, &mv, true);
}
else if( p.type == Param::ALGORITHM )
{
Ptr<Algorithm> nestedAlgo = Algorithm::_create((string)n["name"]);
CV_Assert( !nestedAlgo.empty() );
nestedAlgo->read(n);
- algo->set(pname, nestedAlgo);
+ info->set(algo, pname.c_str(), p.type, &nestedAlgo, true);
}
else
CV_Error( CV_StsUnsupportedFormat, "unknown/unsupported parameter type");
void (Algorithm::*set_mat_vector)(const vector<Mat>&);
void (Algorithm::*set_algo)(const Ptr<Algorithm>&);
};
-
-void AlgorithmInfo::set(Algorithm* algo, const char* name, int argType, const void* value) const
+
+void AlgorithmInfo::set(Algorithm* algo, const char* name, int argType, const void* value, bool force) const
{
const Param* p = findstr(data->params, name);
-
+
if( !p )
CV_Error_( CV_StsBadArg, ("No parameter '%s' is found", name ? name : "<NULL>") );
-
- if( p->readonly )
+
+ if( !force && p->readonly )
CV_Error_( CV_StsError, ("Parameter '%s' is readonly", name));
-
+
GetSetParam f;
f.set_int = p->setter;
-
+
if( argType == Param::INT || argType == Param::BOOLEAN || argType == Param::REAL )
{
CV_Assert( p->type == Param::INT || p->type == Param::REAL || p->type == Param::BOOLEAN );
-
+
if( p->type == Param::INT )
{
int val = argType == Param::INT ? *(const int*)value :
else if( argType == Param::STRING )
{
CV_Assert( p->type == Param::STRING );
-
+
const string& val = *(const string*)value;
if( p->setter )
(algo->*f.set_string)(val);
else if( argType == Param::MAT )
{
CV_Assert( p->type == Param::MAT );
-
+
const Mat& val = *(const Mat*)value;
if( p->setter )
(algo->*f.set_mat)(val);
else if( argType == Param::MAT_VECTOR )
{
CV_Assert( p->type == Param::MAT_VECTOR );
-
+
const vector<Mat>& val = *(const vector<Mat>*)value;
if( p->setter )
(algo->*f.set_mat_vector)(val);
else if( argType == Param::ALGORITHM )
{
CV_Assert( p->type == Param::ALGORITHM );
-
+
const Ptr<Algorithm>& val = *(const Ptr<Algorithm>*)value;
if( p->setter )
(algo->*f.set_algo)(val);
}
+void patchNaNs( InputOutputArray _a, double _val )
+{
+ Mat a = _a.getMat();
+ CV_Assert( a.depth() == CV_32F );
+
+ const Mat* arrays[] = {&a, 0};
+ int* ptrs[1];
+ NAryMatIterator it(arrays, (uchar**)ptrs);
+ size_t len = it.size*a.channels();
+ Cv32suf val;
+ val.f = (float)_val;
+
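+    // IEEE 754 single precision: a value is NaN iff its exponent bits are all ones
+    // and its mantissa is non-zero, i.e. (bits & 0x7fffffff) > 0x7f800000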
+ for( size_t i = 0; i < it.nplanes; i++, ++it )
+ {
+ int* tptr = ptrs[0];
+ for( size_t j = 0; j < len; j++ )
+ if( (tptr[j] & 0x7fffffff) > 0x7f800000 )
+ tptr[j] = val.i;
+ }
+}
+
+
void exp(const float* src, float* dst, int n)
{
Exp_32f(src, dst, n);
extern volatile bool USE_SSE2;
-typedef void (*BinaryFunc)(const uchar* src1, size_t step1,
- const uchar* src2, size_t step2,
- uchar* dst, size_t step, Size sz,
- void*);
-
-BinaryFunc getConvertFunc(int sdepth, int ddepth);
-BinaryFunc getConvertScaleFunc(int sdepth, int ddepth);
-BinaryFunc getCopyMaskFunc(size_t esz);
-
enum { BLOCK_SIZE = 1024 };
#ifdef HAVE_IPP
#include "precomp.hpp"
+#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
+#include "emmintrin.h"
+#endif
+
namespace cv
{
DEF_RANDI_FUNC(16u, ushort)
DEF_RANDI_FUNC(16s, short)
DEF_RANDI_FUNC(32s, int)
-
+
static void randf_32f( float* arr, int len, uint64* state, const Vec2f* p, bool )
{
uint64 temp = *state;
- int i;
+ int i = 0;
- for( i = 0; i <= len - 4; i += 4 )
+ for( ; i <= len - 4; i += 4 )
{
- float f0, f1;
-
- temp = RNG_NEXT(temp);
- f0 = (int)temp*p[i][0] + p[i][1];
- temp = RNG_NEXT(temp);
- f1 = (int)temp*p[i+1][0] + p[i+1][1];
- arr[i] = f0; arr[i+1] = f1;
-
- temp = RNG_NEXT(temp);
- f0 = (int)temp*p[i+2][0] + p[i+2][1];
- temp = RNG_NEXT(temp);
- f1 = (int)temp*p[i+3][0] + p[i+3][1];
- arr[i+2] = f0; arr[i+3] = f1;
+ float f[4];
+ f[0] = (float)(int)(temp = RNG_NEXT(temp));
+ f[1] = (float)(int)(temp = RNG_NEXT(temp));
+ f[2] = (float)(int)(temp = RNG_NEXT(temp));
+ f[3] = (float)(int)(temp = RNG_NEXT(temp));
+
+        // Handwritten SSE is required not for performance but for numerical stability:
+        // 32-bit gcc and MSVC compilers tend to generate double precision SSE code,
+        // while 64-bit compilers generate single precision SIMD instructions,
+        // so manual vectorization forces all compilers to use single precision.
+#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
+ __m128 q0 = _mm_loadu_ps((const float*)(p + i));
+ __m128 q1 = _mm_loadu_ps((const float*)(p + i + 2));
+
+ __m128 q01l = _mm_unpacklo_ps(q0, q1);
+ __m128 q01h = _mm_unpackhi_ps(q0, q1);
+
+ __m128 p0 = _mm_unpacklo_ps(q01l, q01h);
+ __m128 p1 = _mm_unpackhi_ps(q01l, q01h);
+
+ _mm_storeu_ps(arr + i, _mm_add_ps(_mm_mul_ps(_mm_loadu_ps(f), p0), p1));
+#else
+ arr[i+0] = f[0]*p[i+0][0] + p[i+0][1];
+ arr[i+1] = f[1]*p[i+1][0] + p[i+1][1];
+ arr[i+2] = f[2]*p[i+2][0] + p[i+2][1];
+ arr[i+3] = f[3]*p[i+3][0] + p[i+3][1];
+#endif
}
for( ; i < len; i++ )
{
temp = RNG_NEXT(temp);
+#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
+ _mm_store_ss(arr + i, _mm_add_ss(
+ _mm_mul_ss(_mm_set_ss((float)(int)temp), _mm_set_ss(p[i][0])),
+ _mm_set_ss(p[i][1]))
+ );
+#else
arr[i] = (int)temp*p[i][0] + p[i][1];
+#endif
}
*state = temp;
}
else
#endif
- //vz why do we need unroll here? no sse = no need to unroll
{
for( ; j <= n - 4; j += 4 )
{
}
else
#endif
- //vz no need to unroll here - if no sse
{
for( ; j <= n - 4; j += 4 )
{
}
else
#endif
- //vz why do we need unroll here? no sse = no unroll
{
for( ; j <= n - 4; j += 4 )
{
1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
};
+int normHamming(const uchar* a, int n)
+{
+ int i = 0, result = 0;
+#if CV_NEON
+ if (CPU_HAS_NEON_FEATURE)
+ {
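+        // vcntq_u8 gives per-byte popcounts; the widening pairwise adds (vpaddlq)
+        // accumulate them into four 32-bit lanes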
+ uint32x4_t bits = vmovq_n_u32(0);
+ for (; i <= n - 16; i += 16) {
+ uint8x16_t A_vec = vld1q_u8 (a + i);
+ uint8x16_t bitsSet = vcntq_u8 (A_vec);
+ uint16x8_t bitSet8 = vpaddlq_u8 (bitsSet);
+ uint32x4_t bitSet4 = vpaddlq_u16 (bitSet8);
+ bits = vaddq_u32(bits, bitSet4);
+ }
+ uint64x2_t bitSet2 = vpaddlq_u32 (bits);
+ result = vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),0);
+ result += vgetq_lane_s32 (vreinterpretq_s32_u64(bitSet2),2);
+ }
+ else
+#endif
+ for( ; i <= n - 4; i += 4 )
+ result += popCountTable[a[i]] + popCountTable[a[i+1]] +
+ popCountTable[a[i+2]] + popCountTable[a[i+3]];
+ for( ; i < n; i++ )
+ result += popCountTable[a[i]];
+ return result;
+}
+
int normHamming(const uchar* a, const uchar* b, int n)
{
int i = 0, result = 0;
return result;
}
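+// cell-wise Hamming norm: with cellSize 2 (or 4) every non-zero 2-bit (4-bit) cell adds 1;
+// cellSize 2 corresponds to NORM_HAMMING2, used for ORB descriptors built with WTA_K 3 or 4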
+int normHamming(const uchar* a, int n, int cellSize)
+{
+ if( cellSize == 1 )
+ return normHamming(a, n);
+ const uchar* tab = 0;
+ if( cellSize == 2 )
+ tab = popCountTable2;
+ else if( cellSize == 4 )
+ tab = popCountTable4;
+ else
+ CV_Error( CV_StsBadSize, "bad cell size (not 1, 2 or 4) in normHamming" );
+ int i = 0, result = 0;
+#if CV_ENABLE_UNROLLED
+ for( ; i <= n - 4; i += 4 )
+ result += tab[a[i]] + tab[a[i+1]] + tab[a[i+2]] + tab[a[i+3]];
+#endif
+ for( ; i < n; i++ )
+ result += tab[a[i]];
+ return result;
+}
+
int normHamming(const uchar* a, const uchar* b, int n, int cellSize)
{
if( cellSize == 1 )
int depth = src.depth(), cn = src.channels();
normType &= 7;
- CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
+ CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR ||
+ ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src.type() == CV_8U) );
- if( depth == CV_32F && src.isContinuous() && mask.empty() )
+ if( src.isContinuous() && mask.empty() )
{
size_t len = src.total()*cn;
if( len == (size_t)(int)len )
{
- const float* data = src.ptr<float>();
-
- if( normType == NORM_L2 )
- {
- double result = 0;
- GET_OPTIMIZED(normL2_32f)(data, 0, &result, (int)len, 1);
- return std::sqrt(result);
- }
- if( normType == NORM_L1 )
+ if( depth == CV_32F )
{
- double result = 0;
- GET_OPTIMIZED(normL1_32f)(data, 0, &result, (int)len, 1);
- return result;
+ const float* data = src.ptr<float>();
+
+ if( normType == NORM_L2 )
+ {
+ double result = 0;
+ GET_OPTIMIZED(normL2_32f)(data, 0, &result, (int)len, 1);
+ return std::sqrt(result);
+ }
+ if( normType == NORM_L2SQR )
+ {
+ double result = 0;
+ GET_OPTIMIZED(normL2_32f)(data, 0, &result, (int)len, 1);
+ return result;
+ }
+ if( normType == NORM_L1 )
+ {
+ double result = 0;
+ GET_OPTIMIZED(normL1_32f)(data, 0, &result, (int)len, 1);
+ return result;
+ }
+ if( normType == NORM_INF )
+ {
+ float result = 0;
+ GET_OPTIMIZED(normInf_32f)(data, 0, &result, (int)len, 1);
+ return result;
+ }
}
+ if( depth == CV_8U )
{
- float result = 0;
- GET_OPTIMIZED(normInf_32f)(data, 0, &result, (int)len, 1);
- return result;
-
+ const uchar* data = src.ptr<uchar>();
+
+ if( normType == NORM_HAMMING )
+ return normHamming(data, (int)len);
+
+ if( normType == NORM_HAMMING2 )
+ return normHamming(data, (int)len, 2);
}
}
}
CV_Assert( mask.empty() || mask.type() == CV_8U );
+ if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
+ {
+ if( !mask.empty() )
+ {
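+            // the 0/255 mask is ANDed with src: masked-out bytes become zero and
+            // contribute nothing to the bit count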
+ Mat temp;
+ bitwise_and(src, mask, temp);
+ return norm(temp, normType);
+ }
+ int cellSize = normType == NORM_HAMMING ? 1 : 2;
+
+ const Mat* arrays[] = {&src, 0};
+ uchar* ptrs[1];
+ NAryMatIterator it(arrays, ptrs);
+ int total = (int)it.size;
+ int result = 0;
+
+ for( size_t i = 0; i < it.nplanes; i++, ++it )
+ result += normHamming(ptrs[0], total, cellSize);
+
+ return result;
+ }
+
NormFunc func = normTab[normType >> 1][depth];
CV_Assert( func != 0 );
NAryMatIterator it(arrays, ptrs);
int j, total = (int)it.size, blockSize = total, intSumBlockSize = 0, count = 0;
bool blockSum = (normType == NORM_L1 && depth <= CV_16S) ||
- (normType == NORM_L2 && depth <= CV_8S);
+ ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S);
int isum = 0;
int *ibuf = &result.i;
size_t esz = 0;
CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
normType &= 7;
- CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
+ CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR ||
+ ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) );
- if( src1.depth() == CV_32F && src1.isContinuous() && src2.isContinuous() && mask.empty() )
+ if( src1.isContinuous() && src2.isContinuous() && mask.empty() )
{
size_t len = src1.total()*src1.channels();
if( len == (size_t)(int)len )
{
- const float* data1 = src1.ptr<float>();
- const float* data2 = src2.ptr<float>();
-
- if( normType == NORM_L2 )
+ if( src1.depth() == CV_32F )
{
- double result = 0;
- GET_OPTIMIZED(normDiffL2_32f)(data1, data2, 0, &result, (int)len, 1);
- return std::sqrt(result);
- }
- if( normType == NORM_L1 )
- {
- double result = 0;
- GET_OPTIMIZED(normDiffL1_32f)(data1, data2, 0, &result, (int)len, 1);
- return result;
- }
- {
- float result = 0;
- GET_OPTIMIZED(normDiffInf_32f)(data1, data2, 0, &result, (int)len, 1);
- return result;
+ const float* data1 = src1.ptr<float>();
+ const float* data2 = src2.ptr<float>();
+
+ if( normType == NORM_L2 )
+ {
+ double result = 0;
+ GET_OPTIMIZED(normDiffL2_32f)(data1, data2, 0, &result, (int)len, 1);
+ return std::sqrt(result);
+ }
+ if( normType == NORM_L2SQR )
+ {
+ double result = 0;
+ GET_OPTIMIZED(normDiffL2_32f)(data1, data2, 0, &result, (int)len, 1);
+ return result;
+ }
+ if( normType == NORM_L1 )
+ {
+ double result = 0;
+ GET_OPTIMIZED(normDiffL1_32f)(data1, data2, 0, &result, (int)len, 1);
+ return result;
+ }
+ if( normType == NORM_INF )
+ {
+ float result = 0;
+ GET_OPTIMIZED(normDiffInf_32f)(data1, data2, 0, &result, (int)len, 1);
+ return result;
+ }
}
}
}
CV_Assert( mask.empty() || mask.type() == CV_8U );
+ if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
+ {
+ if( !mask.empty() )
+ {
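+            // XOR leaves only the differing bits; ANDing with the mask then zeroes
+            // the bytes excluded from the norm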
+ Mat temp;
+ bitwise_xor(src1, src2, temp);
+ bitwise_and(temp, mask, temp);
+ return norm(temp, normType);
+ }
+ int cellSize = normType == NORM_HAMMING ? 1 : 2;
+
+ const Mat* arrays[] = {&src1, &src2, 0};
+ uchar* ptrs[2];
+ NAryMatIterator it(arrays, ptrs);
+ int total = (int)it.size;
+ int result = 0;
+
+ for( size_t i = 0; i < it.nplanes; i++, ++it )
+ result += normHamming(ptrs[0], ptrs[1], total, cellSize);
+
+ return result;
+ }
+
NormDiffFunc func = normDiffTab[normType >> 1][depth];
CV_Assert( func != 0 );
NAryMatIterator it(arrays, ptrs);
int j, total = (int)it.size, blockSize = total, intSumBlockSize = 0, count = 0;
bool blockSum = (normType == NORM_L1 && depth <= CV_16S) ||
- (normType == NORM_L2 && depth <= CV_8S);
+ ((normType == NORM_L2 || normType == NORM_L2SQR) && depth <= CV_8S);
unsigned isum = 0;
unsigned *ibuf = &result.u;
size_t esz = 0;
struct MeanStdDevOp : public BaseElemWiseOp
{
+ Scalar sqmeanRef;
+ int cn;
+
MeanStdDevOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SCALAR_OUTPUT, 1, 1, Scalar::all(0))
{
+ cn = 0;
context = 7;
};
void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
cvtest::multiply(temp, temp, temp);
Scalar mean = cvtest::mean(src[0], mask);
Scalar sqmean = cvtest::mean(temp, mask);
+
+ sqmeanRef = sqmean;
+ cn = temp.channels();
for( int c = 0; c < 4; c++ )
sqmean[c] = std::sqrt(std::max(sqmean[c] - mean[c]*mean[c], 0.));
}
double getMaxErr(int)
{
- return 1e-6;
+ CV_Assert(cn > 0);
+ double err = sqmeanRef[0];
+ for(int i = 1; i < cn; ++i)
+ err = std::max(err, sqmeanRef[i]);
+ return 3e-7 * err;
}
};
};
int getRandomType(RNG& rng)
{
- return cvtest::randomType(rng, DEPTH_MASK_ALL_BUT_8S, 1, 4);
+ int type = cvtest::randomType(rng, DEPTH_MASK_ALL_BUT_8S, 1, 4);
+ for(;;)
+ {
+ normType = rng.uniform(1, 8);
+ if( normType == NORM_INF || normType == NORM_L1 ||
+ normType == NORM_L2 || normType == NORM_L2SQR ||
+ normType == NORM_HAMMING || normType == NORM_HAMMING2 )
+ break;
+ }
+ if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
+ {
+ type = CV_8U;
+ }
+ return type;
}
void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
{
}
void generateScalars(int, RNG& rng)
{
- normType = 1 << rng.uniform(0, 3);
}
double getMaxErr(int)
{
bool check_full(int type); // complex test for symmetric matrix\r
virtual void run (int) = 0; // main testing method\r
\r
-private:\r
+protected:\r
\r
float eps_val_32, eps_vec_32;\r
float eps_val_64, eps_vec_64;\r
+ int ntests;\r
+ \r
bool check_pair_count(const cv::Mat& src, const cv::Mat& evalues, int low_index = -1, int high_index = -1);\r
bool check_pair_count(const cv::Mat& src, const cv::Mat& evalues, const cv::Mat& evectors, int low_index = -1, int high_index = -1);\r
bool check_pairs_order(const cv::Mat& eigen_values); // checking the order of eigenvalues & vectors (they should be in non-ascending order)\r
\r
void Core_EigenTest_Scalar_32::run(int) \r
{\r
- const size_t MATRIX_COUNT = 500;\r
- for (size_t i = 0; i < MATRIX_COUNT; ++i)\r
+ for (int i = 0; i < ntests; ++i)\r
{\r
float value = cv::randu<float>();\r
cv::Mat src(1, 1, CV_32FC1, Scalar::all((float)value));\r
\r
void Core_EigenTest_Scalar_64::run(int)\r
{\r
- const size_t MATRIX_COUNT = 500;\r
- for (size_t i = 0; i < MATRIX_COUNT; ++i)\r
+ for (int i = 0; i < ntests; ++i)\r
{\r
float value = cv::randu<float>();\r
cv::Mat src(1, 1, CV_64FC1, Scalar::all((double)value));\r
void Core_EigenTest_32::run(int) { check_full(CV_32FC1); }\r
void Core_EigenTest_64::run(int) { check_full(CV_64FC1); }\r
\r
-Core_EigenTest::Core_EigenTest() : eps_val_32(1e-3f), eps_vec_32(1e-2f), eps_val_64(1e-4f), eps_vec_64(1e-3f) {}\r
+Core_EigenTest::Core_EigenTest()\r
+: eps_val_32(1e-3f), eps_vec_32(1e-2f),\r
+ eps_val_64(1e-4f), eps_vec_64(1e-3f), ntests(100) {}\r
Core_EigenTest::~Core_EigenTest() {}\r
\r
bool Core_EigenTest::check_pair_count(const cv::Mat& src, const cv::Mat& evalues, int low_index, int high_index)\r
\r
bool Core_EigenTest::check_full(int type)\r
{\r
- const int MATRIX_COUNT = 500;\r
const int MAX_DEGREE = 7;\r
\r
srand((unsigned int)time(0));\r
\r
- for (int i = 1; i <= MATRIX_COUNT; ++i)\r
+ for (int i = 0; i < ntests; ++i)\r
{\r
- int src_size = (int)(std::pow(2.0, (rand()%MAX_DEGREE+1)*1.0));\r
+ int src_size = (int)(std::pow(2.0, (rand()%MAX_DEGREE)+1.));\r
\r
cv::Mat src(src_size, src_size, type);\r
\r
flags(0), have_u(false), have_v(false), symmetric(false), compact(false), vector_w(false)
{
test_case_count = 100;
+ max_log_array_size = 8;
test_array[TEMP].push_back(NULL);
test_array[TEMP].push_back(NULL);
test_array[TEMP].push_back(NULL);
bool TestSparseMat();
bool TestVec();
bool TestMatxMultiplication();
+ bool TestSubMatAccess();
bool operations1();
- void checkDiff(const Mat& m1, const Mat& m2, const string& s) { if (norm(m1, m2, NORM_INF) != 0) throw test_excep(s); }
- void checkDiffF(const Mat& m1, const Mat& m2, const string& s) { if (norm(m1, m2, NORM_INF) > 1e-5) throw test_excep(s); }
-
+ void checkDiff(const Mat& m1, const Mat& m2, const string& s)
+ {
+ if (norm(m1, m2, NORM_INF) != 0) throw test_excep(s);
+ }
+ void checkDiffF(const Mat& m1, const Mat& m2, const string& s)
+ {
+ if (norm(m1, m2, NORM_INF) > 1e-5) throw test_excep(s);
+ }
};
CV_OperationsTest::CV_OperationsTest()
}
+bool CV_OperationsTest::TestSubMatAccess()
+{
+ try
+ {
+ Mat_<float> T_bs(4,4);
+ Vec3f cdir(1.f, 1.f, 0.f);
+ Vec3f ydir(1.f, 0.f, 1.f);
+ Vec3f fpt(0.1f, 0.7f, 0.2f);
+ T_bs.setTo(0);
+        T_bs(Range(0,3),Range(2,3)) = 1.0*Mat(cdir); // weird OpenCV quirk: multiplying by 1.0 yields a Mat expression that can be assigned to the sub-matrix
+ T_bs(Range(0,3),Range(1,2)) = 1.0*Mat(ydir);
+ T_bs(Range(0,3),Range(0,1)) = 1.0*Mat(cdir.cross(ydir));
+ T_bs(Range(0,3),Range(3,4)) = 1.0*Mat(fpt);
+ T_bs(3,3) = 1.0;
+ //std::cout << "[Nav Grok] S frame =" << std::endl << T_bs << std::endl;
+
+ // set up display coords, really just the S frame
+        std::vector<float> coords;
+
+ for (int i=0; i<16; i++)
+ {
+ coords.push_back(T_bs(i));
+ //std::cout << T_bs1(i) << std::endl;
+ }
+ CV_Assert( norm(coords, T_bs.reshape(1,1), NORM_INF) == 0 );
+ }
+ catch (const test_excep& e)
+ {
+ ts->printf(cvtest::TS::LOG, "%s\n", e.s.c_str());
+ ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH);
+ return false;
+ }
+ return true;
+}
+
bool CV_OperationsTest::TestTemplateMat()
{
try
{
try
{
- Matx33f mat(1, 0, 0, 0, 1, 0, 0, 0, 1); // Identity matrix
+        Matx33f mat(1, 1, 1, 0, 1, 1, 0, 0, 1); // upper triangular matrix of ones
Point2f pt(3, 4);
Point3f res = mat * pt; // Correctly assumes homogeneous coordinates
- if(res.x != 3.0) throw test_excep();
- if(res.y != 4.0) throw test_excep();
- if(res.z != 1.0) throw test_excep();
+
+ Vec3f res2 = mat*Vec3f(res.x, res.y, res.z);
+
+ if(res.x != 8.0) throw test_excep();
+ if(res.y != 5.0) throw test_excep();
+ if(res.z != 1.0) throw test_excep();
+
+ if(res2[0] != 14.0) throw test_excep();
+ if(res2[1] != 6.0) throw test_excep();
+ if(res2[2] != 1.0) throw test_excep();
+
+ Matx44f mat44f(1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1);
+ Matx44d mat44d(1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1);
+ Scalar s(4, 3, 2, 1);
+ Scalar sf = mat44f*s;
+ Scalar sd = mat44d*s;
+
+ if(sf[0] != 10.0) throw test_excep();
+ if(sf[1] != 6.0) throw test_excep();
+ if(sf[2] != 3.0) throw test_excep();
+ if(sf[3] != 1.0) throw test_excep();
+
+ if(sd[0] != 10.0) throw test_excep();
+ if(sd[1] != 6.0) throw test_excep();
+ if(sd[2] != 3.0) throw test_excep();
+ if(sd[3] != 1.0) throw test_excep();
}
catch(const test_excep&)
{
if (!TestMatxMultiplication())
return;
+
+ if (!TestSubMatAccess())
+ return;
if (!operations1())
return;
The current implementation supports the following types of a descriptor extractor:
- * ``"SIFT"`` -- :ocv:class:`SiftDescriptorExtractor`
- * ``"SURF"`` -- :ocv:class:`SurfDescriptorExtractor`
- * ``"ORB"`` -- :ocv:class:`OrbDescriptorExtractor`
+ * ``"SIFT"`` -- :ocv:class:`SIFT`
+ * ``"SURF"`` -- :ocv:class:`SURF`
+ * ``"ORB"`` -- :ocv:class:`ORB`
* ``"BRIEF"`` -- :ocv:class:`BriefDescriptorExtractor`
A combined format is also supported: a descriptor extractor adapter name ( ``"Opponent"`` --
:ocv:class:`OpponentColorDescriptorExtractor` ) followed by a descriptor extractor name,
for example: ``"OpponentSIFT"`` .
-
-SiftDescriptorExtractor
------------------------
-.. ocv:class:: SiftDescriptorExtractor
-
-Wrapping class for computing descriptors by using the
-:ocv:class:`SIFT` class. ::
-
- class SiftDescriptorExtractor : public DescriptorExtractor
- {
- public:
- SiftDescriptorExtractor(
- const SIFT::DescriptorParams& descriptorParams=SIFT::DescriptorParams(),
- const SIFT::CommonParams& commonParams=SIFT::CommonParams() );
- SiftDescriptorExtractor( double magnification, bool isNormalize=true,
- bool recalculateAngles=true, int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
- int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
- int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
- int angleMode=SIFT::CommonParams::FIRST_ANGLE );
-
- virtual void read (const FileNode &fn);
- virtual void write (FileStorage &fs) const;
- virtual int descriptorSize() const;
- virtual int descriptorType() const;
- protected:
- ...
- }
-
-
-
-
-SurfDescriptorExtractor
------------------------
-.. ocv:class:: SurfDescriptorExtractor
-
-Wrapping class for computing descriptors by using the
-:ocv:class:`SURF` class. ::
-
- class SurfDescriptorExtractor : public DescriptorExtractor
- {
- public:
- SurfDescriptorExtractor( int nOctaves=4,
- int nOctaveLayers=2, bool extended=false );
-
- virtual void read (const FileNode &fn);
- virtual void write (FileStorage &fs) const;
- virtual int descriptorSize() const;
- virtual int descriptorType() const;
- protected:
- ...
- }
-
-
-
-
-OrbDescriptorExtractor
----------------------------
-.. ocv:class:: OrbDescriptorExtractor
-
-Wrapping class for computing descriptors by using the
-:ocv:class:`ORB` class. ::
-
- template<typename T>
- class ORbDescriptorExtractor : public DescriptorExtractor
- {
- public:
- OrbDescriptorExtractor( ORB::PatchSize patch_size );
-
- virtual void read( const FileNode &fn );
- virtual void write( FileStorage &fs ) const;
- virtual int descriptorSize() const;
- virtual int descriptorType() const;
- protected:
- ...
- }
-
-
-
-
-CalonderDescriptorExtractor
----------------------------
-.. ocv:class:: CalonderDescriptorExtractor
-
-Wrapping class for computing descriptors by using the
-:ocv:class:`RTreeClassifier` class. ::
-
- template<typename T>
- class CalonderDescriptorExtractor : public DescriptorExtractor
- {
- public:
- CalonderDescriptorExtractor( const string& classifierFile );
-
- virtual void read( const FileNode &fn );
- virtual void write( FileStorage &fs ) const;
- virtual int descriptorSize() const;
- virtual int descriptorType() const;
- protected:
- ...
- }
-
-
OpponentColorDescriptorExtractor
--------------------------------
.. ocv:class:: OpponentColorDescriptorExtractor
*
``BruteForce-Hamming``
*
- ``BruteForce-HammingLUT``
+ ``BruteForce-Hamming(2)``
*
``FlannBased``
-BruteForceMatcher
+BFMatcher
-----------------
-.. ocv:class:: BruteForceMatcher
+.. ocv:class:: BFMatcher
Brute-force descriptor matcher. For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. This descriptor matcher supports masking permissible matches of descriptor sets. ::
- template<class Distance>
- class BruteForceMatcher : public DescriptorMatcher
- {
- public:
- BruteForceMatcher( Distance d = Distance() );
- virtual ~BruteForceMatcher();
-
- virtual bool isMaskSupported() const;
- virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
- protected:
- ...
- }
+BFMatcher::BFMatcher
+--------------------
+Brute-force matcher constructor.
-For efficiency, ``BruteForceMatcher`` is used as a template parameterized with the distance type. For float descriptors, ``L2<float>`` is a common choice. The following distances are supported: ::
+.. ocv:function:: BFMatcher::BFMatcher( int distanceType, bool crossCheck=false )
- template<typename T>
- struct Accumulator
- {
- typedef T Type;
- };
-
- template<> struct Accumulator<unsigned char> { typedef unsigned int Type; };
- template<> struct Accumulator<unsigned short> { typedef unsigned int Type; };
- template<> struct Accumulator<char> { typedef int Type; };
- template<> struct Accumulator<short> { typedef int Type; };
-
- /*
- * Euclidean distance functor
- */
- template<class T>
- struct L2
- {
- typedef T ValueType;
- typedef typename Accumulator<T>::Type ResultType;
-
- ResultType operator()( const T* a, const T* b, int size ) const;
- };
+    :param distanceType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``NORM_L1`` and ``NORM_L2`` are preferable choices for SIFT and SURF descriptors; ``NORM_HAMMING`` should be used with ORB and BRIEF; ``NORM_HAMMING2`` should be used with ORB when ``WTA_K`` is 3 or 4 (see the ``ORB::ORB`` constructor description).
- /*
- * Squared Euclidean distance functor
- */
- template<class T>
- struct SL2
- {
- typedef T ValueType;
- typedef typename Accumulator<T>::Type ResultType;
-
- ResultType operator()( const T* a, const T* b, int size ) const;
- };
- // Note: in case of SL2 distance a parameter maxDistance in the method DescriptorMatcher::radiusMatch
- // is a squared maximum distance in L2.
-
- /*
- * Manhattan distance (city block distance) functor
- */
- template<class T>
- struct CV_EXPORTS L1
- {
- typedef T ValueType;
- typedef typename Accumulator<T>::Type ResultType;
-
- ResultType operator()( const T* a, const T* b, int size ) const;
- };
-
- /*
- * Hamming distance functor
- */
- struct HammingLUT
- {
- typedef unsigned char ValueType;
- typedef int ResultType;
-
- ResultType operator()( const unsigned char* a, const unsigned char* b,
- int size ) const;
- ...
- };
-
- struct Hamming
- {
- typedef unsigned char ValueType;
- typedef int ResultType;
-
- ResultType operator()( const unsigned char* a, const unsigned char* b,
- int size ) const;
- };
-
-
-
-
+    :param crossCheck: If it is false, this will be the default ``BFMatcher`` behaviour: the matcher finds the ``k`` nearest neighbours for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for the ``i``-th query descriptor the ``j``-th descriptor in the matcher's collection is the nearest one and vice versa, i.e. the ``BFMatcher`` will only return consistent pairs. This technique usually produces the best results with a minimal number of outliers when there are enough matches. It is an alternative to the ratio test used by D. Lowe in the SIFT paper (see the usage sketch below).
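+
+A minimal usage sketch (``desc1`` and ``desc2`` are assumed to be ``CV_8U`` descriptor matrices computed by ORB): ::
+
+    BFMatcher matcher(NORM_HAMMING);
+    vector<DMatch> matches;
+    matcher.match(desc1, desc2, matches);   // nearest neighbour for every query descriptor
+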
FlannBasedMatcher
* ``"FAST"`` -- :ocv:class:`FastFeatureDetector`
* ``"STAR"`` -- :ocv:class:`StarFeatureDetector`
-* ``"SIFT"`` -- :ocv:class:`SiftFeatureDetector`
-* ``"SURF"`` -- :ocv:class:`SurfFeatureDetector`
-* ``"ORB"`` -- :ocv:class:`OrbFeatureDetector`
-* ``"MSER"`` -- :ocv:class:`MserFeatureDetector`
+* ``"SIFT"`` -- :ocv:class:`SIFT` (nonfree module)
+* ``"SURF"`` -- :ocv:class:`SURF` (nonfree module)
+* ``"ORB"`` -- :ocv:class:`ORB`
+* ``"MSER"`` -- :ocv:class:`MSER`
* ``"GFTT"`` -- :ocv:class:`GoodFeaturesToTrackDetector`
* ``"HARRIS"`` -- :ocv:class:`GoodFeaturesToTrackDetector` with Harris detector enabled
* ``"Dense"`` -- :ocv:class:`DenseFeatureDetector`
...
};
-SiftFeatureDetector
--------------------
-.. ocv:class:: SiftFeatureDetector
-
-Wrapping class for feature detection using the
-:ocv:class:`SIFT` class. ::
-
- class SiftFeatureDetector : public FeatureDetector
- {
- public:
- SiftFeatureDetector(
- const SIFT::DetectorParams& detectorParams=SIFT::DetectorParams(),
- const SIFT::CommonParams& commonParams=SIFT::CommonParams() );
- SiftFeatureDetector( double threshold, double edgeThreshold,
- int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
- int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
- int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
- int angleMode=SIFT::CommonParams::FIRST_ANGLE );
- virtual void read( const FileNode& fn );
- virtual void write( FileStorage& fs ) const;
- protected:
- ...
- };
-
-SurfFeatureDetector
--------------------
-.. ocv:class:: SurfFeatureDetector
-
-Wrapping class for feature detection using the
-:ocv:class:`SURF` class. ::
-
- class SurfFeatureDetector : public FeatureDetector
- {
- public:
- SurfFeatureDetector( double hessianThreshold = 400., int octaves = 3,
- int octaveLayers = 4 );
- virtual void read( const FileNode& fn );
- virtual void write( FileStorage& fs ) const;
- protected:
- ...
- };
-
-
-OrbFeatureDetector
--------------------
-.. ocv:class:: OrbFeatureDetector
-
-Wrapping class for feature detection using the
-:ocv:class:`ORB` class. ::
-
- class OrbFeatureDetector : public FeatureDetector
- {
- public:
- OrbFeatureDetector( size_t n_features );
- virtual void read( const FileNode& fn );
- virtual void write( FileStorage& fs ) const;
- protected:
- ...
- };
-
-
DenseFeatureDetector
--------------------
.. ocv:class:: DenseFeatureDetector
StarAdjuster(double initial_thresh = 30.0);
...
};
-
-SurfAdjuster
-------------
-.. ocv:class:: SurfAdjuster
-
-:ocv:class:`AdjusterAdapter` for :ocv:class:`SurfFeatureDetector`. This class adjusts the ``hessianThreshold`` of ``SurfFeatureDetector``. ::
-
- class SurfAdjuster: public SurfAdjuster
- {
- SurfAdjuster();
- ...
- };
but with empty train data.
-OneWayDescriptorMatcher
------------------------
-.. ocv:class:: OneWayDescriptorMatcher
-
-Wrapping class for computing, matching, and classifying descriptors using the
-:ocv:class:`OneWayDescriptorBase` class. ::
-
- class OneWayDescriptorMatcher : public GenericDescriptorMatcher
- {
- public:
- class Params
- {
- public:
- static const int POSE_COUNT = 500;
- static const int PATCH_WIDTH = 24;
- static const int PATCH_HEIGHT = 24;
- static float GET_MIN_SCALE() { return 0.7f; }
- static float GET_MAX_SCALE() { return 1.5f; }
- static float GET_STEP_SCALE() { return 1.2f; }
-
- Params( int poseCount = POSE_COUNT,
- Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
- string pcaFilename = string(),
- string trainPath = string(), string trainImagesList = string(),
- float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(),
- float stepScale = GET_STEP_SCALE() );
-
- int poseCount;
- Size patchSize;
- string pcaFilename;
- string trainPath;
- string trainImagesList;
-
- float minScale, maxScale, stepScale;
- };
-
- OneWayDescriptorMatcher( const Params& params=Params() );
- virtual ~OneWayDescriptorMatcher();
-
- void initialize( const Params& params, const Ptr<OneWayDescriptorBase>& base=Ptr<OneWayDescriptorBase>() );
-
- // Clears keypoints stored in collection and OneWayDescriptorBase
- virtual void clear();
-
- virtual void train();
-
- virtual bool isMaskSupported();
-
- virtual void read( const FileNode &fn );
- virtual void write( FileStorage& fs ) const;
-
- virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
- protected:
- ...
- };
-
-
-
-
-FernDescriptorMatcher
----------------------
-.. ocv:class:: FernDescriptorMatcher
-
-Wrapping class for computing, matching, and classifying descriptors using the
-:ocv:class:`FernClassifier` class. ::
-
- class FernDescriptorMatcher : public GenericDescriptorMatcher
- {
- public:
- class Params
- {
- public:
- Params( int nclasses=0,
- int patchSize=FernClassifier::PATCH_SIZE,
- int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE,
- int nstructs=FernClassifier::DEFAULT_STRUCTS,
- int structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
- int nviews=FernClassifier::DEFAULT_VIEWS,
- int compressionMethod=FernClassifier::COMPRESSION_NONE,
- const PatchGenerator& patchGenerator=PatchGenerator() );
-
- Params( const string& filename );
-
- int nclasses;
- int patchSize;
- int signatureSize;
- int nstructs;
- int structSize;
- int nviews;
- int compressionMethod;
- PatchGenerator patchGenerator;
-
- string filename;
- };
-
- FernDescriptorMatcher( const Params& params=Params() );
- virtual ~FernDescriptorMatcher();
-
- virtual void clear();
-
- virtual void train();
-
- virtual bool isMaskSupported();
-
- virtual void read( const FileNode &fn );
- virtual void write( FileStorage& fs ) const;
-
- virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
-
- protected:
- ...
- };
-
-
-
-
VectorDescriptorMatcher
-----------------------
.. ocv:class:: VectorDescriptorMatcher
--------
Detects corners using the FAST algorithm
-.. ocv:function:: void FAST( const Mat& image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true )
+.. ocv:function:: void FAST( InputArray image, vector<KeyPoint>& keypoints, int threshold, bool nonmaxSupression=true )
:param image: Image where keypoints (corners) are detected.
:param nonmaxSupression: If it is true, non-maximum suppression is applied to detected corners (keypoints).
-Detects corners using the FAST algorithm by E. Rosten (*Machine Learning for High-speed Corner Detection*, 2006).
+Detects corners using the FAST algorithm by [Rosten06]_.
+
+.. [Rosten06] E. Rosten and T. Drummond. "Machine Learning for High-speed Corner Detection", ECCV 2006.
MSER
http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions). Also see http://opencv.willowgarage.com/wiki/documentation/cpp/features2d/MSER for useful comments and parameters description.
-StarDetector
-------------
-.. ocv:class:: StarDetector
-
-Class implementing the ``Star`` keypoint detector, a modified version of the ``CenSurE`` keypoint detector described in [Agrawal08]_.
-
-.. [Agrawal08] Agrawal, M. and Konolige, K. and Blas, M.R. "CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching", ECCV08, 2008
-
-StarDetector::StarDetector
---------------------------
-The Star Detector constructor
-
-.. ocv:function:: StarDetector::StarDetector()
-
-.. ocv:function:: StarDetector::StarDetector(int maxSize, int responseThreshold, int lineThresholdProjected, int lineThresholdBinarized, int suppressNonmaxSize)
-
-.. ocv:pyfunction:: cv2.StarDetector(maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize) -> <StarDetector object>
-
- :param maxSize: maximum size of the features. The following values are supported: 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128. In the case of a different value the result is undefined.
-
- :param responseThreshold: threshold for the approximated laplacian, used to eliminate weak features. The larger it is, the less features will be retrieved
-
- :param lineThresholdProjected: another threshold for the laplacian to eliminate edges
-
- :param lineThresholdBinarized: yet another threshold for the feature size to eliminate edges. The larger the 2nd threshold, the more points you get.
-
-StarDetector::operator()
-------------------------
-Finds keypoints in an image
-
-.. ocv:function:: void StarDetector::operator()(const Mat& image, vector<KeyPoint>& keypoints)
-
-.. ocv:pyfunction:: cv2.StarDetector.detect(image) -> keypoints
-
-.. ocv:cfunction:: CvSeq* cvGetStarKeypoints( const CvArr* image, CvMemStorage* storage, CvStarDetectorParams params=cvStarDetectorParams() )
-
-.. ocv:pyoldfunction:: cv.GetStarKeypoints(image, storage, params)-> keypoints
-
- :param image: The input 8-bit grayscale image
-
- :param keypoints: The output vector of keypoints
-
- :param storage: The memory storage used to store the keypoints (OpenCV 1.x API only)
-
- :param params: The algorithm parameters stored in ``CvStarDetectorParams`` (OpenCV 1.x API only)
-
ORB
-----
+---
.. ocv:class:: ORB
-Class for extracting ORB features and descriptors from an image. ::
-
- class ORB
- {
- public:
- /** The patch sizes that can be used (only one right now) */
- struct CommonParams
- {
- enum { DEFAULT_N_LEVELS = 3, DEFAULT_FIRST_LEVEL = 0};
-
- /** default constructor */
- CommonParams(float scale_factor = 1.2f, unsigned int n_levels = DEFAULT_N_LEVELS,
- int edge_threshold = 31, unsigned int first_level = DEFAULT_FIRST_LEVEL);
- void read(const FileNode& fn);
- void write(FileStorage& fs) const;
-
- /** Coefficient by which we divide the dimensions from one scale pyramid level to the next */
- float scale_factor_;
- /** The number of levels in the scale pyramid */
- unsigned int n_levels_;
- /** The level at which the image is given
- * if 1, that means we will also look at the image scale_factor_ times bigger
- */
- unsigned int first_level_;
- /** How far from the boundary the points should be */
- int edge_threshold_;
- };
-
- // constructor that initializes all the algorithm parameters
- // n_features is the number of desired features
- ORB(size_t n_features = 500, const CommonParams & detector_params = CommonParams());
- // returns the number of elements in each descriptor (32 bytes)
- int descriptorSize() const;
- // detects keypoints using ORB
- void operator()(const Mat& img, const Mat& mask,
- vector<KeyPoint>& keypoints) const;
- // detects ORB keypoints and computes the ORB descriptors for them;
- // output vector "descriptors" stores elements of descriptors and has size
- // equal descriptorSize()*keypoints.size() as each descriptor is
- // descriptorSize() elements of this vector.
- void operator()(const Mat& img, const Mat& mask,
- vector<KeyPoint>& keypoints,
- cv::Mat& descriptors,
- bool useProvidedKeypoints=false) const;
- };
-
-The class implements ORB.
-
-
-
-
-
-RandomizedTree
---------------
-.. ocv:class:: RandomizedTree
-
-Class containing a base structure for ``RTreeClassifier``. ::
-
- class CV_EXPORTS RandomizedTree
- {
- public:
- friend class RTreeClassifier;
-
- RandomizedTree();
- ~RandomizedTree();
+Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor, described in [RRKB11]_. The algorithm uses FAST in pyramids to detect stable keypoints, selects the strongest features using FAST or Harris response, finds their orientation using first-order moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or k-tuples) are rotated according to the measured orientation).
- void train(std::vector<BaseKeypoint> const& base_set,
- RNG &rng, int depth, int views,
- size_t reduced_num_dim, int num_quant_bits);
- void train(std::vector<BaseKeypoint> const& base_set,
- RNG &rng, PatchGenerator &make_patch, int depth,
- int views, size_t reduced_num_dim, int num_quant_bits);
+.. [RRKB11] Ethan Rublee, Vincent Rabaud, Kurt Konolige, Gary R. Bradski: ORB: An efficient alternative to SIFT or SURF. ICCV 2011: 2564-2571.
- // next two functions are EXPERIMENTAL
- //(do not use unless you know exactly what you do)
- static void quantizeVector(float *vec, int dim, int N, float bnds[2],
- int clamp_mode=0);
- static void quantizeVector(float *src, int dim, int N, float bnds[2],
- uchar *dst);
-
- // patch_data must be a 32x32 array (no row padding)
- float* getPosterior(uchar* patch_data);
- const float* getPosterior(uchar* patch_data) const;
- uchar* getPosterior2(uchar* patch_data);
-
- void read(const char* file_name, int num_quant_bits);
- void read(std::istream &is, int num_quant_bits);
- void write(const char* file_name) const;
- void write(std::ostream &os) const;
-
- int classes() { return classes_; }
- int depth() { return depth_; }
-
- void discardFloatPosteriors() { freePosteriors(1); }
-
- inline void applyQuantization(int num_quant_bits)
- { makePosteriors2(num_quant_bits); }
-
- private:
- int classes_;
- int depth_;
- int num_leaves_;
- std::vector<RTreeNode> nodes_;
- float **posteriors_; // 16-byte aligned posteriors
- uchar **posteriors2_; // 16-byte aligned posteriors
- std::vector<int> leaf_counts_;
-
- void createNodes(int num_nodes, RNG &rng);
- void allocPosteriorsAligned(int num_leaves, int num_classes);
- void freePosteriors(int which);
- // which: 1=posteriors_, 2=posteriors2_, 3=both
- void init(int classes, int depth, RNG &rng);
- void addExample(int class_id, uchar* patch_data);
- void finalize(size_t reduced_num_dim, int num_quant_bits);
- int getIndex(uchar* patch_data) const;
- inline float* getPosteriorByIndex(int index);
- inline uchar* getPosteriorByIndex2(int index);
- inline const float* getPosteriorByIndex(int index) const;
- void convertPosteriorsToChar();
- void makePosteriors2(int num_quant_bits);
- void compressLeaves(size_t reduced_num_dim);
- void estimateQuantPercForPosteriors(float perc[2]);
- };
-
-
-
-RandomizedTree::train
--------------------------
-Trains a randomized tree using an input set of keypoints.
+ORB::ORB
+--------
+The ORB constructor
-.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
+.. ocv:function:: ORB::ORB()
-.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
+.. ocv:function:: ORB::ORB(int nfeatures = 500, float scaleFactor = 1.2f, int nlevels = 8, int edgeThreshold = 31, int firstLevel = 0, int WTA_K=2, int scoreType=HARRIS_SCORE, int patchSize=31)
- :param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
-
- :param rng: Random-number generator used for training.
+ :param nfeatures: The maximum number of features to retain.
- :param make_patch: Patch generator used for training.
+ :param scaleFactor: Pyramid decimation ratio, greater than 1. ``scaleFactor==2`` means the classical pyramid, where each next level has 4x fewer pixels than the previous, but such a big scale factor degrades feature matching scores dramatically. On the other hand, a scale factor too close to 1 means that covering a certain scale range requires more pyramid levels, so speed suffers.
- :param depth: Maximum tree depth.
-
- :param views: Number of random views of each keypoint neighborhood to generate.
-
- :param reduced_num_dim: Number of dimensions used in the compressed signature.
+ :param nlevels: The number of pyramid levels. The smallest level will have linear size equal to ``input_image_linear_size/pow(scaleFactor, nlevels)``.
- :param num_quant_bits: Number of bits used for quantization.
-
-
-
-RandomizedTree::read
-------------------------
-Reads a pre-saved randomized tree from a file or stream.
-
-.. ocv:function:: read(const char* file_name, int num_quant_bits)
-
-.. ocv:function:: read(std::istream &is, int num_quant_bits)
-
- :param file_name: Name of the file that contains randomized tree data.
-
- :param is: Input stream associated with the file that contains randomized tree data.
-
- :param num_quant_bits: Number of bits used for quantization.
-
-
-
-RandomizedTree::write
--------------------------
-Writes the current randomized tree to a file or stream.
-
-.. ocv:function:: void write(const char* file_name) const
-
-.. ocv:function:: void write(std::ostream &os) const
-
- :param file_name: Name of the file where randomized tree data is stored.
-
- :param os: Output stream associated with the file where randomized tree data is stored.
-
-
-
-RandomizedTree::applyQuantization
--------------------------------------
-.. ocv:function:: void applyQuantization(int num_quant_bits)
-
- Applies quantization to the current randomized tree.
-
- :param num_quant_bits: Number of bits used for quantization.
-
-
-RTreeNode
----------
-.. ocv:class:: RTreeNode
-
-Class containing a base structure for ``RandomizedTree``. ::
-
- struct RTreeNode
- {
- short offset1, offset2;
-
- RTreeNode() {}
-
- RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
- : offset1(y1*PATCH_SIZE + x1),
- offset2(y2*PATCH_SIZE + x2)
- {}
-
- //! Left child on 0, right child on 1
- inline bool operator() (uchar* patch_data) const
- {
- return patch_data[offset1] > patch_data[offset2];
- }
- };
-
-
-
-RTreeClassifier
----------------
-.. ocv:class:: RTreeClassifier
-
-Class containing ``RTreeClassifier``. It represents the Calonder descriptor originally introduced by Michael Calonder. ::
-
- class CV_EXPORTS RTreeClassifier
- {
- public:
- static const int DEFAULT_TREES = 48;
- static const size_t DEFAULT_NUM_QUANT_BITS = 4;
-
- RTreeClassifier();
-
- void train(std::vector<BaseKeypoint> const& base_set,
- RNG &rng,
- int num_trees = RTreeClassifier::DEFAULT_TREES,
- int depth = DEFAULT_DEPTH,
- int views = DEFAULT_VIEWS,
- size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
- int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
- bool print_status = true);
- void train(std::vector<BaseKeypoint> const& base_set,
- RNG &rng,
- PatchGenerator &make_patch,
- int num_trees = RTreeClassifier::DEFAULT_TREES,
- int depth = DEFAULT_DEPTH,
- int views = DEFAULT_VIEWS,
- size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
- int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
- bool print_status = true);
-
- // sig must point to a memory block of at least
- //classes()*sizeof(float|uchar) bytes
- void getSignature(IplImage *patch, uchar *sig);
- void getSignature(IplImage *patch, float *sig);
- void getSparseSignature(IplImage *patch, float *sig,
- float thresh);
-
- static int countNonZeroElements(float *vec, int n, double tol=1e-10);
- static inline void safeSignatureAlloc(uchar **sig, int num_sig=1,
- int sig_len=176);
- static inline uchar* safeSignatureAlloc(int num_sig=1,
- int sig_len=176);
-
- inline int classes() { return classes_; }
- inline int original_num_classes()
- { return original_num_classes_; }
-
- void setQuantization(int num_quant_bits);
- void discardFloatPosteriors();
-
- void read(const char* file_name);
- void read(std::istream &is);
- void write(const char* file_name) const;
- void write(std::ostream &os) const;
-
- std::vector<RandomizedTree> trees_;
-
- private:
- int classes_;
- int num_quant_bits_;
- uchar **posteriors_;
- ushort *ptemp_;
- int original_num_classes_;
- bool keep_floats_;
- };
-
-
-
-RTreeClassifier::train
---------------------------
-Trains a randomized tree classifier using an input set of keypoints.
-
-.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
-
-.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
-
- :param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
+ :param edgeThreshold: The size of the border where features are not detected. It should roughly match the ``patchSize`` parameter.
- :param rng: Random-number generator used for training.
+ :param firstLevel: It should be 0 in the current implementation.
- :param make_patch: Patch generator used for training.
+ :param WTA_K: The number of points that produce each element of the oriented BRIEF descriptor. The default value 2 means classical BRIEF: we take a random point pair and compare their brightnesses, which gives a 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3 random points (the point coordinates are random, but they are generated from a pre-defined seed, so each element of the BRIEF descriptor is computed deterministically from the pixel rectangle), find the point of maximum brightness, and output the index of the winner (0, 1 or 2). Such an output occupies 2 bits, and therefore it needs a special variant of the Hamming distance, denoted as ``NORM_HAMMING2`` (2 bits per bin). When ``WTA_K=4``, we take 4 random points to compute each bin (which also occupies 2 bits, with possible values 0, 1, 2 or 3).
- :param num_trees: Number of randomized trees used in ``RTreeClassificator`` .
+ :param scoreType: The default ``HARRIS_SCORE`` means that the Harris algorithm is used to rank features (the score is written to ``KeyPoint::score`` and is used to retain the best ``nfeatures`` features); ``FAST_SCORE`` is an alternative value that produces slightly less stable keypoints but is a little faster to compute.
- :param depth: Maximum tree depth.
+ :param patchSize: The size of the patch used by the oriented BRIEF descriptor. Naturally, on smaller pyramid layers the perceived image area covered by a feature will be larger.
- :param views: Number of random views of each keypoint neighborhood to generate.
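+
+For illustration, constructing a detector with non-default parameters (the values here are arbitrary assumptions, not recommendations): ::
+
+    // keep up to 1000 features, use a finer 12-level pyramid and FAST ranking
+    ORB orb(1000, 1.1f, 12, 31, 0, 2, ORB::FAST_SCORE, 31);
+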
+ORB::operator()
+---------------
+Finds keypoints in an image and computes their descriptors
+
+.. ocv:function:: void ORB::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false ) const
- :param reduced_num_dim: Number of dimensions used in the compressed signature.
+ :param image: The input 8-bit grayscale image.
- :param num_quant_bits: Number of bits used for quantization.
+ :param mask: The operation mask.
- :param print_status: Current status of training printed on the console.
-
-
-
-RTreeClassifier::getSignature
----------------------------------
-Returns a signature for an image patch.
-
-.. ocv:function:: void getSignature(IplImage *patch, uchar *sig)
-
-.. ocv:function:: void getSignature(IplImage *patch, float *sig)
-
- :param patch: Image patch to calculate the signature for.
- :param sig: Output signature (array dimension is ``reduced_num_dim)`` .
-
-
-
-RTreeClassifier::getSparseSignature
----------------------------------------
-Returns a sparse signature for an image patch
-
-.. ocv:function:: void getSparseSignature(IplImage *patch, float *sig, float thresh)
-
- :param patch: Image patch to calculate the signature for.
+ :param keypoints: The output vector of keypoints.
- :param sig: Output signature (array dimension is ``reduced_num_dim)`` .
+ :param descriptors: The output descriptors. Pass ``cv::noArray()`` if you do not need it.
- :param thresh: Threshold used for compressing the signature.
-
- Returns a signature for an image patch similarly to ``getSignature`` but uses a threshold for removing all signature elements below the threshold so that the signature is compressed.
-
-
-RTreeClassifier::countNonZeroElements
------------------------------------------
-Returns the number of non-zero elements in an input array.
-
-.. ocv:function:: static int countNonZeroElements(float *vec, int n, double tol=1e-10)
-
- :param vec: Input vector containing float elements.
-
- :param n: Input vector size.
-
- :param tol: Threshold used for counting elements. All elements less than ``tol`` are considered as zero elements.
-
-
-
-RTreeClassifier::read
--------------------------
-Reads a pre-saved ``RTreeClassifier`` from a file or stream.
-
-.. ocv:function:: read(const char* file_name)
-
-.. ocv:function:: read(std::istream& is)
-
- :param file_name: Name of the file that contains randomized tree data.
-
- :param is: Input stream associated with the file that contains randomized tree data.
-
-
-
-RTreeClassifier::write
---------------------------
-Writes the current ``RTreeClassifier`` to a file or stream.
-
-.. ocv:function:: void write(const char* file_name) const
-
-.. ocv:function:: void write(std::ostream &os) const
-
- :param file_name: Name of the file where randomized tree data is stored.
-
- :param os: Output stream associated with the file where randomized tree data is stored.
-
-
-
-RTreeClassifier::setQuantization
-------------------------------------
-Applies quantization to the current randomized tree.
-
-.. ocv:function:: void setQuantization(int num_quant_bits)
-
- :param num_quant_bits: Number of bits used for quantization.
-
-The example below demonstrates the usage of ``RTreeClassifier`` for matching the features. The features are extracted from the test and train images with SURF. Output is
-:math:`best\_corr` and
-:math:`best\_corr\_idx` arrays that keep the best probabilities and corresponding features indices for every train feature. ::
-
- CvMemStorage* storage = cvCreateMemStorage(0);
- CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
- CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
- CvSURFParams params = cvSURFParams(500, 1);
- cvExtractSURF( test_image, 0, &imageKeypoints, &imageDescriptors,
- storage, params );
- cvExtractSURF( train_image, 0, &objectKeypoints, &objectDescriptors,
- storage, params );
-
- RTreeClassifier detector;
- int patch_width = PATCH_SIZE;
- iint patch_height = PATCH_SIZE;
- vector<BaseKeypoint> base_set;
- int i=0;
- CvSURFPoint* point;
- for (i=0;i<(n_points > 0 ? n_points : objectKeypoints->total);i++)
- {
- point=(CvSURFPoint*)cvGetSeqElem(objectKeypoints,i);
- base_set.push_back(
- BaseKeypoint(point->pt.x,point->pt.y,train_image));
- }
-
- //Detector training
- RNG rng( cvGetTickCount() );
- PatchGenerator gen(0,255,2,false,0.7,1.3,-CV_PI/3,CV_PI/3,
- -CV_PI/3,CV_PI/3);
-
- printf("RTree Classifier training...n");
- detector.train(base_set,rng,gen,24,DEFAULT_DEPTH,2000,
- (int)base_set.size(), detector.DEFAULT_NUM_QUANT_BITS);
- printf("Donen");
-
- float* signature = new float[detector.original_num_classes()];
- float* best_corr;
- int* best_corr_idx;
- if (imageKeypoints->total > 0)
- {
- best_corr = new float[imageKeypoints->total];
- best_corr_idx = new int[imageKeypoints->total];
- }
-
- for(i=0; i < imageKeypoints->total; i++)
- {
- point=(CvSURFPoint*)cvGetSeqElem(imageKeypoints,i);
- int part_idx = -1;
- float prob = 0.0f;
-
- CvRect roi = cvRect((int)(point->pt.x) - patch_width/2,
- (int)(point->pt.y) - patch_height/2,
- patch_width, patch_height);
- cvSetImageROI(test_image, roi);
- roi = cvGetImageROI(test_image);
- if(roi.width != patch_width || roi.height != patch_height)
- {
- best_corr_idx[i] = part_idx;
- best_corr[i] = prob;
- }
- else
- {
- cvSetImageROI(test_image, roi);
- IplImage* roi_image =
- cvCreateImage(cvSize(roi.width, roi.height),
- test_image->depth, test_image->nChannels);
- cvCopy(test_image,roi_image);
-
- detector.getSignature(roi_image, signature);
- for (int j = 0; j< detector.original_num_classes();j++)
- {
- if (prob < signature[j])
- {
- part_idx = j;
- prob = signature[j];
- }
- }
-
- best_corr_idx[i] = part_idx;
- best_corr[i] = prob;
-
- if (roi_image)
- cvReleaseImage(&roi_image);
- }
- cvResetImageROI(test_image);
- }
+ :param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
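+
+A short usage sketch (the input image name is an assumption): ::
+
+    Mat img = imread("scene.jpg", 0); // 8-bit grayscale input
+    ORB orb;
+    vector<KeyPoint> keypoints;
+    Mat descriptors;
+    orb(img, Mat(), keypoints, descriptors); // detect and compute in one call
+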
-..
PixelTestFn test_fn_;
};
+
/****************************************************************************************\
-* Distance *
+* Distance *
\****************************************************************************************/
+
template<typename T>
struct CV_EXPORTS Accumulator
{
template<class T>
struct CV_EXPORTS SL2
{
+ enum { normType = NORM_L2SQR };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
-
+
ResultType operator()( const T* a, const T* b, int size ) const
{
return normL2Sqr<ValueType, ResultType>(a, b, size);
template<class T>
struct CV_EXPORTS L2
{
+ enum { normType = NORM_L2 };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
-
+
ResultType operator()( const T* a, const T* b, int size ) const
{
return (ResultType)sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
template<class T>
struct CV_EXPORTS L1
{
+ enum { normType = NORM_L1 };
typedef T ValueType;
typedef typename Accumulator<T>::Type ResultType;
-
+
ResultType operator()( const T* a, const T* b, int size ) const
{
return normL1<ValueType, ResultType>(a, b, size);
*/
struct CV_EXPORTS Hamming
{
+ enum { normType = NORM_HAMMING };
typedef unsigned char ValueType;
typedef int ResultType;
-
+
/** this will count the bits in a ^ b
*/
ResultType operator()( const unsigned char* a, const unsigned char* b, int size ) const
template<int cellsize> struct CV_EXPORTS HammingMultilevel
{
+ enum { normType = NORM_HAMMING + (cellsize>1) };
typedef unsigned char ValueType;
typedef int ResultType;
{
return normHamming(a, b, size, cellsize);
}
-};
+};
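+
+// Illustrative only (not part of the header): the Hamming functor above can be
+// applied directly to two binary descriptors, e.g.
+//
+//     uchar a[32], b[32];     // e.g. two 32-byte BRIEF descriptors
+//     Hamming d;
+//     int dist = d(a, b, 32); // number of differing bits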
/****************************************************************************************\
* DMatch *
Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExtractorType)
{
- if( descriptorExtractorType.find("Opponent") == 0)
+ if( descriptorExtractorType.find("Opponent") == 0 )
{
size_t pos = string("Opponent").size();
- return DescriptorExtractor::create(descriptorExtractorType.substr(pos));
+ string type = descriptorExtractorType.substr(pos);
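+        // wrap the base extractor so descriptors are computed over the opponent color channels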
+ return new OpponentColorDescriptorExtractor(DescriptorExtractor::create(type));
}
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
cp[1] < channelKeypoints[1].size() &&
cp[2] < channelKeypoints[2].size() )
{
- const int maxInitIdx = std::max( channelKeypoints[0][idxs[0][cp[0]]].class_id,
- std::max( channelKeypoints[1][idxs[1][cp[1]]].class_id,
- channelKeypoints[2][idxs[2][cp[2]]].class_id ) );
+ const int maxInitIdx = std::max( 0, std::max( channelKeypoints[0][idxs[0][cp[0]]].class_id,
+ std::max( channelKeypoints[1][idxs[1][cp[1]]].class_id,
+ channelKeypoints[2][idxs[2][cp[2]]].class_id ) ) );
while( channelKeypoints[0][idxs[0][cp[0]]].class_id < maxInitIdx && cp[0] < channelKeypoints[0].size() ) { cp[0]++; }
while( channelKeypoints[1][idxs[1][cp[1]]].class_id < maxInitIdx && cp[1] < channelKeypoints[1].size() ) { cp[1]++; }
}
}
-static Algorithm* createGFTT() { return new GFTTDetector; }
-static Algorithm* createHarris()
-{
- GFTTDetector* d = new GFTTDetector;
- d->set("useHarris", true);
- return d;
-}
-
-static AlgorithmInfo gftt_info("Feature2D.GFTT", createGFTT);
-static AlgorithmInfo harris_info("Feature2D.HARRIS", createHarris);
-
-AlgorithmInfo* GFTTDetector::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- GFTTDetector obj;
- gftt_info.addParam(obj, "nfeatures", obj.nfeatures);
- gftt_info.addParam(obj, "qualityLevel", obj.qualityLevel);
- gftt_info.addParam(obj, "minDistance", obj.minDistance);
- gftt_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
- gftt_info.addParam(obj, "k", obj.k);
-
- harris_info.addParam(obj, "nfeatures", obj.nfeatures);
- harris_info.addParam(obj, "qualityLevel", obj.qualityLevel);
- harris_info.addParam(obj, "minDistance", obj.minDistance);
- harris_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
- harris_info.addParam(obj, "k", obj.k);
-
- initialized = true;
- }
- return &gftt_info;
-}
-
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
-
-
-static Algorithm* createDense() { return new DenseFeatureDetector; }
-static AlgorithmInfo dense_info("Feature2D.Dense", createDense);
-
-AlgorithmInfo* DenseFeatureDetector::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- DenseFeatureDetector obj;
- dense_info.addParam(obj, "initFeatureScale", obj.initFeatureScale);
- dense_info.addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
- dense_info.addParam(obj, "featureScaleMul", obj.featureScaleMul);
- dense_info.addParam(obj, "initXyStep", obj.initXyStep);
- dense_info.addParam(obj, "initImgBound", obj.initImgBound);
- dense_info.addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
- dense_info.addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale);
-
- initialized = true;
- }
- return &dense_info;
-}
/*
* GridAdaptedFeatureDetector
if( !mask.empty() )
KeyPointsFilter::runByPixelsMask( keypoints, mask );
}
-
-
-/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
-
-/* NOTE!!!
- All the AlgorithmInfo-related stuff should be in the same file as initModule_features2d().
- Otherwise, linker may throw away some seemingly unused stuff.
-*/
-
-static Algorithm* createBRIEF() { return new BriefDescriptorExtractor; }
-static AlgorithmInfo& brief_info()
-{
- static AlgorithmInfo brief_info_var("Feature2D.BRIEF", createBRIEF);
- return brief_info_var;
-}
-
-static AlgorithmInfo& brief_info_auto = brief_info();
-
-AlgorithmInfo* BriefDescriptorExtractor::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- BriefDescriptorExtractor brief;
- brief_info().addParam(brief, "bytes", brief.bytes_);
-
- initialized = true;
- }
- return &brief_info();
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-static Algorithm* createFAST() { return new FastFeatureDetector; }
-static AlgorithmInfo& fast_info()
-{
- static AlgorithmInfo fast_info_var("Feature2D.FAST", createFAST);
- return fast_info_var;
-}
-
-static AlgorithmInfo& fast_info_auto = fast_info();
-
-AlgorithmInfo* FastFeatureDetector::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- FastFeatureDetector obj;
- fast_info().addParam(obj, "threshold", obj.threshold);
- fast_info().addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
-
- initialized = true;
- }
- return &fast_info();
-}
-
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-static Algorithm* createStarDetector() { return new StarDetector; }
-static AlgorithmInfo& star_info()
-{
- static AlgorithmInfo star_info_var("Feature2D.STAR", createStarDetector);
- return star_info_var;
-}
-
-static AlgorithmInfo& star_info_auto = star_info();
-
-AlgorithmInfo* StarDetector::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- StarDetector obj;
- star_info().addParam(obj, "maxSize", obj.maxSize);
- star_info().addParam(obj, "responseThreshold", obj.responseThreshold);
- star_info().addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
- star_info().addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
- star_info().addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize);
-
- initialized = true;
- }
- return &star_info();
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-static Algorithm* createMSER() { return new MSER; }
-static AlgorithmInfo& mser_info()
-{
- static AlgorithmInfo mser_info_var("Feature2D.MSER", createMSER);
- return mser_info_var;
-}
-static AlgorithmInfo& mser_info_auto = mser_info();
-AlgorithmInfo* MSER::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- MSER obj;
- mser_info().addParam(obj, "delta", obj.delta);
- mser_info().addParam(obj, "minArea", obj.minArea);
- mser_info().addParam(obj, "maxArea", obj.maxArea);
- mser_info().addParam(obj, "maxVariation", obj.maxVariation);
- mser_info().addParam(obj, "minDiversity", obj.minDiversity);
- mser_info().addParam(obj, "maxEvolution", obj.maxEvolution);
- mser_info().addParam(obj, "areaThreshold", obj.areaThreshold);
- mser_info().addParam(obj, "minMargin", obj.minMargin);
- mser_info().addParam(obj, "edgeBlurSize", obj.edgeBlurSize);
-
- initialized = true;
- }
- return &mser_info();
-}
-
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-static Algorithm* createORB() { return new ORB; }
-static AlgorithmInfo& orb_info()
-{
- static AlgorithmInfo orb_info_var("Feature2D.ORB", createORB);
- return orb_info_var;
-}
-
-static AlgorithmInfo& orb_info_auto = orb_info();
-
-AlgorithmInfo* ORB::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- ORB obj;
- orb_info().addParam(obj, "nFeatures", obj.nfeatures);
- orb_info().addParam(obj, "scaleFactor", obj.scaleFactor);
- orb_info().addParam(obj, "nLevels", obj.nlevels);
- orb_info().addParam(obj, "firstLevel", obj.firstLevel);
- orb_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
- orb_info().addParam(obj, "patchSize", obj.patchSize);
- orb_info().addParam(obj, "WTA_K", obj.WTA_K);
- orb_info().addParam(obj, "scoreType", obj.scoreType);
-
- initialized = true;
- }
- return &orb_info();
-}
-
-bool initModule_features2d(void)
-{
- Ptr<Algorithm> brief = createBRIEF(), orb = createORB(),
- star = createStarDetector(), fastd = createFAST(), mser = createMSER();
- return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
- fastd->info() != 0 && mser->info() != 0;
-}
-
}
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+namespace cv
+{
+
+/////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
+
+/* NOTE!!!
+ All the AlgorithmInfo-related stuff should be in the same file as initModule_features2d().
+ Otherwise, linker may throw away some seemingly unused stuff.
+*/
+
+static Algorithm* createBRIEF() { return new BriefDescriptorExtractor; }
+static AlgorithmInfo& brief_info()
+{
+ static AlgorithmInfo brief_info_var("Feature2D.BRIEF", createBRIEF);
+ return brief_info_var;
+}
+
+static AlgorithmInfo& brief_info_auto = brief_info();
+
+AlgorithmInfo* BriefDescriptorExtractor::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ BriefDescriptorExtractor brief;
+ brief_info().addParam(brief, "bytes", brief.bytes_);
+
+ initialized = true;
+ }
+ return &brief_info();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static Algorithm* createFAST() { return new FastFeatureDetector; }
+static AlgorithmInfo& fast_info()
+{
+ static AlgorithmInfo fast_info_var("Feature2D.FAST", createFAST);
+ return fast_info_var;
+}
+
+static AlgorithmInfo& fast_info_auto = fast_info();
+
+AlgorithmInfo* FastFeatureDetector::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ FastFeatureDetector obj;
+ fast_info().addParam(obj, "threshold", obj.threshold);
+ fast_info().addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
+
+ initialized = true;
+ }
+ return &fast_info();
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static Algorithm* createStarDetector() { return new StarDetector; }
+static AlgorithmInfo& star_info()
+{
+ static AlgorithmInfo star_info_var("Feature2D.STAR", createStarDetector);
+ return star_info_var;
+}
+
+static AlgorithmInfo& star_info_auto = star_info();
+
+AlgorithmInfo* StarDetector::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ StarDetector obj;
+ star_info().addParam(obj, "maxSize", obj.maxSize);
+ star_info().addParam(obj, "responseThreshold", obj.responseThreshold);
+ star_info().addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
+ star_info().addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
+ star_info().addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize);
+
+ initialized = true;
+ }
+ return &star_info();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static Algorithm* createMSER() { return new MSER; }
+static AlgorithmInfo& mser_info()
+{
+ static AlgorithmInfo mser_info_var("Feature2D.MSER", createMSER);
+ return mser_info_var;
+}
+
+static AlgorithmInfo& mser_info_auto = mser_info();
+
+AlgorithmInfo* MSER::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ MSER obj;
+ mser_info().addParam(obj, "delta", obj.delta);
+ mser_info().addParam(obj, "minArea", obj.minArea);
+ mser_info().addParam(obj, "maxArea", obj.maxArea);
+ mser_info().addParam(obj, "maxVariation", obj.maxVariation);
+ mser_info().addParam(obj, "minDiversity", obj.minDiversity);
+ mser_info().addParam(obj, "maxEvolution", obj.maxEvolution);
+ mser_info().addParam(obj, "areaThreshold", obj.areaThreshold);
+ mser_info().addParam(obj, "minMargin", obj.minMargin);
+ mser_info().addParam(obj, "edgeBlurSize", obj.edgeBlurSize);
+
+ initialized = true;
+ }
+ return &mser_info();
+}
+
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static Algorithm* createORB() { return new ORB; }
+static AlgorithmInfo& orb_info()
+{
+ static AlgorithmInfo orb_info_var("Feature2D.ORB", createORB);
+ return orb_info_var;
+}
+
+static AlgorithmInfo& orb_info_auto = orb_info();
+
+AlgorithmInfo* ORB::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ ORB obj;
+ orb_info().addParam(obj, "nFeatures", obj.nfeatures);
+ orb_info().addParam(obj, "scaleFactor", obj.scaleFactor);
+ orb_info().addParam(obj, "nLevels", obj.nlevels);
+ orb_info().addParam(obj, "firstLevel", obj.firstLevel);
+ orb_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
+ orb_info().addParam(obj, "patchSize", obj.patchSize);
+ orb_info().addParam(obj, "WTA_K", obj.WTA_K);
+ orb_info().addParam(obj, "scoreType", obj.scoreType);
+
+ initialized = true;
+ }
+ return &orb_info();
+}
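+
+// Illustrative only (an assumption about typical use, not part of this file):
+// once "Feature2D.ORB" is registered, the detector can be created and tuned by
+// name through the Algorithm machinery, e.g.
+//     Ptr<FeatureDetector> orb = FeatureDetector::create("ORB");
+//     orb->set("nFeatures", 1000);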
+
+static Algorithm* createGFTT() { return new GFTTDetector; }
+static Algorithm* createHarris()
+{
+ GFTTDetector* d = new GFTTDetector;
+ d->set("useHarris", true);
+ return d;
+}
+
+static AlgorithmInfo gftt_info("Feature2D.GFTT", createGFTT);
+static AlgorithmInfo harris_info("Feature2D.HARRIS", createHarris);
+
+AlgorithmInfo* GFTTDetector::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ GFTTDetector obj;
+ gftt_info.addParam(obj, "nfeatures", obj.nfeatures);
+ gftt_info.addParam(obj, "qualityLevel", obj.qualityLevel);
+ gftt_info.addParam(obj, "minDistance", obj.minDistance);
+ gftt_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
+ gftt_info.addParam(obj, "k", obj.k);
+
+ harris_info.addParam(obj, "nfeatures", obj.nfeatures);
+ harris_info.addParam(obj, "qualityLevel", obj.qualityLevel);
+ harris_info.addParam(obj, "minDistance", obj.minDistance);
+ harris_info.addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
+ harris_info.addParam(obj, "k", obj.k);
+
+ initialized = true;
+ }
+ return &gftt_info;
+}
+
+static Algorithm* createDense() { return new DenseFeatureDetector; }
+static AlgorithmInfo dense_info("Feature2D.Dense", createDense);
+
+AlgorithmInfo* DenseFeatureDetector::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ DenseFeatureDetector obj;
+ dense_info.addParam(obj, "initFeatureScale", obj.initFeatureScale);
+ dense_info.addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
+ dense_info.addParam(obj, "featureScaleMul", obj.featureScaleMul);
+ dense_info.addParam(obj, "initXyStep", obj.initXyStep);
+ dense_info.addParam(obj, "initImgBound", obj.initImgBound);
+ dense_info.addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
+ dense_info.addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale);
+
+ initialized = true;
+ }
+ return &dense_info;
+}
+
+bool initModule_features2d(void)
+{
+ Ptr<Algorithm> brief = createBRIEF(), orb = createORB(),
+ star = createStarDetector(), fastd = createFAST(), mser = createMSER(),
+ dense = createDense(), gftt = createGFTT(), harris = createHarris();
+
+ return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
+ fastd->info() != 0 && mser->info() != 0 && dense->info() != 0 &&
+ gftt->info() != 0 && harris->info() != 0;
+}
+
+}
+
+++ /dev/null
-#include "test_precomp.hpp"
-
-#if 0
-using namespace cv;
-
-class BruteForceMatcherTest : public cvtest::BaseTest
-{
-public:
- BruteForceMatcherTest();
-protected:
- void run( int );
-};
-
-struct CV_EXPORTS L2Fake : public L2<float>
-{
-};
-
-BruteForceMatcherTest::BruteForceMatcherTest() : cvtest::BaseTest( "BruteForceMatcher", "BruteForceMatcher::matchImpl")
-{
- support_testing_modes = cvtest::TS::TIMING_MODE;
-}
-
-void BruteForceMatcherTest::run( int )
-{
- const int dimensions = 64;
- const int descriptorsNumber = 5000;
-
- Mat train = Mat( descriptorsNumber, dimensions, CV_32FC1);
- Mat query = Mat( descriptorsNumber, dimensions, CV_32FC1);
-
- Mat permutation( 1, descriptorsNumber, CV_32SC1 );
- for( int i=0;i<descriptorsNumber;i++ )
- permutation.at<int>( 0, i ) = i;
-
- //RNG rng = RNG( cvGetTickCount() );
- RNG rng = RNG( *ts->get_rng() );
- randShuffle( permutation, 1, &rng );
-
- float boundary = 500.f;
- for( int row=0;row<descriptorsNumber;row++ )
- {
- for( int col=0;col<dimensions;col++ )
- {
- int bit = rng( 2 );
- train.at<float>( permutation.at<int>( 0, row ), col ) = bit*boundary + rng.uniform( 0.f, boundary );
- query.at<float>( row, col ) = bit*boundary + rng.uniform( 0.f, boundary );
- }
- }
-
- vector<DMatch> specMatches, genericMatches;
- BruteForceMatcher<L2<float> > specMatcher;
- BruteForceMatcher<L2Fake > genericMatcher;
-
- int64 time0 = cvGetTickCount();
- specMatcher.match( query, train, specMatches );
- int64 time1 = cvGetTickCount();
- genericMatcher.match( query, train, genericMatches );
- int64 time2 = cvGetTickCount();
-
- float specMatcherTime = float(time1 - time0)/(float)cvGetTickFrequency();
- ts->printf( cvtest::TS::LOG, "Matching by matrix multiplication time s: %f, us per pair: %f\n",
- specMatcherTime*1e-6, specMatcherTime/( descriptorsNumber*descriptorsNumber ) );
-
- float genericMatcherTime = float(time2 - time1)/(float)cvGetTickFrequency();
- ts->printf( cvtest::TS::LOG, "Matching without matrix multiplication time s: %f, us per pair: %f\n",
- genericMatcherTime*1e-6, genericMatcherTime/( descriptorsNumber*descriptorsNumber ) );
-
- if( (int)specMatches.size() != descriptorsNumber || (int)genericMatches.size() != descriptorsNumber )
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
- for( int i=0;i<descriptorsNumber;i++ )
- {
- float epsilon = 0.01f;
- bool isEquiv = fabs( specMatches[i].distance - genericMatches[i].distance ) < epsilon &&
- specMatches[i].queryIdx == genericMatches[i].queryIdx &&
- specMatches[i].trainIdx == genericMatches[i].trainIdx;
- if( !isEquiv || specMatches[i].trainIdx != permutation.at<int>( 0, i ) )
- {
- ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
- break;
- }
- }
-
-
- //Test mask
- Mat mask( query.rows, train.rows, CV_8UC1 );
- rng.fill( mask, RNG::UNIFORM, 0, 2 );
-
-
- time0 = cvGetTickCount();
- specMatcher.match( query, train, specMatches, mask );
- time1 = cvGetTickCount();
- genericMatcher.match( query, train, genericMatches, mask );
- time2 = cvGetTickCount();
-
- specMatcherTime = float(time1 - time0)/(float)cvGetTickFrequency();
- ts->printf( cvtest::TS::LOG, "Matching by matrix multiplication time with mask s: %f, us per pair: %f\n",
- specMatcherTime*1e-6, specMatcherTime/( descriptorsNumber*descriptorsNumber ) );
-
- genericMatcherTime = float(time2 - time1)/(float)cvGetTickFrequency();
- ts->printf( cvtest::TS::LOG, "Matching without matrix multiplication time with mask s: %f, us per pair: %f\n",
- genericMatcherTime*1e-6, genericMatcherTime/( descriptorsNumber*descriptorsNumber ) );
-
- if( specMatches.size() != genericMatches.size() )
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-
- for( size_t i=0;i<specMatches.size();i++ )
- {
- //float epsilon = 1e-2;
- float epsilon = 10000000;
- bool isEquiv = fabs( specMatches[i].distance - genericMatches[i].distance ) < epsilon &&
- specMatches[i].queryIdx == genericMatches[i].queryIdx &&
- specMatches[i].trainIdx == genericMatches[i].trainIdx;
- if( !isEquiv )
- {
- ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
- break;
- }
- }
-}
-
-BruteForceMatcherTest taBruteForceMatcherTest;
-#endif
test.safe_run();
}
-/*TEST( Features2d_DescriptorExtractor_OpponentSIFT, regression )
-{
- CV_DescriptorExtractorTest<L2<float> > test( "descriptor-opponent-sift", 0.18f,
- DescriptorExtractor::create("OpponentSIFT"), 8.06652f );
- test.safe_run();
-}*/
-
#if CV_SSE2
TEST( Features2d_DescriptorExtractor_Calonder_uchar, regression )
{
return *(cvflann::IndexParams*)(p.params);
}
+cv::flann::IndexParams::~IndexParams()
+{
+ delete &get_params(*this);
+}
+
namespace cv
{
{
params = new ::cvflann::IndexParams();
}
-
-IndexParams::~IndexParams()
-{
- delete &get_params(*this);
-}
template<typename T>
T getParam(const IndexParams& _p, const std::string& key, const T& defaultVal=T())
useInitialFlow = false;\r
minEigThreshold = 1e-4f;\r
getMinEigenVals = false;\r
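+        // compute capability below 1.2: use the reduced-resource CC 1.1 kernel variants\r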
+ isDeviceArch11_ = !DeviceInfo().supports(FEATURE_SET_COMPUTE_12);\r
}\r
\r
void sparse(const GpuMat& prevImg, const GpuMat& nextImg, const GpuMat& prevPts, GpuMat& nextPts,\r
\r
vector<GpuMat> uPyr_;\r
vector<GpuMat> vPyr_;\r
+\r
+ bool isDeviceArch11_;\r
};\r
\r
\r
polyN = 5;\r
polySigma = 1.1;\r
flags = 0;\r
+ isDeviceArch11_ = !DeviceInfo().supports(FEATURE_SET_COMPUTE_12);\r
}\r
\r
int numLevels;\r
GpuMat frames_[2];\r
GpuMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2];\r
std::vector<GpuMat> pyramid0_, pyramid1_;\r
+\r
+ bool isDeviceArch11_;\r
};\r
\r
\r
}
+ void boxFilter5Gpu_CC11(const DevMem2Df src, int ksizeHalf, DevMem2Df dst, cudaStream_t stream)
+ {
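+        // Same 5-row box filter as boxFilter5Gpu, but launched with 128-thread
+        // blocks, presumably to fit the tighter per-block limits of CC 1.1 devices.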
+ int height = src.rows / 5;
+ int width = src.cols;
+
+ dim3 block(128);
+ dim3 grid(divUp(width, block.x), divUp(height, block.y));
+ int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float);
+
+ float boxAreaInv = 1.f / ((1 + 2*ksizeHalf) * (1 + 2*ksizeHalf));
+ boxFilter5<<<grid, block, smem, stream>>>(height, width, src, ksizeHalf, boxAreaInv, dst);
+
+ cudaSafeCall(cudaGetLastError());
+
+ if (stream == 0)
+ cudaSafeCall(cudaDeviceSynchronize());
+ }
+
+
__constant__ float c_gKer[MAX_KSIZE_HALF + 1];
template <typename Border>
}
- template <typename Border>
+ template <typename Border, int blockDimX>
void gaussianBlur5Caller(
const DevMem2Df src, int ksizeHalf, DevMem2Df dst, cudaStream_t stream)
{
int height = src.rows / 5;
int width = src.cols;
- dim3 block(256);
+ dim3 block(blockDimX);
dim3 grid(divUp(width, block.x), divUp(height, block.y));
int smem = (block.x + 2*ksizeHalf) * 5 * block.y * sizeof(float);
Border b(height, width);
static const caller_t callers[] =
{
- gaussianBlur5Caller<BrdReflect101<float> >,
- gaussianBlur5Caller<BrdReplicate<float> >,
+ gaussianBlur5Caller<BrdReflect101<float>,256>,
+ gaussianBlur5Caller<BrdReplicate<float>,256>,
};
callers[borderMode](src, ksizeHalf, dst, stream);
- }
+ }
+
+ void gaussianBlur5Gpu_CC11(
+ const DevMem2Df src, int ksizeHalf, DevMem2Df dst, int borderMode, cudaStream_t stream)
+ {
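+        // Same as gaussianBlur5Gpu, but dispatches to the 128-thread callers
+        // for compute capability 1.1 devices.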
+ typedef void (*caller_t)(const DevMem2Df, int, DevMem2Df, cudaStream_t);
+
+ static const caller_t callers[] =
+ {
+ gaussianBlur5Caller<BrdReflect101<float>,128>,
+ gaussianBlur5Caller<BrdReplicate<float>,128>,
+ };
+
+ callers[borderMode](src, ksizeHalf, dst, stream);
+ }
}}}} // namespace cv { namespace gpu { namespace device { namespace optflow_farneback
smem3[tid] = val3;\r
__syncthreads();\r
\r
+#if __CUDA_ARCH__ > 110\r
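+        // this 256 -> 128 reduction step exists only when blocks have 256 threads (CC > 1.1)\r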
if (tid < 128) \r
{ \r
smem1[tid] = val1 += smem1[tid + 128]; \r
smem3[tid] = val3 += smem3[tid + 128]; \r
} \r
__syncthreads();\r
+#endif\r
\r
if (tid < 64) \r
{ \r
smem2[tid] = val2;\r
__syncthreads();\r
\r
+#if __CUDA_ARCH__ > 110\r
if (tid < 128) \r
{ \r
smem1[tid] = val1 += smem1[tid + 128]; \r
smem2[tid] = val2 += smem2[tid + 128]; \r
} \r
__syncthreads();\r
+#endif\r
\r
if (tid < 64) \r
{ \r
smem1[tid] = val1;\r
__syncthreads();\r
\r
+#if __CUDA_ARCH__ > 110\r
if (tid < 128) \r
{ \r
smem1[tid] = val1 += smem1[tid + 128]; \r
} \r
__syncthreads();\r
+#endif\r
\r
if (tid < 64) \r
{ \r
__global__ void lkSparse(const PtrStepb I, const PtrStepb J, const PtrStep<short> dIdx, const PtrStep<short> dIdy,\r
const float2* prevPts, float2* nextPts, uchar* status, float* err, const int level, const int rows, const int cols)\r
{\r
+#if __CUDA_ARCH__ <= 110\r
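+    // CC 1.1 build: reduction buffers sized for 128-thread blocks\r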
+ __shared__ float smem1[128];\r
+ __shared__ float smem2[128];\r
+ __shared__ float smem3[128];\r
+#else\r
__shared__ float smem1[256];\r
__shared__ float smem2[256];\r
__shared__ float smem3[256];\r
+#endif\r
\r
const int tid = threadIdx.y * blockDim.x + threadIdx.x;\r
\r
///////////////////////////////////////////////////////////////////////////////\r
__global__ void scaleVector(float *d_res, const float *d_src, float scale, const int len)\r
{\r
- const int pos = blockIdx.x * blockDim.x + threadIdx.x;\r
- \r
- if (pos >= len) return;\r
- \r
- d_res[pos] = d_src[pos] * scale;\r
+ const int pos = blockIdx.x * blockDim.x + threadIdx.x;\r
+\r
+ if (pos >= len) return;\r
+\r
+ d_res[pos] = d_src[pos] * scale;\r
}\r
\r
///////////////////////////////////////////////////////////////////////////////\r
///////////////////////////////////////////////////////////////////////////////\r
static void ScaleVector(float *d_res, const float *d_src, float scale, const int len, cudaStream_t stream)\r
{\r
- dim3 threads(256);\r
- dim3 blocks(iDivUp(len, threads.x));\r
- \r
- scaleVector<<<blocks, threads, 0, stream>>>(d_res, d_src, scale, len);\r
+ dim3 threads(256);\r
+ dim3 blocks(iDivUp(len, threads.x));\r
+\r
+ scaleVector<<<blocks, threads, 0, stream>>>(d_res, d_src, scale, len);\r
}\r
\r
const int SOR_TILE_WIDTH = 32;\r
\r
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrU->ptr(), srcSize, kLevelStride * sizeof (float), srcROI, \r
ptrUNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );\r
- \r
- ScaleVector(ptrUNew->ptr(), ptrUNew->ptr(), 1.0f/scale_factor, ns * nh, stream);\r
+\r
+ ScaleVector(ptrUNew->ptr(), ptrUNew->ptr(), 1.0f/scale_factor, ns * nh, stream);\r
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);\r
\r
ncvAssertReturnNcvStat( nppiStResize_32f_C1R (ptrV->ptr(), srcSize, kLevelStride * sizeof (float), srcROI, \r
ptrVNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );\r
- \r
- ScaleVector(ptrVNew->ptr(), ptrVNew->ptr(), 1.0f/scale_factor, ns * nh, stream);\r
+\r
+ ScaleVector(ptrVNew->ptr(), ptrVNew->ptr(), 1.0f/scale_factor, ns * nh, stream);\r
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);\r
\r
cv::gpu::device::swap<FloatVector*>(ptrU, ptrUNew);\r
wsum += wx;\r
}\r
}\r
- dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = sum / wsum;\r
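+        // if nothing contributed (wsum == 0), write 0 instead of dividing by zero\r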
+ dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 0 : sum / wsum;\r
}\r
\r
\r
void boxFilter5Gpu(const DevMem2Df src, int ksizeHalf, DevMem2Df dst, cudaStream_t stream);
+ void boxFilter5Gpu_CC11(const DevMem2Df src, int ksizeHalf, DevMem2Df dst, cudaStream_t stream);
+
void setGaussianBlurKernel(const float *gKer, int ksizeHalf);
void gaussianBlurGpu(
void gaussianBlur5Gpu(
const DevMem2Df src, int ksizeHalf, DevMem2Df dst, int borderType, cudaStream_t stream);
+ void gaussianBlur5Gpu_CC11(
+ const DevMem2Df src, int ksizeHalf, DevMem2Df dst, int borderType, cudaStream_t stream);
+
}}}} // namespace cv { namespace gpu { namespace device { namespace optflow_farneback
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat &flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[])
{
- device::optflow_farneback::boxFilter5Gpu(M, blockSize/2, bufM, S(streams[0]));
+ if (!isDeviceArch11_)
+ device::optflow_farneback::boxFilter5Gpu(M, blockSize/2, bufM, S(streams[0]));
+ else
+ device::optflow_farneback::boxFilter5Gpu_CC11(M, blockSize/2, bufM, S(streams[0]));
swap(M, bufM);
for (int i = 1; i < 5; ++i)
const GpuMat& R0, const GpuMat& R1, GpuMat& flowx, GpuMat& flowy,
GpuMat& M, GpuMat &bufM, int blockSize, bool updateMatrices, Stream streams[])
{
- device::optflow_farneback::gaussianBlur5Gpu(
- M, blockSize/2, bufM, BORDER_REPLICATE_GPU, S(streams[0]));
+ if (!isDeviceArch11_)
+ device::optflow_farneback::gaussianBlur5Gpu(
+ M, blockSize/2, bufM, BORDER_REPLICATE_GPU, S(streams[0]));
+ else
+ device::optflow_farneback::gaussianBlur5Gpu_CC11(
+ M, blockSize/2, bufM, BORDER_REPLICATE_GPU, S(streams[0]));
swap(M, bufM);
device::optflow_farneback::updateFlowGpu(M, flowx, flowy, S(streams[0]));
if (keyPointsCount_[level] == 0)\r
continue;\r
\r
GpuMat descRange = descriptors.rowRange(offset, offset + keyPointsCount_[level]);\r
\r
if (blurForDescriptor)\r
-/*M///////////////////////////////////////////////////////////////////////////////////////\r
-//\r
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
-//\r
-// By downloading, copying, installing or using the software you agree to this license.\r
-// If you do not agree to this license, do not download, install,\r
-// copy or use the software.\r
-//\r
-//\r
-// Intel License Agreement\r
-// For Open Source Computer Vision Library\r
-//\r
-// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
-// Third party copyrights are property of their respective owners.\r
-//\r
-// Redistribution and use in source and binary forms, with or without modification,\r
-// are permitted provided that the following conditions are met:\r
-//\r
-// * Redistribution's of source code must retain the above copyright notice,\r
-// this list of conditions and the following disclaimer.\r
-//\r
-// * Redistribution's in binary form must reproduce the above copyright notice,\r
-// this list of conditions and the following disclaimer in the documentation\r
-// and/or other materials provided with the distribution.\r
-//\r
-// * The name of Intel Corporation may not be used to endorse or promote products\r
-// derived from this software without specific prior written permission.\r
-//\r
-// This software is provided by the copyright holders and contributors "as is" and\r
-// any express or implied warranties, including, but not limited to, the implied\r
-// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
-// In no event shall the Intel Corporation or contributors be liable for any direct,\r
-// indirect, incidental, special, exemplary, or consequential damages\r
-// (including, but not limited to, procurement of substitute goods or services;\r
-// loss of use, data, or profits; or business interruption) however caused\r
-// and on any theory of liability, whether in contract, strict liability,\r
-// or tort (including negligence or otherwise) arising in any way out of\r
-// the use of this software, even if advised of the possibility of such damage.\r
-//\r
-//M*/\r
-\r
-#include "precomp.hpp"\r
-\r
-namespace {\r
-\r
-//#define DUMP\r
-\r
-/////////////////////////////////////////////////////////////////////////////////////////////////\r
-// BroxOpticalFlow\r
-\r
-#define BROX_OPTICAL_FLOW_DUMP_FILE "opticalflow/brox_optical_flow.bin"\r
-#define BROX_OPTICAL_FLOW_DUMP_FILE_CC20 "opticalflow/brox_optical_flow_cc20.bin"\r
-\r
-struct BroxOpticalFlow : testing::TestWithParam<cv::gpu::DeviceInfo>\r
-{\r
- cv::gpu::DeviceInfo devInfo;\r
-\r
- virtual void SetUp()\r
- {\r
- devInfo = GetParam();\r
-\r
- cv::gpu::setDevice(devInfo.deviceID());\r
- }\r
-};\r
-\r
-TEST_P(BroxOpticalFlow, Regression)\r
-{\r
- cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);\r
- ASSERT_FALSE(frame0.empty());\r
-\r
- cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1);\r
- ASSERT_FALSE(frame1.empty());\r
-\r
- cv::gpu::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,\r
- 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);\r
-\r
- cv::gpu::GpuMat u;\r
- cv::gpu::GpuMat v;\r
- brox(loadMat(frame0), loadMat(frame1), u, v);\r
-\r
-#ifndef DUMP\r
- std::string fname(cvtest::TS::ptr()->get_data_path());\r
- if (devInfo.majorVersion() >= 2)\r
- fname += BROX_OPTICAL_FLOW_DUMP_FILE_CC20;\r
- else\r
- fname += BROX_OPTICAL_FLOW_DUMP_FILE;\r
-\r
- std::ifstream f(fname.c_str(), std::ios_base::binary);\r
-\r
- int rows, cols;\r
-\r
- f.read((char*)&rows, sizeof(rows));\r
- f.read((char*)&cols, sizeof(cols));\r
-\r
- cv::Mat u_gold(rows, cols, CV_32FC1);\r
-\r
- for (int i = 0; i < u_gold.rows; ++i)\r
- f.read(u_gold.ptr<char>(i), u_gold.cols * sizeof(float));\r
-\r
- cv::Mat v_gold(rows, cols, CV_32FC1);\r
-\r
- for (int i = 0; i < v_gold.rows; ++i)\r
- f.read(v_gold.ptr<char>(i), v_gold.cols * sizeof(float));\r
-\r
- EXPECT_MAT_NEAR(u_gold, u, 0);\r
- EXPECT_MAT_NEAR(v_gold, v, 0);\r
-#else\r
- std::string fname(cvtest::TS::ptr()->get_data_path());\r
- if (devInfo.majorVersion() >= 2)\r
- fname += BROX_OPTICAL_FLOW_DUMP_FILE_CC20;\r
- else\r
- fname += BROX_OPTICAL_FLOW_DUMP_FILE;\r
-\r
- std::ofstream f(fname.c_str(), std::ios_base::binary);\r
-\r
- f.write((char*)&u.rows, sizeof(u.rows));\r
- f.write((char*)&u.cols, sizeof(u.cols));\r
-\r
- cv::Mat h_u(u);\r
- cv::Mat h_v(v);\r
-\r
- for (int i = 0; i < u.rows; ++i)\r
- f.write(h_u.ptr<char>(i), u.cols * sizeof(float));\r
-\r
- for (int i = 0; i < v.rows; ++i)\r
- f.write(h_v.ptr<char>(i), v.cols * sizeof(float));\r
-\r
-#endif\r
-}\r
-\r
-INSTANTIATE_TEST_CASE_P(GPU_Video, BroxOpticalFlow, ALL_DEVICES);\r
-\r
-/////////////////////////////////////////////////////////////////////////////////////////////////\r
-// GoodFeaturesToTrack\r
-\r
-IMPLEMENT_PARAM_CLASS(MinDistance, double)\r
-\r
-PARAM_TEST_CASE(GoodFeaturesToTrack, cv::gpu::DeviceInfo, MinDistance)\r
-{\r
- cv::gpu::DeviceInfo devInfo;\r
- double minDistance;\r
-\r
- virtual void SetUp()\r
- {\r
- devInfo = GET_PARAM(0);\r
- minDistance = GET_PARAM(1);\r
-\r
- cv::gpu::setDevice(devInfo.deviceID());\r
- }\r
-};\r
-\r
-TEST_P(GoodFeaturesToTrack, Accuracy)\r
-{\r
- cv::Mat image = readImage("opticalflow/frame0.png", cv::IMREAD_GRAYSCALE);\r
- ASSERT_FALSE(image.empty());\r
-\r
- int maxCorners = 1000;\r
- double qualityLevel = 0.01;\r
-\r
- cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);\r
-\r
- if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))\r
- {\r
- try\r
- {\r
- cv::gpu::GpuMat d_pts;\r
- detector(loadMat(image), d_pts);\r
- }\r
- catch (const cv::Exception& e)\r
- {\r
- ASSERT_EQ(CV_StsNotImplemented, e.code);\r
- }\r
- }\r
- else\r
- {\r
- cv::gpu::GpuMat d_pts;\r
- detector(loadMat(image), d_pts);\r
-\r
- std::vector<cv::Point2f> pts(d_pts.cols);\r
- cv::Mat pts_mat(1, d_pts.cols, CV_32FC2, (void*)&pts[0]);\r
- d_pts.download(pts_mat);\r
-\r
- std::vector<cv::Point2f> pts_gold;\r
- cv::goodFeaturesToTrack(image, pts_gold, maxCorners, qualityLevel, minDistance);\r
-\r
- ASSERT_EQ(pts_gold.size(), pts.size());\r
-\r
- size_t mistmatch = 0;\r
- for (size_t i = 0; i < pts.size(); ++i)\r
- {\r
- cv::Point2i a = pts_gold[i];\r
- cv::Point2i b = pts[i];\r
-\r
- bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;\r
-\r
- if (!eq)\r
- ++mistmatch;\r
- }\r
-\r
- double bad_ratio = static_cast<double>(mistmatch) / pts.size();\r
-\r
- ASSERT_LE(bad_ratio, 0.01);\r
- }\r
-}\r
-\r
-INSTANTIATE_TEST_CASE_P(GPU_Video, GoodFeaturesToTrack, testing::Combine(\r
- ALL_DEVICES,\r
- testing::Values(MinDistance(0.0), MinDistance(3.0))));\r
-\r
-/////////////////////////////////////////////////////////////////////////////////////////////////\r
-// PyrLKOpticalFlow\r
-\r
-IMPLEMENT_PARAM_CLASS(UseGray, bool)\r
-\r
-PARAM_TEST_CASE(PyrLKOpticalFlow, cv::gpu::DeviceInfo, UseGray)\r
-{\r
- cv::gpu::DeviceInfo devInfo;\r
- bool useGray;\r
-\r
- virtual void SetUp()\r
- {\r
- devInfo = GET_PARAM(0);\r
- useGray = GET_PARAM(1);\r
-\r
- cv::gpu::setDevice(devInfo.deviceID());\r
- }\r
-};\r
-\r
-TEST_P(PyrLKOpticalFlow, Sparse)\r
-{\r
- cv::Mat frame0 = readImage("opticalflow/frame0.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);\r
- ASSERT_FALSE(frame0.empty());\r
-\r
- cv::Mat frame1 = readImage("opticalflow/frame1.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);\r
- ASSERT_FALSE(frame1.empty());\r
-\r
- cv::Mat gray_frame;\r
- if (useGray)\r
- gray_frame = frame0;\r
- else\r
- cv::cvtColor(frame0, gray_frame, cv::COLOR_BGR2GRAY);\r
-\r
- std::vector<cv::Point2f> pts;\r
- cv::goodFeaturesToTrack(gray_frame, pts, 1000, 0.01, 0.0);\r
-\r
- cv::gpu::GpuMat d_pts;\r
- cv::Mat pts_mat(1, (int)pts.size(), CV_32FC2, (void*)&pts[0]);\r
- d_pts.upload(pts_mat);\r
-\r
- cv::gpu::PyrLKOpticalFlow pyrLK;\r
-\r
- cv::gpu::GpuMat d_nextPts;\r
- cv::gpu::GpuMat d_status;\r
- cv::gpu::GpuMat d_err;\r
- pyrLK.sparse(loadMat(frame0), loadMat(frame1), d_pts, d_nextPts, d_status, &d_err);\r
-\r
- std::vector<cv::Point2f> nextPts(d_nextPts.cols);\r
- cv::Mat nextPts_mat(1, d_nextPts.cols, CV_32FC2, (void*)&nextPts[0]);\r
- d_nextPts.download(nextPts_mat);\r
-\r
- std::vector<unsigned char> status(d_status.cols);\r
- cv::Mat status_mat(1, d_status.cols, CV_8UC1, (void*)&status[0]);\r
- d_status.download(status_mat);\r
-\r
- std::vector<float> err(d_err.cols);\r
- cv::Mat err_mat(1, d_err.cols, CV_32FC1, (void*)&err[0]);\r
- d_err.download(err_mat);\r
-\r
- std::vector<cv::Point2f> nextPts_gold;\r
- std::vector<unsigned char> status_gold;\r
- std::vector<float> err_gold;\r
- cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts_gold, status_gold, err_gold);\r
-\r
- ASSERT_EQ(nextPts_gold.size(), nextPts.size());\r
- ASSERT_EQ(status_gold.size(), status.size());\r
- ASSERT_EQ(err_gold.size(), err.size());\r
-\r
- size_t mistmatch = 0;\r
- for (size_t i = 0; i < nextPts.size(); ++i)\r
- {\r
- if (status[i] != status_gold[i])\r
- {\r
- ++mistmatch;\r
- continue;\r
- }\r
-\r
- if (status[i])\r
- {\r
- cv::Point2i a = nextPts[i];\r
- cv::Point2i b = nextPts_gold[i];\r
-\r
- bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;\r
- float errdiff = std::abs(err[i] - err_gold[i]);\r
-\r
- if (!eq || errdiff > 1e-1)\r
- ++mistmatch;\r
- }\r
- }\r
-\r
- double bad_ratio = static_cast<double>(mistmatch) / nextPts.size();\r
-\r
- ASSERT_LE(bad_ratio, 0.01);\r
-}\r
-\r
-INSTANTIATE_TEST_CASE_P(GPU_Video, PyrLKOpticalFlow, testing::Combine(\r
- ALL_DEVICES,\r
- testing::Values(UseGray(true), UseGray(false))));\r
-\r
-/////////////////////////////////////////////////////////////////////////////////////////////////\r
-// FarnebackOpticalFlow\r
-\r
-IMPLEMENT_PARAM_CLASS(PyrScale, double)\r
-IMPLEMENT_PARAM_CLASS(PolyN, int)\r
-CV_FLAGS(FarnebackOptFlowFlags, 0, cv::OPTFLOW_FARNEBACK_GAUSSIAN)\r
-IMPLEMENT_PARAM_CLASS(UseInitFlow, bool)\r
-\r
-PARAM_TEST_CASE(FarnebackOpticalFlow, cv::gpu::DeviceInfo, PyrScale, PolyN, FarnebackOptFlowFlags, UseInitFlow)\r
-{\r
- cv::gpu::DeviceInfo devInfo;\r
- double pyrScale;\r
- int polyN;\r
- int flags;\r
- bool useInitFlow;\r
-\r
- virtual void SetUp()\r
- {\r
- devInfo = GET_PARAM(0);\r
- pyrScale = GET_PARAM(1);\r
- polyN = GET_PARAM(2);\r
- flags = GET_PARAM(3);\r
- useInitFlow = GET_PARAM(4);\r
-\r
- cv::gpu::setDevice(devInfo.deviceID());\r
- }\r
-};\r
-\r
-TEST_P(FarnebackOpticalFlow, Accuracy)\r
-{\r
- cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);\r
- ASSERT_FALSE(frame0.empty());\r
-\r
- cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);\r
- ASSERT_FALSE(frame1.empty());\r
-\r
- double polySigma = polyN <= 5 ? 1.1 : 1.5;\r
-\r
- cv::gpu::FarnebackOpticalFlow calc;\r
- calc.pyrScale = pyrScale;\r
- calc.polyN = polyN;\r
- calc.polySigma = polySigma;\r
- calc.flags = flags;\r
-\r
- cv::gpu::GpuMat d_flowx, d_flowy;\r
- calc(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy);\r
-\r
- cv::Mat flow;\r
- if (useInitFlow)\r
- {\r
- cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)};\r
- cv::merge(flowxy, 2, flow);\r
- }\r
-\r
- if (useInitFlow)\r
- {\r
- calc.flags |= cv::OPTFLOW_USE_INITIAL_FLOW;\r
- calc(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy);\r
- }\r
-\r
- cv::calcOpticalFlowFarneback(\r
- frame0, frame1, flow, calc.pyrScale, calc.numLevels, calc.winSize,\r
- calc.numIters, calc.polyN, calc.polySigma, calc.flags);\r
-\r
- std::vector<cv::Mat> flowxy;\r
- cv::split(flow, flowxy);\r
-\r
- EXPECT_MAT_SIMILAR(flowxy[0], d_flowx, 0.1);\r
- EXPECT_MAT_SIMILAR(flowxy[1], d_flowy, 0.1);\r
-}\r
-\r
-INSTANTIATE_TEST_CASE_P(GPU_Video, FarnebackOpticalFlow, testing::Combine(\r
- ALL_DEVICES,\r
- testing::Values(PyrScale(0.3), PyrScale(0.5), PyrScale(0.8)),\r
- testing::Values(PolyN(5), PolyN(7)),\r
- testing::Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)),\r
- testing::Values(UseInitFlow(false), UseInitFlow(true))));\r
-\r
-/////////////////////////////////////////////////////////////////////////////////////////////////\r
-// VideoWriter\r
-\r
-#ifdef WIN32\r
-\r
-PARAM_TEST_CASE(VideoWriter, cv::gpu::DeviceInfo, std::string)\r
-{\r
- cv::gpu::DeviceInfo devInfo;\r
- std::string inputFile;\r
-\r
- std::string outputFile;\r
-\r
- virtual void SetUp()\r
- {\r
- devInfo = GET_PARAM(0);\r
- inputFile = GET_PARAM(1);\r
-\r
- cv::gpu::setDevice(devInfo.deviceID());\r
-\r
- inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + inputFile;\r
- outputFile = inputFile.substr(0, inputFile.find('.')) + "_test.avi";\r
- }\r
-};\r
-\r
-TEST_P(VideoWriter, Regression)\r
-{\r
- const double FPS = 25.0;\r
-\r
- cv::VideoCapture reader(inputFile);\r
- ASSERT_TRUE( reader.isOpened() );\r
-\r
- cv::gpu::VideoWriter_GPU d_writer;\r
-\r
- cv::Mat frame;\r
- std::vector<cv::Mat> frames;\r
- cv::gpu::GpuMat d_frame;\r
-\r
- for (int i = 1; i < 10; ++i)\r
- {\r
- reader >> frame;\r
-\r
- if (frame.empty())\r
- break;\r
-\r
- frames.push_back(frame.clone());\r
- d_frame.upload(frame);\r
-\r
- if (!d_writer.isOpened())\r
- d_writer.open(outputFile, frame.size(), FPS);\r
-\r
- d_writer.write(d_frame);\r
- }\r
-\r
- reader.release();\r
- d_writer.close();\r
-\r
- reader.open(outputFile);\r
- ASSERT_TRUE( reader.isOpened() );\r
-\r
- for (int i = 0; i < 5; ++i)\r
- {\r
- reader >> frame;\r
- ASSERT_FALSE( frame.empty() );\r
- }\r
-}\r
-\r
-INSTANTIATE_TEST_CASE_P(GPU_Video, VideoWriter, testing::Combine(\r
- ALL_DEVICES,\r
- testing::Values(std::string("VID00003-20100701-2204.mpg"), std::string("big_buck_bunny.mpg"))));\r
-\r
-#endif // WIN32\r
-\r
-/////////////////////////////////////////////////////////////////////////////////////////////////\r
-// VideoReader\r
-\r
-PARAM_TEST_CASE(VideoReader, cv::gpu::DeviceInfo, std::string)\r
-{\r
- cv::gpu::DeviceInfo devInfo;\r
- std::string inputFile;\r
-\r
- virtual void SetUp()\r
- {\r
- devInfo = GET_PARAM(0);\r
- inputFile = GET_PARAM(1);\r
-\r
- cv::gpu::setDevice(devInfo.deviceID());\r
-\r
- inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + inputFile;\r
- }\r
-};\r
-\r
-TEST_P(VideoReader, Regression)\r
-{\r
- cv::gpu::VideoReader_GPU reader(inputFile);\r
- ASSERT_TRUE( reader.isOpened() );\r
-\r
- cv::gpu::GpuMat frame;\r
-\r
- for (int i = 0; i < 5; ++i)\r
- {\r
- ASSERT_TRUE( reader.read(frame) );\r
- ASSERT_FALSE( frame.empty() );\r
- }\r
-\r
- reader.close();\r
- ASSERT_FALSE( reader.isOpened() );\r
-}\r
-\r
-INSTANTIATE_TEST_CASE_P(GPU_Video, VideoReader, testing::Combine(\r
- ALL_DEVICES,\r
- testing::Values(std::string("VID00003-20100701-2204.mpg"))));\r
-\r
-} // namespace\r
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// Intel License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of Intel Corporation may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+namespace {
+
+//#define DUMP
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+// BroxOpticalFlow
+
+#define BROX_OPTICAL_FLOW_DUMP_FILE "opticalflow/brox_optical_flow.bin"
+#define BROX_OPTICAL_FLOW_DUMP_FILE_CC20 "opticalflow/brox_optical_flow_cc20.bin"
+
+struct BroxOpticalFlow : testing::TestWithParam<cv::gpu::DeviceInfo>
+{
+ cv::gpu::DeviceInfo devInfo;
+
+ virtual void SetUp()
+ {
+ devInfo = GetParam();
+
+ cv::gpu::setDevice(devInfo.deviceID());
+ }
+};
+
+TEST_P(BroxOpticalFlow, Regression)
+{
+ cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);
+ ASSERT_FALSE(frame0.empty());
+
+ cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1);
+ ASSERT_FALSE(frame1.empty());
+
+ cv::gpu::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
+ 10 /*inner_iterations*/, 77 /*outer_iterations*/, 10 /*solver_iterations*/);
+
+ cv::gpu::GpuMat u;
+ cv::gpu::GpuMat v;
+ brox(loadMat(frame0), loadMat(frame1), u, v);
+
+#ifndef DUMP
+ std::string fname(cvtest::TS::ptr()->get_data_path());
+ if (devInfo.majorVersion() >= 2)
+ fname += BROX_OPTICAL_FLOW_DUMP_FILE_CC20;
+ else
+ fname += BROX_OPTICAL_FLOW_DUMP_FILE;
+
+ std::ifstream f(fname.c_str(), std::ios_base::binary);
+
+ int rows, cols;
+
+ f.read((char*)&rows, sizeof(rows));
+ f.read((char*)&cols, sizeof(cols));
+
+ cv::Mat u_gold(rows, cols, CV_32FC1);
+
+ for (int i = 0; i < u_gold.rows; ++i)
+ f.read(u_gold.ptr<char>(i), u_gold.cols * sizeof(float));
+
+ cv::Mat v_gold(rows, cols, CV_32FC1);
+
+ for (int i = 0; i < v_gold.rows; ++i)
+ f.read(v_gold.ptr<char>(i), v_gold.cols * sizeof(float));
+
+ EXPECT_MAT_NEAR(u_gold, u, 0);
+ EXPECT_MAT_NEAR(v_gold, v, 0);
+#else
+ std::string fname(cvtest::TS::ptr()->get_data_path());
+ if (devInfo.majorVersion() >= 2)
+ fname += BROX_OPTICAL_FLOW_DUMP_FILE_CC20;
+ else
+ fname += BROX_OPTICAL_FLOW_DUMP_FILE;
+
+ std::ofstream f(fname.c_str(), std::ios_base::binary);
+
+ f.write((char*)&u.rows, sizeof(u.rows));
+ f.write((char*)&u.cols, sizeof(u.cols));
+
+ cv::Mat h_u(u);
+ cv::Mat h_v(v);
+
+ for (int i = 0; i < u.rows; ++i)
+ f.write(h_u.ptr<char>(i), u.cols * sizeof(float));
+
+ for (int i = 0; i < v.rows; ++i)
+ f.write(h_v.ptr<char>(i), v.cols * sizeof(float));
+
+#endif
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Video, BroxOpticalFlow, ALL_DEVICES);
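The regression data file read above is a raw dump: two ints (rows, cols) followed by the row-major CV_32FC1 payload of u and then v. A minimal standalone loader for that layout could look like this (a sketch; loadFlowField is a hypothetical helper, and the layout is inferred from the reads above):

    #include <fstream>
    #include <string>
    #include <opencv2/core/core.hpp>

    // Reads the (rows, cols, u-data, v-data) layout of the Brox regression dump.
    static bool loadFlowField(const std::string& fname, cv::Mat& u, cv::Mat& v)
    {
        std::ifstream f(fname.c_str(), std::ios_base::binary);
        if (!f.is_open())
            return false;

        int rows = 0, cols = 0;
        f.read(reinterpret_cast<char*>(&rows), sizeof(rows));
        f.read(reinterpret_cast<char*>(&cols), sizeof(cols));

        u.create(rows, cols, CV_32FC1);
        v.create(rows, cols, CV_32FC1);

        for (int i = 0; i < rows; ++i)
            f.read(u.ptr<char>(i), cols * sizeof(float));
        for (int i = 0; i < rows; ++i)
            f.read(v.ptr<char>(i), cols * sizeof(float));

        return f.good();
    }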
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+// GoodFeaturesToTrack
+
+IMPLEMENT_PARAM_CLASS(MinDistance, double)
+
+PARAM_TEST_CASE(GoodFeaturesToTrack, cv::gpu::DeviceInfo, MinDistance)
+{
+ cv::gpu::DeviceInfo devInfo;
+ double minDistance;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ minDistance = GET_PARAM(1);
+
+ cv::gpu::setDevice(devInfo.deviceID());
+ }
+};
+
+TEST_P(GoodFeaturesToTrack, Accuracy)
+{
+ cv::Mat image = readImage("opticalflow/frame0.png", cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(image.empty());
+
+ int maxCorners = 1000;
+ double qualityLevel = 0.01;
+
+ cv::gpu::GoodFeaturesToTrackDetector_GPU detector(maxCorners, qualityLevel, minDistance);
+
+ if (!supportFeature(devInfo, cv::gpu::GLOBAL_ATOMICS))
+ {
+ try
+ {
+ cv::gpu::GpuMat d_pts;
+ detector(loadMat(image), d_pts);
+ }
+ catch (const cv::Exception& e)
+ {
+ ASSERT_EQ(CV_StsNotImplemented, e.code);
+ }
+ }
+ else
+ {
+ cv::gpu::GpuMat d_pts;
+ detector(loadMat(image), d_pts);
+
+ std::vector<cv::Point2f> pts(d_pts.cols);
+ cv::Mat pts_mat(1, d_pts.cols, CV_32FC2, (void*)&pts[0]);
+ d_pts.download(pts_mat);
+
+ std::vector<cv::Point2f> pts_gold;
+ cv::goodFeaturesToTrack(image, pts_gold, maxCorners, qualityLevel, minDistance);
+
+ ASSERT_EQ(pts_gold.size(), pts.size());
+
+        size_t mismatch = 0;
+ for (size_t i = 0; i < pts.size(); ++i)
+ {
+ cv::Point2i a = pts_gold[i];
+ cv::Point2i b = pts[i];
+
+ bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;
+
+ if (!eq)
+                ++mismatch;
+ }
+
+        double bad_ratio = static_cast<double>(mismatch) / pts.size();
+
+ ASSERT_LE(bad_ratio, 0.01);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Video, GoodFeaturesToTrack, testing::Combine(
+ ALL_DEVICES,
+ testing::Values(MinDistance(0.0), MinDistance(3.0))));
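A pattern worth noting in the test above: a 1xN GpuMat of points is downloaded straight into a std::vector by wrapping the vector's storage in a cv::Mat header, so no extra copy is made. The idiom in isolation (a sketch; downloadPoints is a hypothetical helper built from the exact calls used above):

    #include <vector>
    #include <opencv2/core/core.hpp>
    #include <opencv2/gpu/gpu.hpp>

    static std::vector<cv::Point2f> downloadPoints(const cv::gpu::GpuMat& d_pts)
    {
        std::vector<cv::Point2f> pts(d_pts.cols);
        if (pts.empty())
            return pts;

        // Mat header over the vector's memory; download() writes into it in place.
        cv::Mat pts_mat(1, d_pts.cols, CV_32FC2, (void*)&pts[0]);
        d_pts.download(pts_mat);
        return pts;
    }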
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+// PyrLKOpticalFlow
+
+IMPLEMENT_PARAM_CLASS(UseGray, bool)
+
+PARAM_TEST_CASE(PyrLKOpticalFlow, cv::gpu::DeviceInfo, UseGray)
+{
+ cv::gpu::DeviceInfo devInfo;
+ bool useGray;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ useGray = GET_PARAM(1);
+
+ cv::gpu::setDevice(devInfo.deviceID());
+ }
+};
+
+TEST_P(PyrLKOpticalFlow, Sparse)
+{
+ cv::Mat frame0 = readImage("opticalflow/frame0.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
+ ASSERT_FALSE(frame0.empty());
+
+ cv::Mat frame1 = readImage("opticalflow/frame1.png", useGray ? cv::IMREAD_GRAYSCALE : cv::IMREAD_COLOR);
+ ASSERT_FALSE(frame1.empty());
+
+ cv::Mat gray_frame;
+ if (useGray)
+ gray_frame = frame0;
+ else
+ cv::cvtColor(frame0, gray_frame, cv::COLOR_BGR2GRAY);
+
+ std::vector<cv::Point2f> pts;
+ cv::goodFeaturesToTrack(gray_frame, pts, 1000, 0.01, 0.0);
+
+ cv::gpu::GpuMat d_pts;
+ cv::Mat pts_mat(1, (int)pts.size(), CV_32FC2, (void*)&pts[0]);
+ d_pts.upload(pts_mat);
+
+ cv::gpu::PyrLKOpticalFlow pyrLK;
+
+ cv::gpu::GpuMat d_nextPts;
+ cv::gpu::GpuMat d_status;
+ cv::gpu::GpuMat d_err;
+ pyrLK.sparse(loadMat(frame0), loadMat(frame1), d_pts, d_nextPts, d_status, &d_err);
+
+ std::vector<cv::Point2f> nextPts(d_nextPts.cols);
+ cv::Mat nextPts_mat(1, d_nextPts.cols, CV_32FC2, (void*)&nextPts[0]);
+ d_nextPts.download(nextPts_mat);
+
+ std::vector<unsigned char> status(d_status.cols);
+ cv::Mat status_mat(1, d_status.cols, CV_8UC1, (void*)&status[0]);
+ d_status.download(status_mat);
+
+ std::vector<float> err(d_err.cols);
+ cv::Mat err_mat(1, d_err.cols, CV_32FC1, (void*)&err[0]);
+ d_err.download(err_mat);
+
+ std::vector<cv::Point2f> nextPts_gold;
+ std::vector<unsigned char> status_gold;
+ std::vector<float> err_gold;
+ cv::calcOpticalFlowPyrLK(frame0, frame1, pts, nextPts_gold, status_gold, err_gold);
+
+ ASSERT_EQ(nextPts_gold.size(), nextPts.size());
+ ASSERT_EQ(status_gold.size(), status.size());
+ ASSERT_EQ(err_gold.size(), err.size());
+
+    size_t mismatch = 0;
+ for (size_t i = 0; i < nextPts.size(); ++i)
+ {
+ if (status[i] != status_gold[i])
+ {
+            ++mismatch;
+ continue;
+ }
+
+ if (status[i])
+ {
+ cv::Point2i a = nextPts[i];
+ cv::Point2i b = nextPts_gold[i];
+
+ bool eq = std::abs(a.x - b.x) < 1 && std::abs(a.y - b.y) < 1;
+ float errdiff = std::abs(err[i] - err_gold[i]);
+
+ if (!eq || errdiff > 1e-1)
+                ++mismatch;
+ }
+ }
+
+    double bad_ratio = static_cast<double>(mismatch) / nextPts.size();
+
+ ASSERT_LE(bad_ratio, 0.01);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Video, PyrLKOpticalFlow, testing::Combine(
+ ALL_DEVICES,
+ testing::Values(UseGray(true), UseGray(false))));
+
+/////////////////////////////////////////////////////////////////////////////////////////////////
+// FarnebackOpticalFlow
+
+IMPLEMENT_PARAM_CLASS(PyrScale, double)
+IMPLEMENT_PARAM_CLASS(PolyN, int)
+CV_FLAGS(FarnebackOptFlowFlags, 0, cv::OPTFLOW_FARNEBACK_GAUSSIAN)
+IMPLEMENT_PARAM_CLASS(UseInitFlow, bool)
+
+PARAM_TEST_CASE(FarnebackOpticalFlow, cv::gpu::DeviceInfo, PyrScale, PolyN, FarnebackOptFlowFlags, UseInitFlow)
+{
+ cv::gpu::DeviceInfo devInfo;
+ double pyrScale;
+ int polyN;
+ int flags;
+ bool useInitFlow;
+
+ virtual void SetUp()
+ {
+ devInfo = GET_PARAM(0);
+ pyrScale = GET_PARAM(1);
+ polyN = GET_PARAM(2);
+ flags = GET_PARAM(3);
+ useInitFlow = GET_PARAM(4);
+
+ cv::gpu::setDevice(devInfo.deviceID());
+ }
+};
+
+TEST_P(FarnebackOpticalFlow, Accuracy)
+{
+ cv::Mat frame0 = readImage("opticalflow/rubberwhale1.png", cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame0.empty());
+
+ cv::Mat frame1 = readImage("opticalflow/rubberwhale2.png", cv::IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame1.empty());
+
+ double polySigma = polyN <= 5 ? 1.1 : 1.5;
+
+ cv::gpu::FarnebackOpticalFlow calc;
+ calc.pyrScale = pyrScale;
+ calc.polyN = polyN;
+ calc.polySigma = polySigma;
+ calc.flags = flags;
+
+ cv::gpu::GpuMat d_flowx, d_flowy;
+ calc(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy);
+
+ cv::Mat flow;
+ if (useInitFlow)
+ {
+ cv::Mat flowxy[] = {cv::Mat(d_flowx), cv::Mat(d_flowy)};
+ cv::merge(flowxy, 2, flow);
+ }
+
+ if (useInitFlow)
+ {
+ calc.flags |= cv::OPTFLOW_USE_INITIAL_FLOW;
+ calc(loadMat(frame0), loadMat(frame1), d_flowx, d_flowy);
+ }
+
+ cv::calcOpticalFlowFarneback(
+ frame0, frame1, flow, calc.pyrScale, calc.numLevels, calc.winSize,
+ calc.numIters, calc.polyN, calc.polySigma, calc.flags);
+
+ std::vector<cv::Mat> flowxy;
+ cv::split(flow, flowxy);
+
+ EXPECT_MAT_SIMILAR(flowxy[0], d_flowx, 0.1);
+ EXPECT_MAT_SIMILAR(flowxy[1], d_flowy, 0.1);
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Video, FarnebackOpticalFlow, testing::Combine(
+ ALL_DEVICES,
+ testing::Values(PyrScale(0.3), PyrScale(0.5), PyrScale(0.8)),
+ testing::Values(PolyN(5), PolyN(7)),
+ testing::Values(FarnebackOptFlowFlags(0), FarnebackOptFlowFlags(cv::OPTFLOW_FARNEBACK_GAUSSIAN)),
+ testing::Values(UseInitFlow(false), UseInitFlow(true))));
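For the UseInitFlow cases, the test feeds the first GPU result back in through cv::OPTFLOW_USE_INITIAL_FLOW so the CPU reference starts from the same field. The same two-pass pattern on the CPU alone (a sketch; the parameter values mirror the defaults of the GPU class used above, not a recommendation):

    #include <opencv2/video/tracking.hpp>

    static void twoPassFarneback(const cv::Mat& frame0, const cv::Mat& frame1, cv::Mat& flow)
    {
        // First pass computes the flow field from scratch.
        cv::calcOpticalFlowFarneback(frame0, frame1, flow,
                                     0.5 /*pyrScale*/, 5 /*levels*/, 13 /*winSize*/,
                                     10 /*iterations*/, 5 /*polyN*/, 1.1 /*polySigma*/, 0);

        // Second pass refines the existing field instead of starting over.
        cv::calcOpticalFlowFarneback(frame0, frame1, flow,
                                     0.5, 5, 13, 10, 5, 1.1,
                                     cv::OPTFLOW_USE_INITIAL_FLOW);
    }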
+
+struct OpticalFlowNan : public BroxOpticalFlow {};
+
+TEST_P(OpticalFlowNan, Regression)
+{
+ cv::Mat frame0 = readImageType("opticalflow/frame0.png", CV_32FC1);
+ ASSERT_FALSE(frame0.empty());
+ cv::Mat r_frame0, r_frame1;
+ cv::resize(frame0, r_frame0, cv::Size(1380,1000));
+
+ cv::Mat frame1 = readImageType("opticalflow/frame1.png", CV_32FC1);
+ ASSERT_FALSE(frame1.empty());
+ cv::resize(frame1, r_frame1, cv::Size(1380,1000));
+
+ cv::gpu::BroxOpticalFlow brox(0.197f /*alpha*/, 50.0f /*gamma*/, 0.8f /*scale_factor*/,
+ 5 /*inner_iterations*/, 150 /*outer_iterations*/, 10 /*solver_iterations*/);
+
+ cv::gpu::GpuMat u;
+ cv::gpu::GpuMat v;
+ brox(loadMat(r_frame0), loadMat(r_frame1), u, v);
+
+ cv::Mat h_u, h_v;
+ u.download(h_u);
+ v.download(h_v);
+ EXPECT_TRUE(cv::checkRange(h_u));
+ EXPECT_TRUE(cv::checkRange(h_v));
+}
+
+INSTANTIATE_TEST_CASE_P(GPU_Video, OpticalFlowNan, ALL_DEVICES);
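cv::checkRange, used just above, returns false as soon as an element falls outside [minVal, maxVal); since NaN compares false against everything, the default bounds make it a convenient NaN/Inf detector. In isolation (a sketch with the same default arguments the test relies on):

    #include <opencv2/core/core.hpp>

    static bool hasOnlyFiniteValues(const cv::Mat& m)
    {
        cv::Point badPt;
        // quiet = true: report instead of throwing; badPt receives the first bad element.
        return cv::checkRange(m, true, &badPt);
    }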
+
+} // namespace
source_group("Src\\grfmts" FILES ${grfmt_hdrs} ${grfmt_srcs})
-if(NEW_FFMPEG)
- set(highgui_hdrs src/precomp.hpp src/utils.hpp src/cap_ffmpeg_impl_v2.hpp)
-else()
- set(highgui_hdrs src/precomp.hpp src/utils.hpp src/cap_ffmpeg_impl.hpp)
-endif()
+set(highgui_hdrs src/precomp.hpp src/utils.hpp src/cap_ffmpeg_impl.hpp)
set(highgui_srcs
src/cap.cpp
endif()
set(ffmpeg_bare_name "opencv_ffmpeg${FFMPEG_SUFFIX}.dll")
+ set(ffmpeg_bare_name_ver "opencv_ffmpeg${OPENCV_DLLVERSION}${FFMPEG_SUFFIX}.dll")
set(ffmpeg_path "${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/${ffmpeg_bare_name}")
- if(CMAKE_VERSION VERSION_GREATER "2.8.2")
- add_custom_command(TARGET ${the_module} POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/$<CONFIGURATION>/${ffmpeg_bare_name}"
- COMMENT "Copying ${ffmpeg_path} to the output directory")
- elseif(MSVC_IDE)
+ #if(MSVC AND CMAKE_VERSION VERSION_GREATER "2.8.2")
+ # add_custom_command(TARGET ${the_module} POST_BUILD
+ # COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/$<CONFIGURATION>/${ffmpeg_bare_name_ver}"
+ # COMMENT "Copying ${ffmpeg_path} to the output directory")
+ #else
+ if(MSVC_IDE)
add_custom_command(TARGET ${the_module} POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name}"
- COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Debug/${ffmpeg_bare_name}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Release/${ffmpeg_bare_name_ver}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/Debug/${ffmpeg_bare_name_ver}"
COMMENT "Copying ${ffmpeg_path} to the output directory")
elseif(MSVC)
add_custom_command(TARGET ${the_module} POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${CMAKE_BUILD_TYPE}/${ffmpeg_bare_name}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${CMAKE_BUILD_TYPE}/${ffmpeg_bare_name_ver}"
COMMENT "Copying ${ffmpeg_path} to the output directory")
else()
add_custom_command(TARGET ${the_module} POST_BUILD
- COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${ffmpeg_path}" "${EXECUTABLE_OUTPUT_PATH}/${ffmpeg_bare_name_ver}"
COMMENT "Copying ${ffmpeg_path} to the output directory")
endif()
- install(FILES "${ffmpeg_path}" DESTINATION bin COMPONENT main)
+ install(FILES "${ffmpeg_path}" DESTINATION bin COMPONENT main RENAME "${ffmpeg_bare_name_ver}")
endif()
ocv_add_accuracy_tests()
CV_CAP_PVAPI =800, // PvAPI, Prosilica GigE SDK
CV_CAP_OPENNI =900, // OpenNI (for Kinect)
+ CV_CAP_OPENNI_ASUS =910, // OpenNI (for Asus Xtion)
CV_CAP_ANDROID =1000, // Android
#ifdef HAVE_VIDEOINPUT
CV_CAP_DSHOW,
#endif
+#if 1
CV_CAP_IEEE1394, // identical to CV_CAP_DC1394
+#endif
+#ifdef HAVE_TYZX
CV_CAP_STEREO,
+#endif
+#ifdef HAVE_PVAPI
CV_CAP_PVAPI,
+#endif
+#if 1
CV_CAP_VFW, // identical to CV_CAP_V4L
+#endif
+#ifdef HAVE_MIL
CV_CAP_MIL,
+#endif
+#ifdef HAVE_QUICKTIME
CV_CAP_QT,
+#endif
+#ifdef HAVE_UNICAP
CV_CAP_UNICAP,
+#endif
#ifdef HAVE_OPENNI
CV_CAP_OPENNI,
#endif
#ifdef HAVE_XIMEA
CV_CAP_XIAPI,
#endif
- CV_CAP_AVFOUNDATION
+#ifdef HAVE_AVFOUNDATION
+ CV_CAP_AVFOUNDATION,
+#endif
-1
};
// try every possibly installed camera API
for (int i = 0; domains[i] >= 0; i++)
{
- #if defined(HAVE_VIDEOINPUT) || defined(HAVE_TYZX) || defined(HAVE_VFW) || \
- defined(HAVE_CAMV4L) || defined (HAVE_CAMV4L2) || defined(HAVE_GSTREAMER) || \
- defined(HAVE_DC1394_2) || defined(HAVE_DC1394) || defined(HAVE_CMU1394) || \
- defined(HAVE_GSTREAMER) || defined(HAVE_MIL) || defined(HAVE_QUICKTIME) || \
- defined(HAVE_UNICAP) || defined(HAVE_PVAPI) || defined(HAVE_OPENNI) || defined(HAVE_ANDROID_NATIVE_CAMERA) || \
- defined(HAVE_AVFOUNDATION)
+#if defined(HAVE_VIDEOINPUT) || \
+ defined(HAVE_TYZX) || \
+ defined(HAVE_VFW) || \
+ defined(HAVE_LIBV4L) || \
+ (defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \
+ defined(HAVE_GSTREAMER) || \
+ defined(HAVE_DC1394_2) || \
+ defined(HAVE_DC1394) || \
+ defined(HAVE_CMU1394) || \
+ defined(HAVE_MIL) || \
+ defined(HAVE_QUICKTIME) || \
+ defined(HAVE_UNICAP) || \
+ defined(HAVE_PVAPI) || \
+ defined(HAVE_OPENNI) || \
+ defined(HAVE_XIMEA) || \
+ defined(HAVE_AVFOUNDATION) || \
+ defined(HAVE_ANDROID_NATIVE_CAMERA) || \
+ (0)
// local variable to memorize the captured device
CvCapture *capture;
- #endif
+#endif
switch (domains[i])
{
- #ifdef HAVE_VIDEOINPUT
+#ifdef HAVE_VIDEOINPUT
case CV_CAP_DSHOW:
capture = cvCreateCameraCapture_DShow (index);
if (capture)
return capture;
break;
- #endif
+#endif
- #ifdef HAVE_TYZX
+#ifdef HAVE_TYZX
case CV_CAP_STEREO:
capture = cvCreateCameraCapture_TYZX (index);
if (capture)
return capture;
break;
- #endif
+#endif
case CV_CAP_VFW:
- #ifdef HAVE_VFW
+#ifdef HAVE_VFW
capture = cvCreateCameraCapture_VFW (index);
if (capture)
return capture;
- #endif
- #if defined HAVE_LIBV4L || (defined (HAVE_CAMV4L) && defined (HAVE_CAMV4L2))
+#endif
+
+#if defined HAVE_LIBV4L || (defined (HAVE_CAMV4L) && defined (HAVE_CAMV4L2))
capture = cvCreateCameraCapture_V4L (index);
if (capture)
return capture;
- #endif
- #ifdef HAVE_GSTREAMER
+#endif
+
+#ifdef HAVE_GSTREAMER
capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L2, 0);
if (capture)
return capture;
capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L, 0);
if (capture)
return capture;
- #endif
- break;
+#endif
+ break; //CV_CAP_VFW
case CV_CAP_FIREWIRE:
- #ifdef HAVE_DC1394_2
+#ifdef HAVE_DC1394_2
capture = cvCreateCameraCapture_DC1394_2 (index);
if (capture)
return capture;
- #endif
- #ifdef HAVE_DC1394
+#endif
+
+#ifdef HAVE_DC1394
capture = cvCreateCameraCapture_DC1394 (index);
if (capture)
return capture;
- #endif
- #ifdef HAVE_CMU1394
+#endif
+
+#ifdef HAVE_CMU1394
capture = cvCreateCameraCapture_CMU (index);
if (capture)
return capture;
- #endif
- /* Re-enable again when gstreamer 1394 support will land in the backend code
- #ifdef HAVE_GSTREAMER
+#endif
+
+#if defined(HAVE_GSTREAMER) && 0
+    // Re-enable when GStreamer 1394 support lands in the backend code
capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_1394, 0);
if (capture)
return capture;
- #endif
- */
- break;
- #ifdef HAVE_MIL
+#endif
+ break; //CV_CAP_FIREWIRE
+
+#ifdef HAVE_MIL
case CV_CAP_MIL:
capture = cvCreateCameraCapture_MIL (index);
if (capture)
return capture;
break;
- #endif
+#endif
- #ifdef HAVE_QUICKTIME
+#ifdef HAVE_QUICKTIME
case CV_CAP_QT:
capture = cvCreateCameraCapture_QT (index);
if (capture)
return capture;
break;
- #endif
+#endif
- #ifdef HAVE_UNICAP
+#ifdef HAVE_UNICAP
case CV_CAP_UNICAP:
capture = cvCreateCameraCapture_Unicap (index);
if (capture)
return capture;
break;
- #endif
+#endif
- #ifdef HAVE_PVAPI
+#ifdef HAVE_PVAPI
case CV_CAP_PVAPI:
capture = cvCreateCameraCapture_PvAPI (index);
if (capture)
return capture;
break;
- #endif
+#endif
- #ifdef HAVE_OPENNI
+#ifdef HAVE_OPENNI
case CV_CAP_OPENNI:
capture = cvCreateCameraCapture_OpenNI (index);
if (capture)
return capture;
break;
- #endif
+#endif
- #ifdef HAVE_ANDROID_NATIVE_CAMERA
+#ifdef HAVE_ANDROID_NATIVE_CAMERA
case CV_CAP_ANDROID:
capture = cvCreateCameraCapture_Android (index);
if (capture)
return capture;
break;
- #endif
+#endif
- #ifdef HAVE_XIMEA
+#ifdef HAVE_XIMEA
case CV_CAP_XIAPI:
capture = cvCreateCameraCapture_XIMEA (index);
if (capture)
return capture;
break;
- #endif
+#endif
- #ifdef HAVE_AVFOUNDATION
+#ifdef HAVE_AVFOUNDATION
case CV_CAP_AVFOUNDATION:
capture = cvCreateCameraCapture_AVFoundation (index);
if (capture)
return capture;
break;
- #endif
+#endif
}
}
if (! result)
result = cvCreateFileCapture_FFMPEG_proxy (filename);
- #ifdef HAVE_XINE
+#ifdef HAVE_XINE
if (! result)
result = cvCreateFileCapture_XINE (filename);
- #endif
+#endif
- #ifdef HAVE_GSTREAMER
+#ifdef HAVE_GSTREAMER
if (! result)
result = cvCreateCapture_GStreamer (CV_CAP_GSTREAMER_FILE, filename);
- #endif
+#endif
- #ifdef HAVE_QUICKTIME
+#ifdef HAVE_QUICKTIME
if (! result)
result = cvCreateFileCapture_QT (filename);
- #endif
+#endif
- #ifdef HAVE_AVFOUNDATION
+#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateFileCapture_AVFoundation (filename);
- #endif
+#endif
- #ifdef HAVE_OPENNI
+#ifdef HAVE_OPENNI
if (! result)
result = cvCreateFileCapture_OpenNI (filename);
- #endif
+#endif
if (! result)
result = cvCreateFileCapture_Images (filename);
result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
#endif
*/
- #ifdef HAVE_AVFOUNDATION
+#ifdef HAVE_AVFOUNDATION
if (! result)
result = cvCreateVideoWriter_AVFoundation(filename, fourcc, fps, frameSize, is_color);
- #endif
+#endif
- #ifdef HAVE_QUICKTIME
+#ifdef HAVE_QUICKTIME
if(!result)
result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
- #endif
- #ifdef HAVE_GSTREAMER
+#endif
+
+#ifdef HAVE_GSTREAMER
if (! result)
result = cvCreateVideoWriter_GStreamer(filename, fourcc, fps, frameSize, is_color);
- #endif
+#endif
+
if(!result)
result = cvCreateVideoWriter_Images(filename);
CVPixelBufferUnlockBaseAddress(pixels, 0);
CVBufferRelease(pixels);
+ CMSampleBufferInvalidate(sampleBuffer);
+ CFRelease(sampleBuffer);
[localpool drain];
return bgr_image;
{
#if defined WIN32 || defined _WIN32
const char* module_name = "opencv_ffmpeg"
+ CVAUX_STR(CV_MAJOR_VERSION) CVAUX_STR(CV_MINOR_VERSION) CVAUX_STR(CV_SUBMINOR_VERSION)
#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__)
"_64"
#endif
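The adjacent string literals above concatenate at compile time into the versioned module name that matches the renamed DLL in the CMake hunk earlier (for example "opencv_ffmpeg243_64.dll" for a hypothetical 2.4.3 x64 build; the literal continues past this hunk). The same stringize-and-paste idiom in miniature:

    #include <cstdio>

    #define STR_HELPER(x) #x
    #define STR(x) STR_HELPER(x)    // stringize after expansion, like CVAUX_STR

    #define MAJOR 2                  // illustrative version numbers
    #define MINOR 4
    #define SUBMINOR 3

    int main()
    {
        // Adjacent literals merge: "opencv_ffmpeg" "2" "4" "3" "_64"
        const char* module_name = "opencv_ffmpeg" STR(MAJOR) STR(MINOR) STR(SUBMINOR) "_64";
        std::printf("%s\n", module_name);   // prints: opencv_ffmpeg243_64
        return 0;
    }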
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
#include "cap_ffmpeg_api.hpp"
#include <assert.h>
#include <algorithm>
+#include <limits>
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4244 4510 4512 4610 )
#include "ffmpeg_codecs.hpp"
+#include <libavutil/mathematics.h>
+
#ifdef WIN32
#define HAVE_FFMPEG_SWSCALE 1
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#else
+#ifndef HAVE_FFMPEG_SWSCALE
+ #error "libswscale is necessary to build the newer OpenCV ffmpeg wrapper"
+#endif
+
// if the header path is not specified explicitly, let's deduce it
#if !defined HAVE_FFMPEG_AVCODEC_H && !defined HAVE_LIBAVCODEC_AVCODEC_H
#elif defined __linux__ || defined __APPLE__
#include <unistd.h>
#include <stdio.h>
- #include <sys/types.h>
+ #include <sys/types.h>
#include <sys/sysctl.h>
#endif
+#ifndef MIN
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+#if defined(__APPLE__)
+#define AV_NOPTS_VALUE_ ((int64_t)0x8000000000000000LL)
+#else
+#define AV_NOPTS_VALUE_ ((int64_t)AV_NOPTS_VALUE)
+#endif
+
int get_number_of_cpus(void)
{
-#if defined WIN32 || defined _WIN32
+#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(52, 111, 0)
+ return 1;
+#elif defined WIN32 || defined _WIN32
SYSTEM_INFO sysinfo;
GetSystemInfo( &sysinfo );
-
+
return (int)sysinfo.dwNumberOfProcessors;
#elif defined __linux__
return (int)sysconf( _SC_NPROCESSORS_ONLN );
#elif defined __APPLE__
int numCPU=0;
int mib[4];
- size_t len = sizeof(numCPU);
-
- /* set the mib for hw.ncpu */
+ size_t len = sizeof(numCPU);
+
+ // set the mib for hw.ncpu
mib[0] = CTL_HW;
mib[1] = HW_AVAILCPU; // alternatively, try HW_NCPU;
-
- /* get the number of CPUs from the system */
+
+ // get the number of CPUs from the system
sysctl(mib, 2, &numCPU, &len, NULL, 0);
-
- if( numCPU < 1 )
+
+ if( numCPU < 1 )
{
mib[1] = HW_NCPU;
sysctl( mib, 2, &numCPU, &len, NULL, 0 );
-
+
if( numCPU < 1 )
numCPU = 1;
}
}
-char * FOURCC2str( int fourcc )
-{
- char * mystr=(char*)malloc(5);
- mystr[0]=(char)((fourcc )&255);
- mystr[1]=(char)((fourcc>> 8)&255);
- mystr[2]=(char)((fourcc>>16)&255);
- mystr[3]=(char)((fourcc>>24)&255);
- mystr[4]=0;
- return mystr;
-}
-
-
struct Image_FFMPEG
{
unsigned char* data;
void close();
double getProperty(int);
- bool seekKeyAndRunOverFrames(int framenumber);
bool setProperty(int, double);
bool grabFrame();
bool retrieveFrame(int, unsigned char** data, int* step, int* width, int* height, int* cn);
void init();
- bool reopen();
- bool slowSeek( int framenumber );
-
- AVFormatContext * ic;
- int video_stream;
- AVStream * video_st;
- AVFrame * picture;
- int64_t picture_pts;
- AVFrame rgb_picture;
- AVPacket packet;
- Image_FFMPEG frame;
-#if defined(HAVE_FFMPEG_SWSCALE)
+
+ void seek(int64_t frame_number);
+ void seek(double sec);
+ bool slowSeek( int framenumber );
+
+ int64_t get_total_frames();
+ double get_duration_sec();
+ double get_fps();
+ int get_bitrate();
+
+ double r2d(AVRational r);
+ int64_t dts_to_frame_number(int64_t dts);
+ double dts_to_sec(int64_t dts);
+
+ AVFormatContext * ic;
+ AVCodec * avcodec;
+ int video_stream;
+ AVStream * video_st;
+ AVFrame * picture;
+ AVFrame rgb_picture;
+ int64_t picture_pts;
+
+ AVPacket packet;
+ Image_FFMPEG frame;
struct SwsContext *img_convert_ctx;
-#endif
+
+ int64_t frame_number, first_frame_number;
+
+ double eps_zero;
/*
'filename' contains the filename of the videosource,
'filename==NULL' indicates that ffmpeg's seek support works
char * filename;
};
-
void CvCapture_FFMPEG::init()
{
ic = 0;
video_stream = -1;
video_st = 0;
picture = 0;
- picture_pts = 0;
+ picture_pts = AV_NOPTS_VALUE_;
+ first_frame_number = -1;
memset( &rgb_picture, 0, sizeof(rgb_picture) );
memset( &frame, 0, sizeof(frame) );
filename = 0;
- packet.data = NULL;
-#if defined(HAVE_FFMPEG_SWSCALE)
+ memset(&packet, 0, sizeof(packet));
+ av_init_packet(&packet);
img_convert_ctx = 0;
-#endif
+
+ avcodec = 0;
+ frame_number = 0;
+ eps_zero = 0.000025;
}
void CvCapture_FFMPEG::close()
{
+ if( img_convert_ctx )
+ {
+ sws_freeContext(img_convert_ctx);
+ img_convert_ctx = 0;
+ }
+
if( picture )
av_free(picture);
{
#if LIBAVFORMAT_BUILD > 4628
avcodec_close( video_st->codec );
+
#else
- avcodec_close( &video_st->codec );
+ avcodec_close( &(video_st->codec) );
+
#endif
video_st = NULL;
}
if( ic )
{
- av_close_input_file(ic);
+#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 24, 2)
+ av_close_input_file(ic);
+#else
+ avformat_close_input(&ic);
+#endif
+
ic = NULL;
}
// free last packet if exist
if (packet.data) {
av_free_packet (&packet);
+ packet.data = NULL;
}
-
-
init();
}
-/*
- Used to reopen a video if the slower fallback function for seeking is used.
-*/
-bool CvCapture_FFMPEG::reopen()
-{
- if ( filename==NULL ) return false;
-
-#if LIBAVFORMAT_BUILD > 4628
- avcodec_close( video_st->codec );
-#else
- avcodec_close( &video_st->codec );
-#endif
- av_close_input_file(ic);
-
- // reopen video
- av_open_input_file(&ic, filename, NULL, 0, NULL);
- av_find_stream_info(ic);
-#if LIBAVFORMAT_BUILD > 4628
- AVCodecContext *enc = ic->streams[video_stream]->codec;
-#else
- AVCodecContext *enc = &ic->streams[video_stream]->codec;
-#endif
-
- avcodec_thread_init(enc, std::min(get_number_of_cpus(), 16));
-
- AVCodec *codec = avcodec_find_decoder(enc->codec_id);
- avcodec_open(enc, codec);
- video_st = ic->streams[video_stream];
-
- // reset framenumber to zero
- picture_pts=0;
-
- return true;
-}
-
#ifndef AVSEEK_FLAG_FRAME
#define AVSEEK_FLAG_FRAME 0
#endif
#ifndef AVSEEK_FLAG_ANY
#define AVSEEK_FLAG_ANY 1
#endif
-#ifndef SHORTER_DISTANCE_FOR_SEEK_TO_MAKE_IT_FASTER
-#define SHORTER_DISTANCE_FOR_SEEK_TO_MAKE_IT_FASTER 25
-#endif
+
+static void icvInitFFMPEG_internal()
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 13, 0)
+ avformat_network_init();
+ #endif
+
+        /* register all codecs, demuxers and protocols */
+ av_register_all();
+
+ av_log_set_level(AV_LOG_ERROR);
+
+ initialized = true;
+ }
+}
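icvInitFFMPEG_internal guards the global FFmpeg registration with a static volatile bool, which is fine for the single-threaded open path but is not a true thread-safe once. For comparison, the same idea with C++11's std::call_once (an alternative sketch, not what this patch uses):

    #include <mutex>

    extern "C" {
    #include <libavformat/avformat.h>
    #include <libavutil/log.h>
    }

    static std::once_flag ffmpeg_init_flag;

    static void initFFMPEGOnce()
    {
        std::call_once(ffmpeg_init_flag, []
        {
            av_register_all();                // codecs, demuxers, protocols
            av_log_set_level(AV_LOG_ERROR);   // keep FFmpeg quiet below error level
        });
    }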
bool CvCapture_FFMPEG::open( const char* _filename )
{
+ icvInitFFMPEG_internal();
+
unsigned i;
bool valid = false;
close();
-
- /* register all codecs, demux and protocols */
- av_register_all();
-
-#ifndef _DEBUG
- // av_log_level = AV_LOG_QUIET;
-#endif
-
+
+#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
+ int err = avformat_open_input(&ic, _filename, NULL, NULL);
+#else
int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
+#endif
+
if (err < 0) {
- CV_WARN("Error opening file");
- goto exit_func;
+ CV_WARN("Error opening file");
+ goto exit_func;
}
- err = av_find_stream_info(ic);
+ err =
+#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 3, 0)
+ avformat_find_stream_info(ic, NULL);
+#else
+ av_find_stream_info(ic);
+#endif
if (err < 0) {
- CV_WARN("Could not find codec parameters");
- goto exit_func;
+ CV_WARN("Could not find codec parameters");
+ goto exit_func;
}
- for(i = 0; i < ic->nb_streams; i++) {
+ for(i = 0; i < ic->nb_streams; i++)
+ {
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext *enc = ic->streams[i]->codec;
#else
AVCodecContext *enc = &ic->streams[i]->codec;
#endif
+#ifdef FF_API_THREAD_INIT
avcodec_thread_init(enc, get_number_of_cpus());
+#else
+ enc->thread_count = get_number_of_cpus();
+#endif
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
#define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
#endif
-
+
if( AVMEDIA_TYPE_VIDEO == enc->codec_type && video_stream < 0) {
AVCodec *codec = avcodec_find_decoder(enc->codec_id);
if (!codec ||
- avcodec_open(enc, codec) < 0)
- goto exit_func;
+#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
+ avcodec_open2(enc, codec, NULL)
+#else
+ avcodec_open(enc, codec)
+#endif
+ < 0) goto exit_func;
+
video_stream = i;
video_st = ic->streams[i];
picture = avcodec_alloc_frame();
if(video_stream >= 0) valid = true;
- // perform check if source is seekable via ffmpeg's seek function av_seek_frame(...)
- err = av_seek_frame(ic, video_stream, 10, 0);
- if (err < 0)
- {
- filename=(char*)malloc(strlen(_filename)+1);
- strcpy(filename, _filename);
- // reopen videofile to 'seek' back to first frame
- reopen();
- }
- else
- {
- // seek seems to work, so we don't need the filename,
- // but we still need to seek back to filestart
- filename=NULL;
- int64_t ts = video_st->first_dts;
- int flags = AVSEEK_FLAG_FRAME | AVSEEK_FLAG_BACKWARD;
- av_seek_frame(ic, video_stream, ts, flags);
- }
- exit_func:
+exit_func:
if( !valid )
close();
bool CvCapture_FFMPEG::grabFrame()
{
bool valid = false;
- static bool bFirstTime = true;
int got_picture;
- // First time we're called, set packet.data to NULL to indicate it
- // doesn't have to be freed
- if (bFirstTime) {
- bFirstTime = false;
- packet.data = NULL;
- }
+ int count_errs = 0;
+ const int max_number_of_attempts = 1 << 16;
- if( !ic || !video_st )
- return false;
+ if( !ic || !video_st ) return false;
- // free last packet if exist
- if (packet.data != NULL) {
- av_free_packet (&packet);
- }
+ av_free_packet (&packet);
+
+ picture_pts = AV_NOPTS_VALUE_;
// get the next frame
- while (!valid) {
+ while (!valid)
+ {
int ret = av_read_frame(ic, &packet);
- if (ret == AVERROR(EAGAIN))
- continue;
- if (ret < 0)
- break;
-
- if( packet.stream_index != video_stream ) {
+ if (ret == AVERROR(EAGAIN)) continue;
+
+ /* else if (ret < 0) break; */
+
+ if( packet.stream_index != video_stream )
+ {
av_free_packet (&packet);
+ count_errs++;
+ if (count_errs > max_number_of_attempts)
+ break;
continue;
}
+
+ // Decode video frame
+ #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
+ avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
+ #elif LIBAVFORMAT_BUILD > 4628
+ avcodec_decode_video(video_st->codec,
+ picture, &got_picture,
+ packet.data, packet.size);
+ #else
+ avcodec_decode_video(&video_st->codec,
+ picture, &got_picture,
+ packet.data, packet.size);
+ #endif
-#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
- avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
-#else
-#if LIBAVFORMAT_BUILD > 4628
- avcodec_decode_video(video_st->codec,
- picture, &got_picture,
- packet.data, packet.size);
-#else
- avcodec_decode_video(&video_st->codec,
- picture, &got_picture,
- packet.data, packet.size);
-#endif
-#endif
-
- if (got_picture) {
- // we have a new picture, so memorize it
- picture_pts = packet.pts;
- valid = 1;
+ // Did we get a video frame?
+ if(got_picture)
+ {
+ //picture_pts = picture->best_effort_timestamp;
+ if( picture_pts == AV_NOPTS_VALUE_ )
+ picture_pts = packet.pts != AV_NOPTS_VALUE_ && packet.pts != 0 ? packet.pts : packet.dts;
+ frame_number++;
+ valid = true;
+ }
+ else
+ {
+ count_errs++;
+ if (count_errs > max_number_of_attempts)
+ break;
}
+
+ av_free_packet (&packet);
}
+ if( valid && first_frame_number < 0 )
+ first_frame_number = dts_to_frame_number(picture_pts);
+
// return if we have a new picture or not
return valid;
}
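Stripped of the version #ifdefs, grabFrame() above is a bounded retry loop: pull packets, drop ones from other streams, decode, and give up after max_number_of_attempts packets that yield no picture. The control flow in condensed form (a sketch; modern decode path only, with EOF simplified to a clean exit rather than the commented-out break above):

    // Assumes the fields set up by CvCapture_FFMPEG::open().
    static bool grabSketch(AVFormatContext* ic, AVStream* video_st,
                           int video_stream, AVFrame* picture)
    {
        const int max_attempts = 1 << 16;   // same bound as max_number_of_attempts
        int errs = 0;

        for (;;)
        {
            AVPacket packet;
            int ret = av_read_frame(ic, &packet);
            if (ret == AVERROR(EAGAIN))
                continue;                   // transient: just read again
            if (ret < 0)
                return false;               // EOF or hard error

            if (packet.stream_index != video_stream)
            {
                av_free_packet(&packet);    // packet belongs to another stream
                if (++errs > max_attempts)
                    return false;
                continue;
            }

            int got_picture = 0;
            avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
            av_free_packet(&packet);

            if (got_picture)
                return true;                // decoder produced a complete frame
            if (++errs > max_attempts)
                return false;
        }
    }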
if( !video_st || !picture->data[0] )
return false;
-#if !defined(HAVE_FFMPEG_SWSCALE)
-#if LIBAVFORMAT_BUILD > 4628
- img_convert( (AVPicture*)&rgb_picture, PIX_FMT_BGR24,
- (AVPicture*)picture,
- video_st->codec->pix_fmt,
- video_st->codec->width,
- video_st->codec->height );
-#else
- img_convert( (AVPicture*)&rgb_picture, PIX_FMT_BGR24,
- (AVPicture*)picture,
- video_st->codec.pix_fmt,
- video_st->codec.width,
- video_st->codec.height );
-#endif
-#else
- img_convert_ctx = sws_getContext(video_st->codec->width,
- video_st->codec->height,
- video_st->codec->pix_fmt,
- video_st->codec->width,
- video_st->codec->height,
- PIX_FMT_BGR24,
- SWS_BICUBIC,
- NULL, NULL, NULL);
-
- sws_scale(img_convert_ctx, picture->data,
- picture->linesize, 0,
- video_st->codec->height,
- rgb_picture.data, rgb_picture.linesize);
- sws_freeContext(img_convert_ctx);
-#endif
+ avpicture_fill((AVPicture*)&rgb_picture, rgb_picture.data[0], PIX_FMT_RGB24,
+ video_st->codec->width, video_st->codec->height);
+
+ if( img_convert_ctx == NULL ||
+ frame.width != video_st->codec->width ||
+ frame.height != video_st->codec->height )
+ {
+ if( img_convert_ctx )
+ sws_freeContext(img_convert_ctx);
+
+ frame.width = video_st->codec->width;
+ frame.height = video_st->codec->height;
+
+ img_convert_ctx = sws_getCachedContext(
+ NULL,
+ video_st->codec->width, video_st->codec->height,
+ video_st->codec->pix_fmt,
+ video_st->codec->width, video_st->codec->height,
+ PIX_FMT_BGR24,
+ SWS_BICUBIC,
+ NULL, NULL, NULL
+ );
+
+ if (img_convert_ctx == NULL)
+ return false;//CV_Error(0, "Cannot initialize the conversion context!");
+ }
+
+ sws_scale(
+ img_convert_ctx,
+ picture->data,
+ picture->linesize,
+ 0, video_st->codec->height,
+ rgb_picture.data,
+ rgb_picture.linesize
+ );
+
*data = frame.data;
*step = frame.step;
*width = frame.width;
return true;
}
-#if defined(__APPLE__)
-#define AV_NOPTS_VALUE_ ((int64_t)0x8000000000000000LL)
-#else
-#define AV_NOPTS_VALUE_ ((int64_t)AV_NOPTS_VALUE)
-#endif
double CvCapture_FFMPEG::getProperty( int property_id )
{
- // if( !capture || !video_st || !picture->data[0] ) return 0;
if( !video_st ) return 0;
- double frameScale = av_q2d (video_st->time_base) * av_q2d (video_st->r_frame_rate);
- int64_t timestamp;
- timestamp = picture_pts;
-
switch( property_id )
{
- case CV_FFMPEG_CAP_PROP_POS_MSEC:
- if(video_st->parser && video_st->parser->dts != AV_NOPTS_VALUE_)
- return (((double)video_st->parser->dts-1) *1000.0) * av_q2d (video_st->time_base);
- if(video_st->cur_dts != AV_NOPTS_VALUE_)
- return ((video_st->cur_dts-video_st->first_dts) * 1000.0 * av_q2d (video_st->time_base));
- break;
+ case CV_FFMPEG_CAP_PROP_POS_MSEC:
+ return 1000.0*(double)frame_number/get_fps();
case CV_FFMPEG_CAP_PROP_POS_FRAMES:
- if(video_st->parser && video_st->parser->dts != AV_NOPTS_VALUE_)
- return (double)video_st->parser->dts-1;
- if(video_st->cur_dts != AV_NOPTS_VALUE_)
- return((video_st->cur_dts-video_st->first_dts) * frameScale);
- break;
+ return (double)frame_number;
case CV_FFMPEG_CAP_PROP_POS_AVI_RATIO:
- if(video_st->parser && video_st->parser->dts != AV_NOPTS_VALUE_)
- return (double)(video_st->parser->dts-1)/(double)video_st->duration;
- if(video_st->cur_dts != AV_NOPTS_VALUE_ && video_st->duration != AV_NOPTS_VALUE_)
- return(((video_st->cur_dts-video_st->first_dts)+(1.0/frameScale)) / (double)video_st->duration);
- break;
+ return r2d(ic->streams[video_stream]->time_base);
case CV_FFMPEG_CAP_PROP_FRAME_COUNT:
- {
- int64_t nbf = ic->streams[video_stream]->nb_frames;
- double eps = 0.000025;
- if (nbf == 0)
- {
- double fps = static_cast<double>(ic->streams[video_stream]->r_frame_rate.num) / static_cast<double>(ic->streams[video_stream]->r_frame_rate.den);
- if (fps < eps)
- {
- fps = 1.0 / (static_cast<double>(ic->streams[video_stream]->codec->time_base.num) / static_cast<double>(ic->streams[video_stream]->codec->time_base.den));
- }
- nbf = static_cast<int64_t>(round(ic->duration * fps) / AV_TIME_BASE);
- }
- return nbf;
- }
- break;
+ return (double)get_total_frames();
case CV_FFMPEG_CAP_PROP_FRAME_WIDTH:
return (double)frame.width;
- break;
case CV_FFMPEG_CAP_PROP_FRAME_HEIGHT:
return (double)frame.height;
- break;
case CV_FFMPEG_CAP_PROP_FPS:
#if LIBAVCODEC_BUILD > 4753
- return av_q2d (video_st->r_frame_rate);
+ return av_q2d(video_st->r_frame_rate);
#else
return (double)video_st->codec.frame_rate
/ (double)video_st->codec.frame_rate_base;
#endif
- break;
case CV_FFMPEG_CAP_PROP_FOURCC:
#if LIBAVFORMAT_BUILD > 4628
return (double)video_st->codec->codec_tag;
#else
return (double)video_st->codec.codec_tag;
#endif
+ default:
break;
}
-
+
return 0;
}
-// this is a VERY slow fallback function, ONLY used if ffmpeg's av_seek_frame delivers no correct result!
-bool CvCapture_FFMPEG::slowSeek( int framenumber )
+double CvCapture_FFMPEG::r2d(AVRational r)
+{
+ return r.num == 0 || r.den == 0 ? 0. : (double)r.num / (double)r.den;
+}
+
+double CvCapture_FFMPEG::get_duration_sec()
{
- if ( framenumber>picture_pts )
+ double sec = (double)ic->duration / (double)AV_TIME_BASE;
+
+ if (sec < eps_zero)
{
- while ( picture_pts<framenumber )
- if ( !grabFrame() ) return false;
+ sec = (double)ic->streams[video_stream]->duration * r2d(ic->streams[video_stream]->time_base);
}
- else if ( framenumber<picture_pts )
+
+ if (sec < eps_zero)
{
- reopen();
- while ( picture_pts<framenumber )
- if ( !grabFrame() ) return false;
+ sec = (double)ic->streams[video_stream]->duration * r2d(ic->streams[video_stream]->time_base);
}
- return true;
+
+ return sec;
}
-bool CvCapture_FFMPEG::seekKeyAndRunOverFrames(int framenumber)
+int CvCapture_FFMPEG::get_bitrate()
{
- int ret;
- if (framenumber > video_st->cur_dts-1) {
- if (framenumber-(video_st->cur_dts-1) > SHORTER_DISTANCE_FOR_SEEK_TO_MAKE_IT_FASTER) {
- ret = av_seek_frame(ic, video_stream, framenumber, 1);
- assert(ret >= 0);
- if( ret < 0 )
- return false;
- }
- grabFrame();
- while ((video_st->cur_dts-1) < framenumber)
- if ( !grabFrame() ) return false;
+ return ic->bit_rate;
+}
+
+double CvCapture_FFMPEG::get_fps()
+{
+ double fps = r2d(ic->streams[video_stream]->r_frame_rate);
+
+#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
+ if (fps < eps_zero)
+ {
+ fps = r2d(ic->streams[video_stream]->avg_frame_rate);
}
- else if ( framenumber < (video_st->cur_dts-1) ) {
- ret=av_seek_frame(ic, video_stream, framenumber, 1);
- assert( ret >= 0 );
- if( ret < 0 )
- return false;
+#endif
+
+ if (fps < eps_zero)
+ {
+ fps = 1.0 / r2d(ic->streams[video_stream]->codec->time_base);
+ }
+
+ return fps;
+}
+
+int64_t CvCapture_FFMPEG::get_total_frames()
+{
+ int64_t nbf = ic->streams[video_stream]->nb_frames;
+
+ if (nbf == 0)
+ {
+ nbf = (int64_t)floor(get_duration_sec() * get_fps() + 0.5);
+ }
+ return nbf;
+}
+
+int64_t CvCapture_FFMPEG::dts_to_frame_number(int64_t dts)
+{
+ double sec = dts_to_sec(dts);
+ return (int64_t)(get_fps() * sec + 0.5);
+}
+
+double CvCapture_FFMPEG::dts_to_sec(int64_t dts)
+{
+ return (double)(dts - ic->streams[video_stream]->start_time) *
+ r2d(ic->streams[video_stream]->time_base);
+}
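Together, r2d, dts_to_sec and dts_to_frame_number implement frame = round((dts - start_time) * time_base * fps). Worked example with illustrative numbers: a 90 kHz MPEG time base (time_base = 1/90000), start_time = 0 and fps = 25 turn dts = 180000 into 2.0 seconds, i.e. frame 50. The same arithmetic as a standalone check:

    #include <cassert>
    #include <stdint.h>

    // Standalone restatement of dts_to_sec + dts_to_frame_number.
    static int64_t dtsToFrame(int64_t dts, int64_t start_time,
                              double time_base /*seconds per tick*/, double fps)
    {
        double sec = (double)(dts - start_time) * time_base;
        return (int64_t)(fps * sec + 0.5);   // round to the nearest frame
    }

    int main()
    {
        assert(dtsToFrame(180000, 0, 1.0 / 90000.0, 25.0) == 50);
        return 0;
    }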
+
+void CvCapture_FFMPEG::seek(int64_t _frame_number)
+{
+ _frame_number = std::min(_frame_number, get_total_frames());
+ int delta = 16;
+
+ // if we have not grabbed a single frame before first seek, let's read the first frame
+ // and get some valuable information during the process
+ if( first_frame_number < 0 )
grabFrame();
- while ((video_st->cur_dts-1) < framenumber )
- if ( !grabFrame() ) return false;
+
+ for(;;)
+ {
+ int64_t _frame_number_temp = std::max(_frame_number-delta, (int64_t)0);
+ double sec = (double)_frame_number_temp / get_fps();
+ int64_t time_stamp = ic->streams[video_stream]->start_time;
+ double time_base = r2d(ic->streams[video_stream]->time_base);
+ time_stamp += (int64_t)(sec / time_base + 0.5);
+ av_seek_frame(ic, video_stream, time_stamp, AVSEEK_FLAG_BACKWARD);
+ avcodec_flush_buffers(ic->streams[video_stream]->codec);
+ if( _frame_number > 0 )
+ {
+ grabFrame();
+
+ if( _frame_number > 1 )
+ {
+ frame_number = dts_to_frame_number(picture_pts) - first_frame_number;
+ //printf("_frame_number = %d, frame_number = %d, delta = %d\n",
+ // (int)_frame_number, (int)frame_number, delta);
+
+ if( frame_number < 0 || frame_number > _frame_number-1 )
+ {
+ if( _frame_number_temp == 0 || delta >= INT_MAX/4 )
+ break;
+ delta = delta < 16 ? delta*2 : delta*3/2;
+ continue;
+ }
+ while( frame_number < _frame_number-1 )
+ {
+ if(!grabFrame())
+ break;
+ }
+ frame_number++;
+ break;
+ }
+ else
+ {
+ frame_number = 1;
+ break;
+ }
+ }
+ else
+ {
+ frame_number = 0;
+ break;
+ }
}
- return true;
+}
+
+void CvCapture_FFMPEG::seek(double sec)
+{
+ seek((int64_t)(sec * get_fps() + 0.5));
}
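The frame-accurate seek above first jumps to a keyframe at least delta frames before the target (growing delta whenever decoding lands past the target), then decodes forward until frame_number reaches the requested index; the double overload simply converts seconds to frames via get_fps(). Callers reach both through setProperty, as in the hunk below:

    // Positioning via the properties handled by setProperty() (sketch).
    void positionExamples(CvCapture_FFMPEG& cap)
    {
        cap.setProperty(CV_FFMPEG_CAP_PROP_POS_FRAMES, 100);   // seek((int64_t)100)
        cap.setProperty(CV_FFMPEG_CAP_PROP_POS_MSEC, 2500);    // seek(2500/1000.0)
    }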
bool CvCapture_FFMPEG::setProperty( int property_id, double value )
{
if( !video_st ) return false;
- int framenumber = 0;
switch( property_id )
{
case CV_FFMPEG_CAP_PROP_POS_FRAMES:
case CV_FFMPEG_CAP_PROP_POS_AVI_RATIO:
{
- int64_t timestamp = ic->streams[video_stream]->first_dts;
- AVRational time_base = ic->streams[video_stream]->time_base;
- AVRational frame_base = ic->streams[video_stream]->r_frame_rate;
- double timeScale = (time_base.den / (double)time_base.num) / (frame_base.num / (double)frame_base.den);
switch( property_id )
{
case CV_FFMPEG_CAP_PROP_POS_FRAMES:
- framenumber=(int)value;
- seekKeyAndRunOverFrames(framenumber);
+ seek((int64_t)value);
break;
case CV_FFMPEG_CAP_PROP_POS_MSEC:
- framenumber=(int)(value/(1000.0f * av_q2d (video_st->time_base)));
- seekKeyAndRunOverFrames(framenumber);
+ seek(value/1000.0);
break;
case CV_FFMPEG_CAP_PROP_POS_AVI_RATIO:
- framenumber = (int)(value*ic->duration);
- seekKeyAndRunOverFrames(framenumber);
+ seek((int64_t)(value*ic->duration));
break;
}
- if ( filename )
- {
- // ffmpeg's seek doesn't work...
- if (!slowSeek((int)timestamp))
- {
- fprintf(stderr, "HIGHGUI ERROR: AVI: could not (slow) seek to position %0.3f\n",
- (double)timestamp / AV_TIME_BASE);
- return false;
- }
- }
- else
- {
- int flags = AVSEEK_FLAG_ANY;
- if (timestamp < ic->streams[video_stream]->cur_dts)
- flags |= AVSEEK_FLAG_BACKWARD;
- int ret = av_seek_frame(ic, video_stream, timestamp, flags);
- if (ret < 0)
- {
- fprintf(stderr, "HIGHGUI ERROR: AVI: could not seek to position %0.3f\n",
- (double)timestamp / AV_TIME_BASE);
- return false;
- }
- }
picture_pts=(int64_t)value;
}
break;
void init();
- AVOutputFormat *fmt;
- AVFormatContext *oc;
+ AVOutputFormat * fmt;
+ AVFormatContext * oc;
uint8_t * outbuf;
uint32_t outbuf_size;
FILE * outfile;
AVStream * video_st;
int input_pix_fmt;
Image_FFMPEG temp_image;
-#if defined(HAVE_FFMPEG_SWSCALE)
+ int frame_width, frame_height;
+ bool ok;
struct SwsContext *img_convert_ctx;
-#endif
};
static const char * icvFFMPEGErrStr(int err)
return "Stream not found";
default:
break;
- }
+ }
#else
switch(err) {
case AVERROR_NUMEXPECTED:
- return "Incorrect filename syntax";
+ return "Incorrect filename syntax";
case AVERROR_INVALIDDATA:
- return "Invalid data in header";
+ return "Invalid data in header";
case AVERROR_NOFMT:
- return "Unknown format";
+ return "Unknown format";
case AVERROR_IO:
- return "I/O error occurred";
+ return "I/O error occurred";
case AVERROR_NOMEM:
- return "Memory allocation error";
+ return "Memory allocation error";
default:
- break;
+ break;
}
#endif
- return "Unspecified error";
+ return "Unspecified error";
}
/* function internal to FFMPEG (libavformat/riff.c) to lookup codec id by fourcc tag*/
extern "C" {
- enum CodecID codec_get_bmp_id(unsigned int tag);
+ enum CodecID codec_get_bmp_id(unsigned int tag);
}
void CvVideoWriter_FFMPEG::init()
video_st = 0;
input_pix_fmt = 0;
memset(&temp_image, 0, sizeof(temp_image));
-#if defined(HAVE_FFMPEG_SWSCALE)
img_convert_ctx = 0;
-#endif
+ frame_width = frame_height = 0;
+ ok = false;
}
/**
*/
static AVFrame * icv_alloc_picture_FFMPEG(int pix_fmt, int width, int height, bool alloc)
{
- AVFrame * picture;
- uint8_t * picture_buf;
- int size;
-
- picture = avcodec_alloc_frame();
- if (!picture)
- return NULL;
- size = avpicture_get_size( (PixelFormat) pix_fmt, width, height);
- if(alloc){
- picture_buf = (uint8_t *) malloc(size);
- if (!picture_buf)
- {
- av_free(picture);
- return NULL;
- }
- avpicture_fill((AVPicture *)picture, picture_buf,
+ AVFrame * picture;
+ uint8_t * picture_buf;
+ int size;
+
+ picture = avcodec_alloc_frame();
+ if (!picture)
+ return NULL;
+ size = avpicture_get_size( (PixelFormat) pix_fmt, width, height);
+ if(alloc){
+ picture_buf = (uint8_t *) malloc(size);
+ if (!picture_buf)
+ {
+ av_free(picture);
+ return NULL;
+ }
+ avpicture_fill((AVPicture *)picture, picture_buf,
(PixelFormat) pix_fmt, width, height);
- }
- else {
- }
- return picture;
+ }
+ else {
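+        // alloc == false: leave the data pointers empty; the caller is expected
+        // to fill them later (e.g. via avpicture_fill over its own memory)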
+ }
+ return picture;
}
/* add a video output stream to the container */
static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
- CodecID codec_id,
- int w, int h, int bitrate,
- double fps, int pixel_format)
+ CodecID codec_id,
+ int w, int h, int bitrate,
+ double fps, int pixel_format)
{
- AVCodecContext *c;
- AVStream *st;
- int frame_rate, frame_rate_base;
- AVCodec *codec;
+ AVCodecContext *c;
+ AVStream *st;
+ int frame_rate, frame_rate_base;
+ AVCodec *codec;
+#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 10, 0)
+ st = avformat_new_stream(oc, 0);
+#else
+ st = av_new_stream(oc, 0);
+#endif
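+ // avformat_new_stream() superseded av_new_stream() in libavformat 53.10;
+ // the CALC_FFMPEG_VERSION gate keeps both code paths compilable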
- st = av_new_stream(oc, 0);
- if (!st) {
- CV_WARN("Could not allocate stream");
- return NULL;
- }
+ if (!st) {
+ CV_WARN("Could not allocate stream");
+ return NULL;
+ }
#if LIBAVFORMAT_BUILD > 4628
- c = st->codec;
+ c = st->codec;
#else
- c = &(st->codec);
+ c = &(st->codec);
#endif
#if LIBAVFORMAT_BUILD > 4621
- c->codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
+ c->codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
#else
- c->codec_id = oc->oformat->video_codec;
+ c->codec_id = oc->oformat->video_codec;
#endif
- if(codec_id != CODEC_ID_NONE){
- c->codec_id = codec_id;
- }
+ if(codec_id != CODEC_ID_NONE){
+ c->codec_id = codec_id;
+ }
//if(codec_tag) c->codec_tag=codec_tag;
- codec = avcodec_find_encoder(c->codec_id);
+ codec = avcodec_find_encoder(c->codec_id);
- c->codec_type = AVMEDIA_TYPE_VIDEO;
+ c->codec_type = AVMEDIA_TYPE_VIDEO;
- /* put sample parameters */
- c->bit_rate = bitrate;
+ /* put sample parameters */
+ int64_t lbit_rate = (int64_t)bitrate;
+ lbit_rate += (bitrate / 2);
+ lbit_rate = std::min(lbit_rate, (int64_t)INT_MAX);
+ c->bit_rate = lbit_rate;
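+ // e.g. a requested 8,000,000 bit/s becomes 12,000,000 here: 50% headroom,
+ // clamped to INT_MAX so the value still fits AVCodecContext's int field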
- /* resolution must be a multiple of two */
- c->width = w;
- c->height = h;
+ // took advice from
+ // http://ffmpeg-users.933282.n4.nabble.com/warning-clipping-1-dct-coefficients-to-127-127-td934297.html
+ c->qmin = 3;
- /* time base: this is the fundamental unit of time (in seconds) in terms
- of which frame timestamps are represented. for fixed-fps content,
+ /* resolution must be a multiple of two */
+ c->width = w;
+ c->height = h;
+
+ /* time base: this is the fundamental unit of time (in seconds) in terms
+ of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
- frame_rate = static_cast<int>(fps+0.5);
- frame_rate_base = 1;
- while (fabs(static_cast<double>(frame_rate)/frame_rate_base) - fps > 0.001){
- frame_rate_base *= 10;
- frame_rate = static_cast<int>(fps*frame_rate_base + 0.5);
- }
+ frame_rate=(int)(fps+0.5);
+ frame_rate_base=1;
+ while (fabs((double)frame_rate/frame_rate_base - fps) > 0.001){
+ frame_rate_base*=10;
+ frame_rate=(int)(fps*frame_rate_base + 0.5);
+ }
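+ // e.g. fps = 29.97 settles on frame_rate/frame_rate_base = 2997/100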
#if LIBAVFORMAT_BUILD > 4752
c->time_base.den = frame_rate;
c->time_base.num = frame_rate_base;
- /* adjust time base for supported framerates */
- if(codec && codec->supported_framerates){
- const AVRational *p= codec->supported_framerates;
+ /* adjust time base for supported framerates */
+ if(codec && codec->supported_framerates){
+ const AVRational *p= codec->supported_framerates;
AVRational req = {frame_rate, frame_rate_base};
- const AVRational *best=NULL;
- AVRational best_error= {INT_MAX, 1};
- for(; p->den!=0; p++){
- AVRational error= av_sub_q(req, *p);
- if(error.num <0) error.num *= -1;
- if(av_cmp_q(error, best_error) < 0){
- best_error= error;
- best= p;
- }
- }
- c->time_base.den= best->num;
- c->time_base.num= best->den;
- }
+ const AVRational *best=NULL;
+ AVRational best_error= {INT_MAX, 1};
+ for(; p->den!=0; p++){
+ AVRational error= av_sub_q(req, *p);
+ if(error.num <0) error.num *= -1;
+ if(av_cmp_q(error, best_error) < 0){
+ best_error= error;
+ best= p;
+ }
+ }
+ if (best == NULL) // guard against an empty supported_framerates list
+ return NULL;
+ c->time_base.den= best->num;
+ c->time_base.num= best->den;
+ }
#else
- c->frame_rate = frame_rate;
- c->frame_rate_base = frame_rate_base;
+ c->frame_rate = frame_rate;
+ c->frame_rate_base = frame_rate_base;
#endif
- c->gop_size = 12; /* emit one intra frame every twelve frames at most */
- c->pix_fmt = (PixelFormat) pixel_format;
+ c->gop_size = 12; /* emit one intra frame every twelve frames at most */
+ c->pix_fmt = (PixelFormat) pixel_format;
- if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
+ if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
c->max_b_frames = 2;
}
if (c->codec_id == CODEC_ID_MPEG1VIDEO || c->codec_id == CODEC_ID_MSMPEG4V3){
/* needed to avoid using macroblocks in which some coeffs overflow
this doesn't happen with normal video, it just happens here as the
motion of the chroma plane doesn't match the luma plane */
- /* avoid FFMPEG warning 'clipping 1 dct coefficients...' */
+ /* avoid FFMPEG warning 'clipping 1 dct coefficients...' */
c->mb_decision=2;
}
#if LIBAVCODEC_VERSION_INT>0x000409
return st;
}
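+// returned by icv_av_write_frame_FFMPEG() when the encoder buffered the
+// frame and produced no packet; close() flushes until it sees this code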
+static const int OPENCV_NO_FRAMES_WRITTEN_CODE = 1000;
+
int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
{
-
#if LIBAVFORMAT_BUILD > 4628
- AVCodecContext * c = video_st->codec;
+ AVCodecContext * c = video_st->codec;
#else
- AVCodecContext * c = &(video_st->codec);
+ AVCodecContext * c = &(video_st->codec);
#endif
- int out_size;
- int ret;
+ int out_size;
+ int ret = 0;
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
#ifndef PKT_FLAG_KEY
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
#endif
-
- pkt.flags |= PKT_FLAG_KEY;
+
+ pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= (uint8_t *)picture;
pkt.size= sizeof(AVPicture);
#if LIBAVFORMAT_BUILD > 4752
if(c->coded_frame->pts != (int64_t)AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
+ pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
#else
- pkt.pts = c->coded_frame->pts;
+ pkt.pts = c->coded_frame->pts;
#endif
if(c->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
/* write the compressed frame in the media file */
ret = av_write_frame(oc, &pkt);
} else {
- ret = 0;
+ ret = OPENCV_NO_FRAMES_WRITTEN_CODE;
}
}
- if (ret != 0) return -1;
-
- return 0;
+ return ret;
}
/// write a frame with FFMPEG
bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin )
{
- bool ret = false;
+ bool ret = false;
+
+ if( (width & -2) != frame_width || (height & -2) != frame_height || !data )
+ return false;
+ width = frame_width;
+ height = frame_height;
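+ // open() stored the frame size rounded down to even values, so any input
+ // that matches after the same rounding is accepted (the odd edge is dropped)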
- // typecast from opaque data type to implemented struct
+ // typecast from opaque data type to implemented struct
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext *c = video_st->codec;
#else
return false;
}
}
- else if (input_pix_fmt == PIX_FMT_GRAY8) {
+ else if (input_pix_fmt == PIX_FMT_GRAY8) {
if (cn != 1) {
return false;
}
}
- else {
+ else {
assert(false);
}
- // check if buffer sizes match, i.e. image has expected format (size, channels, bitdepth, alignment)
- /*#if LIBAVCODEC_VERSION_INT >= ((52<<16)+(37<<8)+0)
- assert (image->imageSize == avpicture_get_size( (PixelFormat)input_pix_fmt, image->width, image->height ));
-#else
- assert (image->imageSize == avpicture_get_size( input_pix_fmt, image->width, image->height ));
-#endif*/
-
- if ( c->pix_fmt != input_pix_fmt ) {
- assert( input_picture );
- // let input_picture point to the raw data buffer of 'image'
- avpicture_fill((AVPicture *)input_picture, (uint8_t *) data,
+ if ( c->pix_fmt != input_pix_fmt ) {
+ assert( input_picture );
+ // let input_picture point to the raw data buffer of 'image'
+ avpicture_fill((AVPicture *)input_picture, (uint8_t *) data,
(PixelFormat)input_pix_fmt, width, height);
-#if !defined(HAVE_FFMPEG_SWSCALE)
- // convert to the color format needed by the codec
- if( img_convert((AVPicture *)picture, c->pix_fmt,
- (AVPicture *)input_picture, (PixelFormat)input_pix_fmt,
- width, height) < 0){
- return false;
- }
-#else
- img_convert_ctx = sws_getContext(width,
- height,
- (PixelFormat)input_pix_fmt,
- c->width,
- c->height,
- c->pix_fmt,
- SWS_BICUBIC,
- NULL, NULL, NULL);
+ if( !img_convert_ctx )
+ {
+ img_convert_ctx = sws_getContext(width,
+ height,
+ (PixelFormat)input_pix_fmt,
+ c->width,
+ c->height,
+ c->pix_fmt,
+ SWS_BICUBIC,
+ NULL, NULL, NULL);
+ if( !img_convert_ctx )
+ return false;
+ }
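+ // the swscale context is created once, reused for every frame and
+ // released in close() rather than after each conversion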
if ( sws_scale(img_convert_ctx, input_picture->data,
input_picture->linesize, 0,
height,
picture->data, picture->linesize) < 0 )
- {
return false;
- }
- sws_freeContext(img_convert_ctx);
-#endif
- }
- else{
- avpicture_fill((AVPicture *)picture, (uint8_t *) data,
+ }
+ else{
+ avpicture_fill((AVPicture *)picture, (uint8_t *) data,
(PixelFormat)input_pix_fmt, width, height);
- }
+ }
- ret = icv_av_write_frame_FFMPEG( oc, video_st, outbuf, outbuf_size, picture) >= 0;
+ ret = icv_av_write_frame_FFMPEG( oc, video_st, outbuf, outbuf_size, picture) >= 0;
- return ret;
+ return ret;
}
/// close video output stream and free associated memory
void CvVideoWriter_FFMPEG::close()
{
- unsigned i;
-
- // nothing to do if already released
- if ( !picture )
- return;
+ unsigned i;
- /* no more frame to compress. The codec has a latency of a few
- frames if using B frames, so we get the last frames by
- passing the same picture again */
- // TODO -- do we need to account for latency here?
+ // nothing to do if already released
+ if ( !picture )
+ return;
+
+ /* no more frames to compress. The codec has a latency of a few
+ frames if using B frames, so we retrieve the buffered ("delayed")
+ frames by flushing the encoder with NULL until it is drained */
- /* write the trailer, if any */
- av_write_trailer(oc);
+ /* write the trailer, if any */
+ if(ok && oc)
+ {
+ if( (oc->oformat->flags & AVFMT_RAWPICTURE) == 0 )
+ {
+ for(;;)
+ {
+ int ret = icv_av_write_frame_FFMPEG( oc, video_st, outbuf, outbuf_size, NULL);
+ if( ret == OPENCV_NO_FRAMES_WRITTEN_CODE || ret < 0 )
+ break;
+ }
+ }
+ av_write_trailer(oc);
+ }
+
+ if( img_convert_ctx )
+ {
+ sws_freeContext(img_convert_ctx);
+ img_convert_ctx = 0;
+ }
- // free pictures
+ // free pictures
#if LIBAVFORMAT_BUILD > 4628
- if( video_st->codec->pix_fmt != input_pix_fmt){
+ if( video_st->codec->pix_fmt != input_pix_fmt)
#else
- if( video_st->codec.pix_fmt != input_pix_fmt){
+ if( video_st->codec.pix_fmt != input_pix_fmt)
#endif
- if(picture->data[0])
- free(picture->data[0]);
- picture->data[0] = 0;
- }
- av_free(picture);
+ {
+ if(picture->data[0])
+ free(picture->data[0]);
+ picture->data[0] = 0;
+ }
+ av_free(picture);
- if (input_picture) {
- av_free(input_picture);
- }
+ if (input_picture)
+ av_free(input_picture);
- /* close codec */
+ /* close codec */
#if LIBAVFORMAT_BUILD > 4628
- avcodec_close(video_st->codec);
+ avcodec_close(video_st->codec);
#else
- avcodec_close(&(video_st->codec));
+ avcodec_close(&(video_st->codec));
#endif
- av_free(outbuf);
+ av_free(outbuf);
- /* free the streams */
- for(i = 0; i < oc->nb_streams; i++) {
- av_freep(&oc->streams[i]->codec);
- av_freep(&oc->streams[i]);
- }
-
- if (!(fmt->flags & AVFMT_NOFILE)) {
- /* close the output file */
+ /* free the streams */
+ for(i = 0; i < oc->nb_streams; i++)
+ {
+ av_freep(&oc->streams[i]->codec);
+ av_freep(&oc->streams[i]);
+ }
+ if (!(fmt->flags & AVFMT_NOFILE))
+ {
+ /* close the output file */
+#if LIBAVCODEC_VERSION_INT < ((52<<16)+(123<<8)+0)
#if LIBAVCODEC_VERSION_INT >= ((51<<16)+(49<<8)+0)
- url_fclose(oc->pb);
+ url_fclose(oc->pb);
#else
- url_fclose(&oc->pb);
+ url_fclose(&oc->pb);
+#endif
+#else
+ avio_close(oc->pb);
#endif
- }
-
- /* free the stream */
- av_free(oc);
+ }
- if( temp_image.data )
- {
- free(temp_image.data);
- temp_image.data = 0;
- }
+ /* free the stream */
+ av_free(oc);
- init();
+ if( temp_image.data )
+ {
+ free(temp_image.data);
+ temp_image.data = 0;
}
- /// Create a video writer object that uses FFMPEG
- bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
- double fps, int width, int height, bool is_color )
- {
- CodecID codec_id = CODEC_ID_NONE;
- int err, codec_pix_fmt, bitrate_scale = 64;
+ init();
+}
- close();
+/// Create a video writer object that uses FFMPEG
+bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
+ double fps, int width, int height, bool is_color )
+{
+ icvInitFFMPEG_internal();
+
+ CodecID codec_id = CODEC_ID_NONE;
+ int err, codec_pix_fmt;
+ double bitrate_scale = 1;
- // check arguments
- assert(filename);
- assert(fps > 0);
- assert(width > 0 && height > 0);
+ close();
- // tell FFMPEG to register codecs
- av_register_all();
+ // check arguments
+ if( !filename )
+ return false;
+ if(fps <= 0)
+ return false;
+
+ // we allow frames of odd width or height, but in this case we truncate
+ // the rightmost column/the bottom row. This should probably be handled more
+ // elegantly, but some internal swscale functions in FFMPEG require even width/height.
+ width &= -2;
+ height &= -2;
+ if( width <= 0 || height <= 0 )
+ return false;
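+ // e.g. a 641x481 request is encoded as 640x480; writeFrame() still accepts
+ // 641x481 input and silently drops the last column and row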
- /* auto detect the output format from the name and fourcc code. */
+ /* auto detect the output format from the name and fourcc code. */
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
- fmt = av_guess_format(NULL, filename, NULL);
+ fmt = av_guess_format(NULL, filename, NULL);
#else
- fmt = guess_format(NULL, filename, NULL);
+ fmt = guess_format(NULL, filename, NULL);
#endif
- if (!fmt)
- return false;
+ if (!fmt)
+ return false;
- /* determine optimal pixel format */
- if (is_color) {
- input_pix_fmt = PIX_FMT_BGR24;
- }
- else {
- input_pix_fmt = PIX_FMT_GRAY8;
- }
+ /* determine optimal pixel format */
+ if (is_color) {
+ input_pix_fmt = PIX_FMT_BGR24;
+ }
+ else {
+ input_pix_fmt = PIX_FMT_GRAY8;
+ }
- /* Lookup codec_id for given fourcc */
+ /* Lookup codec_id for given fourcc */
#if LIBAVCODEC_VERSION_INT<((51<<16)+(49<<8)+0)
- if( (codec_id = codec_get_bmp_id( fourcc )) == CODEC_ID_NONE )
- return false;
+ if( (codec_id = codec_get_bmp_id( fourcc )) == CODEC_ID_NONE )
+ return false;
#else
- const struct AVCodecTag * tags[] = { codec_bmp_tags, NULL};
- if( (codec_id = av_codec_get_id(tags, fourcc)) == CODEC_ID_NONE )
- return false;
+ const struct AVCodecTag * tags[] = { codec_bmp_tags, NULL};
+ if( (codec_id = av_codec_get_id(tags, fourcc)) == CODEC_ID_NONE )
+ return false;
#endif
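+ // e.g. the fourcc 'XVID' maps to CODEC_ID_MPEG4 through FFMPEG's RIFF tag table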
- // alloc memory for context
+ // alloc memory for context
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
- oc = avformat_alloc_context();
+ oc = avformat_alloc_context();
#else
- oc = av_alloc_format_context();
+ oc = av_alloc_format_context();
#endif
- assert (oc);
+ assert (oc);
- /* set file name */
- oc->oformat = fmt;
- snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
+ /* set file name */
+ oc->oformat = fmt;
+ snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
- /* set some options */
- oc->max_delay = (int)(0.7*AV_TIME_BASE); /* This reduces buffer underrun warnings with MPEG */
+ /* set some options */
+ oc->max_delay = (int)(0.7*AV_TIME_BASE); /* This reduces buffer underrun warnings with MPEG */
- // set a few optimal pixel formats for lossless codecs of interest..
- switch (codec_id) {
+ // set a few optimal pixel formats for lossless codecs of interest..
+ switch (codec_id) {
#if LIBAVCODEC_VERSION_INT>((50<<16)+(1<<8)+0)
- case CODEC_ID_JPEGLS:
- // BGR24 or GRAY8 depending on is_color...
- codec_pix_fmt = input_pix_fmt;
- break;
+ case CODEC_ID_JPEGLS:
+ // BGR24 or GRAY8 depending on is_color...
+ codec_pix_fmt = input_pix_fmt;
+ break;
#endif
- case CODEC_ID_HUFFYUV:
- codec_pix_fmt = PIX_FMT_YUV422P;
- break;
- case CODEC_ID_MJPEG:
- case CODEC_ID_LJPEG:
- codec_pix_fmt = PIX_FMT_YUVJ420P;
- bitrate_scale = 128;
- break;
- case CODEC_ID_RAWVIDEO:
- codec_pix_fmt = input_pix_fmt == PIX_FMT_GRAY8 ||
- input_pix_fmt == PIX_FMT_GRAY16LE ||
- input_pix_fmt == PIX_FMT_GRAY16BE ? input_pix_fmt : PIX_FMT_YUV420P;
- break;
- default:
- // good for lossy formats, MPEG, etc.
- codec_pix_fmt = PIX_FMT_YUV420P;
- break;
- }
-
- // TODO -- safe to ignore output audio stream?
- video_st = icv_add_video_stream_FFMPEG(oc, codec_id,
- width, height, width*height*bitrate_scale,
- fps, codec_pix_fmt);
-
-
- /* set the output parameters (must be done even if no
- parameters). */
- if (av_set_parameters(oc, NULL) < 0) {
- return false;
- }
-
- dump_format(oc, 0, filename, 1);
+ case CODEC_ID_HUFFYUV:
+ codec_pix_fmt = PIX_FMT_YUV422P;
+ break;
+ case CODEC_ID_MJPEG:
+ case CODEC_ID_LJPEG:
+ codec_pix_fmt = PIX_FMT_YUVJ420P;
+ bitrate_scale = 3;
+ break;
+ case CODEC_ID_RAWVIDEO:
+ codec_pix_fmt = input_pix_fmt == PIX_FMT_GRAY8 ||
+ input_pix_fmt == PIX_FMT_GRAY16LE ||
+ input_pix_fmt == PIX_FMT_GRAY16BE ? input_pix_fmt : PIX_FMT_YUV420P;
+ break;
+ default:
+ // good for lossy formats, MPEG, etc.
+ codec_pix_fmt = PIX_FMT_YUV420P;
+ break;
+ }
+
+ double bitrate = MIN(bitrate_scale*fps*width*height, (double)INT_MAX/2);
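+ // e.g. 640x480 @ 25 fps requests 640*480*25 = 7,680,000 bit/s
+ // (tripled via bitrate_scale for the intra-only MJPEG/LJPEG codecs)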
- /* now that all the parameters are set, we can open the audio and
- video codecs and allocate the necessary encode buffers */
- if (!video_st){
- return false;
- }
+ // TODO -- safe to ignore output audio stream?
+ video_st = icv_add_video_stream_FFMPEG(oc, codec_id,
+ width, height, (int)(bitrate + 0.5),
+ fps, codec_pix_fmt);
- AVCodec *codec;
- AVCodecContext *c;
+ /* set the output parameters (must be done even if no
+ parameters). */
+#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
+ if (av_set_parameters(oc, NULL) < 0) {
+ return false;
+ }
+#endif
-#if LIBAVFORMAT_BUILD > 4628
- c = (video_st->codec);
+#if 0
+#if FF_API_DUMP_FORMAT
+ dump_format(oc, 0, filename, 1);
#else
- c = &(video_st->codec);
+ av_dump_format(oc, 0, filename, 1);
+#endif
#endif
- c->codec_tag = fourcc;
- /* find the video encoder */
- codec = avcodec_find_encoder(c->codec_id);
- if (!codec) {
- fprintf(stderr, "Could not find encoder for codec id %d: %s", c->codec_id, icvFFMPEGErrStr(
- #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
- AVERROR_ENCODER_NOT_FOUND
- #else
- -1
- #endif
- ));
- return false;
- }
-
- c->bit_rate_tolerance = c->bit_rate;
-
- /* open the codec */
- if ( (err=avcodec_open(c, codec)) < 0 ) {
- fprintf(stderr, "Could not open codec '%s': %s", codec->name, icvFFMPEGErrStr(err));
- return false;
- }
-
- outbuf = NULL;
-
- if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
- /* allocate output buffer */
- /* assume we will never get codec output with more than 4 bytes per pixel... */
- outbuf_size = width*height*4;
- outbuf = (uint8_t *) av_malloc(outbuf_size);
- }
-
- bool need_color_convert;
- need_color_convert = (c->pix_fmt != input_pix_fmt);
-
- /* allocate the encoded raw picture */
- picture = icv_alloc_picture_FFMPEG(c->pix_fmt, c->width, c->height, need_color_convert);
- if (!picture) {
- return false;
- }
-
- /* if the output format is not our input format, then a temporary
- picture of the input format is needed too. It is then converted
- to the required output format */
- input_picture = NULL;
- if ( need_color_convert ) {
- input_picture = icv_alloc_picture_FFMPEG(input_pix_fmt, c->width, c->height, false);
- if (!input_picture) {
- return false;
- }
- }
-
- /* open the output file, if needed */
- if (!(fmt->flags & AVFMT_NOFILE)) {
- if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
- return false;
- }
- }
-
- /* write the stream header, if any */
- av_write_header( oc );
-
- return true;
+ /* now that all the parameters are set, we can open the audio and
+ video codecs and allocate the necessary encode buffers */
+ if (!video_st){
+ return false;
}
+ AVCodec *codec;
+ AVCodecContext *c;
+#if LIBAVFORMAT_BUILD > 4628
+ c = (video_st->codec);
+#else
+ c = &(video_st->codec);
+#endif
- CvCapture_FFMPEG* cvCreateFileCapture_FFMPEG( const char* filename )
- {
- CvCapture_FFMPEG* capture = (CvCapture_FFMPEG*)malloc(sizeof(*capture));
- capture->init();
- if( capture->open( filename ))
- return capture;
- capture->close();
- free(capture);
- return 0;
+ c->codec_tag = fourcc;
+ /* find the video encoder */
+ codec = avcodec_find_encoder(c->codec_id);
+ if (!codec) {
+ fprintf(stderr, "Could not find encoder for codec id %d: %s", c->codec_id, icvFFMPEGErrStr(
+ #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
+ AVERROR_ENCODER_NOT_FOUND
+ #else
+ -1
+ #endif
+ ));
+ return false;
}
+ int64_t lbit_rate = (int64_t)c->bit_rate;
+ lbit_rate += (bitrate / 2);
+ lbit_rate = std::min(lbit_rate, (int64_t)INT_MAX);
+ c->bit_rate_tolerance = (int)lbit_rate;
+ c->bit_rate = (int)lbit_rate;
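+ // FFMPEG's rate control rejects a tolerance below roughly one frame's worth
+ // of bits ("bitrate tolerance too small"), hence the generous value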
- void cvReleaseCapture_FFMPEG(CvCapture_FFMPEG** capture)
- {
- if( capture && *capture )
- {
- (*capture)->close();
- free(*capture);
- *capture = 0;
- }
- }
-
- int cvSetCaptureProperty_FFMPEG(CvCapture_FFMPEG* capture, int prop_id, double value)
- {
- return capture->setProperty(prop_id, value);
- }
-
- double cvGetCaptureProperty_FFMPEG(CvCapture_FFMPEG* capture, int prop_id)
- {
- return capture->getProperty(prop_id);
+ /* open the codec */
+ if ((err=
+#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
+ avcodec_open2(c, codec, NULL)
+#else
+ avcodec_open(c, codec)
+#endif
+ ) < 0) {
+ fprintf(stderr, "Could not open codec '%s': %s", codec->name, icvFFMPEGErrStr(err));
+ return false;
}
- int cvGrabFrame_FFMPEG(CvCapture_FFMPEG* capture)
- {
- return capture->grabFrame();
- }
+ outbuf = NULL;
- int cvRetrieveFrame_FFMPEG(CvCapture_FFMPEG* capture, unsigned char** data, int* step, int* width, int* height, int* cn)
- {
- return capture->retrieveFrame(0, data, step, width, height, cn);
+ if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
+ /* allocate output buffer */
+ /* assume we will never get codec output with more than 4 bytes per pixel... */
+ outbuf_size = width*height*4;
+ outbuf = (uint8_t *) av_malloc(outbuf_size);
}
+ bool need_color_convert = (c->pix_fmt != input_pix_fmt);
-
- CvVideoWriter_FFMPEG* cvCreateVideoWriter_FFMPEG( const char* filename, int fourcc, double fps,
- int width, int height, int isColor )
- {
- CvVideoWriter_FFMPEG* writer = (CvVideoWriter_FFMPEG*)malloc(sizeof(*writer));
- writer->init();
- if( writer->open( filename, fourcc, fps, width, height, isColor != 0 ))
- return writer;
- writer->close();
- free(writer);
- return 0;
+ /* allocate the encoded raw picture */
+ picture = icv_alloc_picture_FFMPEG(c->pix_fmt, c->width, c->height, need_color_convert);
+ if (!picture) {
+ return false;
}
-
- void cvReleaseVideoWriter_FFMPEG( CvVideoWriter_FFMPEG** writer )
- {
- if( writer && *writer )
- {
- (*writer)->close();
- free(*writer);
- *writer = 0;
+ /* if the output format is not our input format, then a temporary
+ picture of the input format is needed too. It is then converted
+ to the required output format */
+ input_picture = NULL;
+ if ( need_color_convert ) {
+ input_picture = icv_alloc_picture_FFMPEG(input_pix_fmt, c->width, c->height, false);
+ if (!input_picture) {
+ return false;
}
}
-
- int cvWriteFrame_FFMPEG( CvVideoWriter_FFMPEG* writer,
- const unsigned char* data, int step,
- int width, int height, int cn, int origin)
- {
- return writer->writeFrame(data, step, width, height, cn, origin);
+ /* open the output file, if needed */
+ if (!(fmt->flags & AVFMT_NOFILE)) {
+#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
+ if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
+#else
+ if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0)
+#endif
+ {
+ return false;
+ }
}
-
-/*
- * For CUDA encoder
- */
-
-struct OutputMediaStream_FFMPEG
-{
- bool open(const char* fileName, int width, int height, double fps);
- void close();
+#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
+ /* write the stream header, if any */
+ err=avformat_write_header(oc, NULL);
+#else
+ err=av_write_header( oc );
+#endif
- void write(unsigned char* data, int size, int keyFrame);
-
- // add a video output stream to the container
- static AVStream* addVideoStream(AVFormatContext *oc, CodecID codec_id, int w, int h, int bitrate, double fps, PixelFormat pixel_format);
-
- AVOutputFormat* fmt_;
- AVFormatContext* oc_;
- AVStream* video_st_;
-};
-
-void OutputMediaStream_FFMPEG::close()
-{
- // no more frame to compress. The codec has a latency of a few
- // frames if using B frames, so we get the last frames by
- // passing the same picture again
-
- // TODO -- do we need to account for latency here?
-
- if (oc_)
+ if(err < 0)
{
- // write the trailer, if any
- av_write_trailer(oc_);
-
- // free the streams
- for (unsigned int i = 0; i < oc_->nb_streams; ++i)
- {
- av_freep(&oc_->streams[i]->codec);
- av_freep(&oc_->streams[i]);
- }
-
- if (!(fmt_->flags & AVFMT_NOFILE) && oc_->pb)
- {
- // close the output file
-
- #if LIBAVCODEC_VERSION_INT < ((52<<16)+(123<<8)+0)
- #if LIBAVCODEC_VERSION_INT >= ((51<<16)+(49<<8)+0)
- url_fclose(oc_->pb);
- #else
- url_fclose(&oc_->pb);
- #endif
- #else
- avio_close(oc_->pb);
- #endif
- }
-
- // free the stream
- av_free(oc_);
+ close();
+ remove(filename);
+ return false;
}
+ frame_width = width;
+ frame_height = height;
+ ok = true;
+ return true;
}
-AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CodecID codec_id, int w, int h, int bitrate, double fps, PixelFormat pixel_format)
-{
- #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 10, 0)
- AVStream* st = avformat_new_stream(oc, 0);
- #else
- AVStream* st = av_new_stream(oc, 0);
- #endif
- if (!st)
- return 0;
-
- #if LIBAVFORMAT_BUILD > 4628
- AVCodecContext* c = st->codec;
- #else
- AVCodecContext* c = &(st->codec);
- #endif
-
- c->codec_id = codec_id;
- c->codec_type = AVMEDIA_TYPE_VIDEO;
-
- // put sample parameters
- unsigned long long lbit_rate = static_cast<unsigned long long>(bitrate);
- lbit_rate += (bitrate / 4);
- lbit_rate = std::min(lbit_rate, static_cast<unsigned long long>(std::numeric_limits<int>::max()));
- c->bit_rate = bitrate;
-
- // took advice from
- // http://ffmpeg-users.933282.n4.nabble.com/warning-clipping-1-dct-coefficients-to-127-127-td934297.html
- c->qmin = 3;
-
- // resolution must be a multiple of two
- c->width = w;
- c->height = h;
-
- AVCodec* codec = avcodec_find_encoder(c->codec_id);
-
- // time base: this is the fundamental unit of time (in seconds) in terms
- // of which frame timestamps are represented. for fixed-fps content,
- // timebase should be 1/framerate and timestamp increments should be
- // identically 1
-
- int frame_rate = static_cast<int>(fps+0.5);
- int frame_rate_base = 1;
- while (fabs(static_cast<double>(frame_rate)/frame_rate_base) - fps > 0.001)
- {
- frame_rate_base *= 10;
- frame_rate = static_cast<int>(fps*frame_rate_base + 0.5);
- }
- c->time_base.den = frame_rate;
- c->time_base.num = frame_rate_base;
-
- #if LIBAVFORMAT_BUILD > 4752
- // adjust time base for supported framerates
- if (codec && codec->supported_framerates)
- {
- AVRational req = {frame_rate, frame_rate_base};
- const AVRational* best = NULL;
- AVRational best_error = {INT_MAX, 1};
-
- for (const AVRational* p = codec->supported_framerates; p->den!=0; ++p)
- {
- AVRational error = av_sub_q(req, *p);
- if (error.num < 0)
- error.num *= -1;
- if (av_cmp_q(error, best_error) < 0)
- {
- best_error= error;
- best= p;
- }
- }
-
- c->time_base.den= best->num;
- c->time_base.num= best->den;
- }
- #endif
-
- c->gop_size = 12; // emit one intra frame every twelve frames at most
- c->pix_fmt = pixel_format;
-
- if (c->codec_id == CODEC_ID_MPEG2VIDEO)
- c->max_b_frames = 2;
-
- if (c->codec_id == CODEC_ID_MPEG1VIDEO || c->codec_id == CODEC_ID_MSMPEG4V3)
- {
- // needed to avoid using macroblocks in which some coeffs overflow
- // this doesnt happen with normal video, it just happens here as the
- // motion of the chroma plane doesnt match the luma plane
-
- // avoid FFMPEG warning 'clipping 1 dct coefficients...'
-
- c->mb_decision = 2;
- }
-
- #if LIBAVCODEC_VERSION_INT > 0x000409
- // some formats want stream headers to be seperate
- if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- {
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
- }
- #endif
-
- return st;
-}
-
-bool OutputMediaStream_FFMPEG::open(const char* fileName, int width, int height, double fps)
+CvCapture_FFMPEG* cvCreateFileCapture_FFMPEG( const char* filename )
{
- fmt_ = 0;
- oc_ = 0;
- video_st_ = 0;
-
- // tell FFMPEG to register codecs
- av_register_all();
-
- av_log_set_level(AV_LOG_ERROR);
-
- // auto detect the output format from the name and fourcc code
- #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
- fmt_ = av_guess_format(NULL, fileName, NULL);
- #else
- fmt_ = guess_format(NULL, fileName, NULL);
- #endif
- if (!fmt_)
- return false;
-
- CodecID codec_id = CODEC_ID_H264;
-
- // alloc memory for context
- #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
- oc_ = avformat_alloc_context();
- #else
- oc_ = av_alloc_format_context();
- #endif
- if (!oc_)
- return false;
-
- // set some options
- oc_->oformat = fmt_;
- snprintf(oc_->filename, sizeof(oc_->filename), "%s", fileName);
-
- oc_->max_delay = (int)(0.7 * AV_TIME_BASE); // This reduces buffer underrun warnings with MPEG
-
- // set a few optimal pixel formats for lossless codecs of interest..
- PixelFormat codec_pix_fmt = PIX_FMT_YUV420P;
- int bitrate_scale = 64;
-
- // TODO -- safe to ignore output audio stream?
- video_st_ = addVideoStream(oc_, codec_id, width, height, width * height * bitrate_scale, fps, codec_pix_fmt);
- if (!video_st_)
- return false;
-
- // set the output parameters (must be done even if no parameters)
- #if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
- if (av_set_parameters(oc_, NULL) < 0)
- return false;
- #endif
-
- // now that all the parameters are set, we can open the audio and
- // video codecs and allocate the necessary encode buffers
-
- #if LIBAVFORMAT_BUILD > 4628
- AVCodecContext* c = (video_st_->codec);
- #else
- AVCodecContext* c = &(video_st_->codec);
- #endif
-
- c->codec_tag = MKTAG('H', '2', '6', '4');
- c->bit_rate_tolerance = c->bit_rate;
-
- // open the output file, if needed
- if (!(fmt_->flags & AVFMT_NOFILE))
- {
- #if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
- int err = url_fopen(&oc_->pb, fileName, URL_WRONLY);
- #else
- int err = avio_open(&oc_->pb, fileName, AVIO_FLAG_WRITE);
- #endif
-
- if (err != 0)
- return false;
- }
-
- // write the stream header, if any
- #if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
- av_write_header(oc_);
- #else
- avformat_write_header(oc_, NULL);
- #endif
-
- return true;
+ CvCapture_FFMPEG* capture = (CvCapture_FFMPEG*)malloc(sizeof(*capture));
+ capture->init();
+ if( capture->open( filename ))
+ return capture;
+ capture->close();
+ free(capture);
+ return 0;
}
-void OutputMediaStream_FFMPEG::write(unsigned char* data, int size, int keyFrame)
+
+void cvReleaseCapture_FFMPEG(CvCapture_FFMPEG** capture)
{
- // if zero size, it means the image was buffered
- if (size > 0)
+ if( capture && *capture )
{
- AVPacket pkt;
- av_init_packet(&pkt);
-
- if (keyFrame)
- pkt.flags |= PKT_FLAG_KEY;
-
- pkt.stream_index = video_st_->index;
- pkt.data = data;
- pkt.size = size;
-
- // write the compressed frame in the media file
- av_write_frame(oc_, &pkt);
+ (*capture)->close();
+ free(*capture);
+ *capture = 0;
}
}
-struct OutputMediaStream_FFMPEG* create_OutputMediaStream_FFMPEG(const char* fileName, int width, int height, double fps)
+int cvSetCaptureProperty_FFMPEG(CvCapture_FFMPEG* capture, int prop_id, double value)
{
- OutputMediaStream_FFMPEG* stream = (OutputMediaStream_FFMPEG*) malloc(sizeof(OutputMediaStream_FFMPEG));
-
- if (stream->open(fileName, width, height, fps))
- return stream;
-
- stream->close();
- free(stream);
-
- return 0;
+ return capture->setProperty(prop_id, value);
}
-void release_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream)
+double cvGetCaptureProperty_FFMPEG(CvCapture_FFMPEG* capture, int prop_id)
{
- stream->close();
- free(stream);
+ return capture->getProperty(prop_id);
}
-void write_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream, unsigned char* data, int size, int keyFrame)
+int cvGrabFrame_FFMPEG(CvCapture_FFMPEG* capture)
{
- stream->write(data, size, keyFrame);
+ return capture->grabFrame();
}
-/*
- * For CUDA decoder
- */
-
-enum
+int cvRetrieveFrame_FFMPEG(CvCapture_FFMPEG* capture, unsigned char** data, int* step, int* width, int* height, int* cn)
{
- VideoCodec_MPEG1 = 0,
- VideoCodec_MPEG2,
- VideoCodec_MPEG4,
- VideoCodec_VC1,
- VideoCodec_H264,
- VideoCodec_JPEG,
- VideoCodec_H264_SVC,
- VideoCodec_H264_MVC,
-
- // Uncompressed YUV
- VideoCodec_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), // Y,U,V (4:2:0)
- VideoCodec_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,V,U (4:2:0)
- VideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,UV (4:2:0)
- VideoCodec_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), // YUYV/YUY2 (4:2:2)
- VideoCodec_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')), // UYVY (4:2:2)
-};
-
-enum
-{
- VideoChromaFormat_Monochrome = 0,
- VideoChromaFormat_YUV420,
- VideoChromaFormat_YUV422,
- VideoChromaFormat_YUV444,
-};
-
-struct InputMediaStream_FFMPEG
-{
-public:
- bool open(const char* fileName, int* codec, int* chroma_format, int* width, int* height);
- void close();
-
- bool read(unsigned char** data, int* size, int* endOfFile);
-
-private:
- InputMediaStream_FFMPEG(const InputMediaStream_FFMPEG&);
- InputMediaStream_FFMPEG& operator =(const InputMediaStream_FFMPEG&);
-
- AVFormatContext* ctx_;
- int video_stream_id_;
- AVPacket pkt_;
-};
-
-bool InputMediaStream_FFMPEG::open(const char* fileName, int* codec, int* chroma_format, int* width, int* height)
-{
- int err;
-
- ctx_ = 0;
- video_stream_id_ = -1;
- memset(&pkt_, 0, sizeof(AVPacket));
-
- #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 13, 0)
- avformat_network_init();
- #endif
-
- // register all codecs, demux and protocols
- av_register_all();
-
- av_log_set_level(AV_LOG_ERROR);
-
- #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 6, 0)
- err = avformat_open_input(&ctx_, fileName, 0, 0);
- #else
- err = av_open_input_file(&ctx_, fileName, 0, 0, 0);
- #endif
- if (err < 0)
- return false;
-
- #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 3, 0)
- err = avformat_find_stream_info(ctx_, 0);
- #else
- err = av_find_stream_info(ctx_);
- #endif
- if (err < 0)
- return false;
-
- for (unsigned int i = 0; i < ctx_->nb_streams; ++i)
- {
- #if LIBAVFORMAT_BUILD > 4628
- AVCodecContext *enc = ctx_->streams[i]->codec;
- #else
- AVCodecContext *enc = &ctx_->streams[i]->codec;
- #endif
-
- if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
- {
- video_stream_id_ = static_cast<int>(i);
-
- switch (enc->codec_id)
- {
- case CODEC_ID_MPEG1VIDEO:
- *codec = ::VideoCodec_MPEG1;
- break;
-
- case CODEC_ID_MPEG2VIDEO:
- *codec = ::VideoCodec_MPEG2;
- break;
-
- case CODEC_ID_MPEG4:
- *codec = ::VideoCodec_MPEG4;
- break;
-
- case CODEC_ID_VC1:
- *codec = ::VideoCodec_VC1;
- break;
-
- case CODEC_ID_H264:
- *codec = ::VideoCodec_H264;
- break;
-
- default:
- return false;
- };
-
- switch (enc->pix_fmt)
- {
- case PIX_FMT_YUV420P:
- *chroma_format = ::VideoChromaFormat_YUV420;
- break;
-
- case PIX_FMT_YUV422P:
- *chroma_format = ::VideoChromaFormat_YUV422;
- break;
-
- case PIX_FMT_YUV444P:
- *chroma_format = ::VideoChromaFormat_YUV444;
- break;
-
- default:
- return false;
- }
-
- *width = enc->coded_width;
- *height = enc->coded_height;
-
- break;
- }
- }
-
- if (video_stream_id_ < 0)
- return false;
-
- av_init_packet(&pkt_);
-
- return true;
+ return capture->retrieveFrame(0, data, step, width, height, cn);
}
-void InputMediaStream_FFMPEG::close()
+CvVideoWriter_FFMPEG* cvCreateVideoWriter_FFMPEG( const char* filename, int fourcc, double fps,
+ int width, int height, int isColor )
{
- if (ctx_)
- {
- #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 24, 2)
- avformat_close_input(&ctx_);
- #else
- av_close_input_file(ctx_);
- #endif
- }
-
- // free last packet if exist
- if (pkt_.data)
- av_free_packet(&pkt_);
+ CvVideoWriter_FFMPEG* writer = (CvVideoWriter_FFMPEG*)malloc(sizeof(*writer));
+ writer->init();
+ if( writer->open( filename, fourcc, fps, width, height, isColor != 0 ))
+ return writer;
+ writer->close();
+ free(writer);
+ return 0;
}
-bool InputMediaStream_FFMPEG::read(unsigned char** data, int* size, int* endOfFile)
-{
- // free last packet if exist
- if (pkt_.data)
- av_free_packet(&pkt_);
- // get the next frame
- for (;;)
+void cvReleaseVideoWriter_FFMPEG( CvVideoWriter_FFMPEG** writer )
+{
+ if( writer && *writer )
{
- int ret = av_read_frame(ctx_, &pkt_);
-
- if (ret == AVERROR(EAGAIN))
- continue;
-
- if (ret < 0)
- {
- if (ret == AVERROR_EOF)
- *endOfFile = true;
- return false;
- }
-
- if (pkt_.stream_index != video_stream_id_)
- {
- av_free_packet(&pkt_);
- continue;
- }
-
- break;
+ (*writer)->close();
+ free(*writer);
+ *writer = 0;
}
-
- *data = pkt_.data;
- *size = pkt_.size;
- *endOfFile = false;
-
- return true;
}
-InputMediaStream_FFMPEG* create_InputMediaStream_FFMPEG(const char* fileName, int* codec, int* chroma_format, int* width, int* height)
-{
- InputMediaStream_FFMPEG* stream = (InputMediaStream_FFMPEG*) malloc(sizeof(InputMediaStream_FFMPEG));
-
- if (stream && stream->open(fileName, codec, chroma_format, width, height))
- return stream;
- stream->close();
- free(stream);
-
- return 0;
-}
-
-void release_InputMediaStream_FFMPEG(InputMediaStream_FFMPEG* stream)
+int cvWriteFrame_FFMPEG( CvVideoWriter_FFMPEG* writer,
+ const unsigned char* data, int step,
+ int width, int height, int cn, int origin)
{
- stream->close();
- free(stream);
+ return writer->writeFrame(data, step, width, height, cn, origin);
}
-int read_InputMediaStream_FFMPEG(InputMediaStream_FFMPEG* stream, unsigned char** data, int* size, int* endOfFile)
-{
- return stream->read(data, size, endOfFile);
-}
class CvCapture_OpenNI : public CvCapture
{
public:
+ enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_MAX=1 };
+
static const int INVALID_PIXEL_VAL = 0;
static const int INVALID_COORDINATE_VAL = 0;
-
#ifdef HAVE_TBB
static const int DEFAULT_MAX_BUFFER_SIZE = 8;
#else
return mode;
}
+
CvCapture_OpenNI::CvCapture_OpenNI( int index )
{
+ int deviceType = DEVICE_DEFAULT;
XnStatus status;
-
+
isContextOpened = false;
maxBufferSize = DEFAULT_MAX_BUFFER_SIZE;
isCircleBuffer = DEFAULT_IS_CIRCLE_BUFFER;
maxTimeDuration = DEFAULT_MAX_TIME_DURATION;
+
+ if( index >= 10 )
+ {
+ deviceType = index / 10;
+ index %= 10;
+ }
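+ // e.g. index 10 opens the first device as an Asus Xtion, while plain
+ // indices 0..9 keep the default (MS Kinect) code path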
+
+ if( deviceType > DEVICE_MAX )
+ return;
// Initialize and configure the context.
status = context.Init();
CV_DbgAssert( imageGenerator.SetMapOutputMode(defaultMapOutputMode()) == XN_STATUS_OK );
}
+ if( deviceType == DEVICE_ASUS_XTION )
+ {
+ // PS/ASUS specific
+ imageGenerator.SetIntProperty("InputFormat", 1 /*XN_IO_IMAGE_FORMAT_YUV422*/);
+ imageGenerator.SetPixelFormat(XN_PIXEL_FORMAT_RGB24);
+ depthGenerator.SetIntProperty("RegistrationType", 1 /*XN_PROCESSING_HARDWARE*/);
+ }
+
// Start generating data.
status = context.StartGeneratingAll();
if( status != XN_STATUS_OK )
// Stop the acquisition & free the camera
PvCommandRun(Camera.Handle, "AcquisitionStop");
PvCaptureEnd(Camera.Handle);
- PvCameraClose(Camera.Handle);
+ PvCameraClose(Camera.Handle);
+ PvUnInitialize();
}
// Initialize camera input
png_set_palette_to_rgb( png_ptr );
if( m_color_type == PNG_COLOR_TYPE_GRAY && m_bit_depth < 8 )
-#if PNG_LIBPNG_VER_MAJOR*100 + PNG_LIBPNG_VER_MINOR >= 104
+#if (PNG_LIBPNG_VER_MAJOR*10000 + PNG_LIBPNG_VER_MINOR*100 + PNG_LIBPNG_VER_RELEASE >= 10209) || \
+ (PNG_LIBPNG_VER_MAJOR == 1 && PNG_LIBPNG_VER_MINOR == 0 && PNG_LIBPNG_VER_RELEASE >= 18)
png_set_expand_gray_1_2_4_to_8( png_ptr );
#else
png_set_gray_1_2_4_to_8( png_ptr );
\r
#ifdef _WIN32\r
#include <windows.h>\r
+#else\r
+#include <unistd.h>\r
#endif\r
\r
#ifdef HAVE_QT_OPENGL\r
return 0;
}
-TEST(Highgui_Drawing_CPP, regression) { CV_DrawingTest_CPP test; test.safe_run(); }
-TEST(Highgui_Drawing_C, regression) { CV_DrawingTest_C test; test.safe_run(); }
+#ifdef HAVE_JPEG
+TEST(Highgui_Drawing, cpp_regression) { CV_DrawingTest_CPP test; test.safe_run(); }
+TEST(Highgui_Drawing, c_regression) { CV_DrawingTest_C test; test.safe_run(); }
+#endif
class CV_FillConvexPolyTest : public cvtest::BaseTest
{
}
};
-TEST(Highgui_Drawing_FillConvexPoly, clipping) { CV_FillConvexPolyTest test; test.safe_run(); }
+TEST(Highgui_Drawing, fillconvexpoly_clipping) { CV_FillConvexPolyTest test; test.safe_run(); }
-/*M///////////////////////////////////////////////////////////////////////////////////////\r
-//\r
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
-//\r
-// By downloading, copying, installing or using the software you agree to this license.\r
-// If you do not agree to this license, do not download, install,\r
-// copy or use the software.\r
-//\r
-//\r
-// License Agreement\r
-// For Open Source Computer Vision Library\r
-//\r
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
-// Third party copyrights are property of their respective owners.\r
-//\r
-// Redistribution and use in source and binary forms, with or without modification,\r
-// are permitted provided that the following conditions are met:\r
-//\r
-// * Redistribution's of source code must retain the above copyright notice,\r
-// this list of conditions and the following disclaimer.\r
-//\r
-// * Redistribution's in binary form must reproduce the above copyright notice,\r
-// this list of conditions and the following disclaimer in the documentation\r
-// and/or other materials provided with the distribution.\r
-//\r
-// * The name of the copyright holders may not be used to endorse or promote products\r
-// derived from this software without specific prior written permission.\r
-//\r
-// This software is provided by the copyright holders and contributors "as is" and\r
-// any express or implied warranties, including, but not limited to, the implied\r
-// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
-// In no event shall the Intel Corporation or contributors be liable for any direct,\r
-// indirect, incidental, special, exemplary, or consequential damages\r
-// (including, but not limited to, procurement of substitute goods or services;\r
-// loss of use, data, or profits; or business interruption) however caused\r
-// and on any theory of liability, whether in contract, strict liability,\r
-// or tort (including negligence or otherwise) arising in any way out of\r
-// the use of this software, even if advised of the possibility of such damage.\r
-//\r
-//M*/\r
-\r
-#include "test_precomp.hpp"\r
-#include "opencv2/highgui/highgui.hpp"\r
-\r
-#ifdef HAVE_FFMPEG\r
-\r
-#include "ffmpeg_codecs.hpp"\r
-\r
-using namespace cv;\r
-using namespace std;\r
-\r
-class CV_FFmpegWriteBigVideoTest : public cvtest::BaseTest\r
-{\r
-public:\r
- void run(int)\r
- {\r
- const int img_r = 4096;\r
- const int img_c = 4096;\r
- const double fps0 = 15;\r
- const double time_sec = 1;\r
- \r
- const size_t n = sizeof(codec_bmp_tags)/sizeof(codec_bmp_tags[0]);\r
-\r
- bool created = false;\r
-\r
- for (size_t j = 0; j < n; ++j)\r
- {\r
- stringstream s; s << codec_bmp_tags[j].tag;\r
- int tag = codec_bmp_tags[j].tag;\r
- \r
- if( tag != MKTAG('H', '2', '6', '3') &&\r
- tag != MKTAG('H', '2', '6', '1') &&\r
- tag != MKTAG('D', 'I', 'V', 'X') &&\r
- tag != MKTAG('D', 'X', '5', '0') &&\r
- tag != MKTAG('X', 'V', 'I', 'D') &&\r
- tag != MKTAG('m', 'p', '4', 'v') &&\r
- tag != MKTAG('D', 'I', 'V', '3') &&\r
- tag != MKTAG('W', 'M', 'V', '1') &&\r
- tag != MKTAG('W', 'M', 'V', '2') &&\r
- tag != MKTAG('M', 'P', 'E', 'G') &&\r
- tag != MKTAG('M', 'J', 'P', 'G') &&\r
- tag != MKTAG('j', 'p', 'e', 'g') &&\r
- tag != 0 &&\r
- tag != MKTAG('I', '4', '2', '0') &&\r
- tag != MKTAG('Y', 'U', 'Y', '2') &&\r
- tag != MKTAG('F', 'L', 'V', '1') )\r
- continue;\r
-\r
- const string filename = "output_"+s.str()+".avi";\r
-\r
- try\r
- {\r
- double fps = fps0;\r
- Size frame_s = Size(img_c, img_r);\r
- \r
- if( tag == CV_FOURCC('H', '2', '6', '1') )\r
- frame_s = Size(352, 288);\r
- else if( tag == CV_FOURCC('H', '2', '6', '3') )\r
- frame_s = Size(704, 576);\r
- /*else if( tag == CV_FOURCC('M', 'J', 'P', 'G') ||\r
- tag == CV_FOURCC('j', 'p', 'e', 'g') )\r
- frame_s = Size(1920, 1080);*/\r
- \r
- if( tag == CV_FOURCC('M', 'P', 'E', 'G') )\r
- fps = 25;\r
- \r
- VideoWriter writer(filename, tag, fps, frame_s);\r
-\r
- if (writer.isOpened() == false)\r
- {\r
- ts->printf(ts->LOG, "\n\nFile name: %s\n", filename.c_str());\r
- ts->printf(ts->LOG, "Codec id: %d Codec tag: %c%c%c%c\n", j,\r
- tag & 255, (tag >> 8) & 255, (tag >> 16) & 255, (tag >> 24) & 255);\r
- ts->printf(ts->LOG, "Error: cannot create video file.");\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- }\r
- else\r
- {\r
- Mat img(frame_s, CV_8UC3, Scalar::all(0));\r
- const int coeff = cvRound(cv::min(frame_s.width, frame_s.height)/(fps0 * time_sec));\r
-\r
- for (int i = 0 ; i < static_cast<int>(fps * time_sec); i++ )\r
- {\r
- //circle(img, Point2i(img_c / 2, img_r / 2), cv::min(img_r, img_c) / 2 * (i + 1), Scalar(255, 0, 0, 0), 2);\r
- rectangle(img, Point2i(coeff * i, coeff * i), Point2i(coeff * (i + 1), coeff * (i + 1)),\r
- Scalar::all(255 * (1.0 - static_cast<double>(i) / (fps * time_sec * 2) )), -1);\r
- writer << img;\r
- }\r
-\r
- if (!created) created = true;\r
- else remove(filename.c_str());\r
- }\r
- }\r
- catch(...)\r
- {\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- }\r
- ts->set_failed_test_info(cvtest::TS::OK);\r
-\r
- }\r
- }\r
-};\r
-\r
-TEST(Highgui_FFmpeg_WriteBigVideo, regression) { CV_FFmpegWriteBigVideoTest test; test.safe_run(); }\r
-\r
-#endif\r
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+#ifdef HAVE_FFMPEG
+
+#include "ffmpeg_codecs.hpp"
+
+using namespace cv;
+using namespace std;
+
+class CV_FFmpegWriteBigVideoTest : public cvtest::BaseTest
+{
+public:
+ void run(int)
+ {
+ const int img_r = 4096;
+ const int img_c = 4096;
+ const double fps0 = 15;
+ const double time_sec = 1;
+
+ const size_t n = sizeof(codec_bmp_tags)/sizeof(codec_bmp_tags[0]);
+
+ bool created = false;
+
+ for (size_t j = 0; j < n; ++j)
+ {
+ stringstream s; s << codec_bmp_tags[j].tag;
+ int tag = codec_bmp_tags[j].tag;
+
+ if( tag != MKTAG('H', '2', '6', '3') &&
+ tag != MKTAG('H', '2', '6', '1') &&
+ //tag != MKTAG('D', 'I', 'V', 'X') &&
+ tag != MKTAG('D', 'X', '5', '0') &&
+ tag != MKTAG('X', 'V', 'I', 'D') &&
+ tag != MKTAG('m', 'p', '4', 'v') &&
+ //tag != MKTAG('D', 'I', 'V', '3') &&
+ //tag != MKTAG('W', 'M', 'V', '1') &&
+ //tag != MKTAG('W', 'M', 'V', '2') &&
+ tag != MKTAG('M', 'P', 'E', 'G') &&
+ tag != MKTAG('M', 'J', 'P', 'G') &&
+ //tag != MKTAG('j', 'p', 'e', 'g') &&
+ tag != 0 &&
+ tag != MKTAG('I', '4', '2', '0') &&
+ //tag != MKTAG('Y', 'U', 'Y', '2') &&
+ tag != MKTAG('F', 'L', 'V', '1') )
+ continue;
+
+ const string filename = "output_"+s.str()+".avi";
+
+ try
+ {
+ double fps = fps0;
+ Size frame_s = Size(img_c, img_r);
+
+ if( tag == CV_FOURCC('H', '2', '6', '1') )
+ frame_s = Size(352, 288);
+ else if( tag == CV_FOURCC('H', '2', '6', '3') )
+ frame_s = Size(704, 576);
+ /*else if( tag == CV_FOURCC('M', 'J', 'P', 'G') ||
+ tag == CV_FOURCC('j', 'p', 'e', 'g') )
+ frame_s = Size(1920, 1080);*/
+
+ if( tag == CV_FOURCC('M', 'P', 'E', 'G') )
+ fps = 25;
+
+ VideoWriter writer(filename, tag, fps, frame_s);
+
+ if (writer.isOpened() == false)
+ {
+ ts->printf(ts->LOG, "\n\nFile name: %s\n", filename.c_str());
+ ts->printf(ts->LOG, "Codec id: %d Codec tag: %c%c%c%c\n", j,
+ tag & 255, (tag >> 8) & 255, (tag >> 16) & 255, (tag >> 24) & 255);
+ ts->printf(ts->LOG, "Error: cannot create video file.");
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ }
+ else
+ {
+ Mat img(frame_s, CV_8UC3, Scalar::all(0));
+ const int coeff = cvRound(cv::min(frame_s.width, frame_s.height)/(fps0 * time_sec));
+
+ for (int i = 0 ; i < static_cast<int>(fps * time_sec); i++ )
+ {
+ //circle(img, Point2i(img_c / 2, img_r / 2), cv::min(img_r, img_c) / 2 * (i + 1), Scalar(255, 0, 0, 0), 2);
+ rectangle(img, Point2i(coeff * i, coeff * i), Point2i(coeff * (i + 1), coeff * (i + 1)),
+ Scalar::all(255 * (1.0 - static_cast<double>(i) / (fps * time_sec * 2) )), -1);
+ writer << img;
+ }
+
+ if (!created) created = true;
+ else remove(filename.c_str());
+ }
+ }
+ catch(...)
+ {
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ }
+ ts->set_failed_test_info(cvtest::TS::OK);
+
+ }
+ }
+};
+
+TEST(Highgui_Video, ffmpeg_writebig) { CV_FFmpegWriteBigVideoTest test; test.safe_run(); }
+
+class CV_FFmpegReadImageTest : public cvtest::BaseTest
+{
+public:
+ void run(int)
+ {
+ try
+ {
+ string filename = ts->get_data_path() + "../cv/features2d/tsukuba.png";
+ VideoCapture cap(filename);
+ Mat img0 = imread(filename, 1);
+ Mat img, img_next;
+ cap >> img;
+ cap >> img_next;
+
+ CV_Assert( !img0.empty() && !img.empty() && img_next.empty() );
+
+ double diff = norm(img0, img, CV_C);
+ CV_Assert( diff == 0 );
+ }
+ catch(...)
+ {
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ }
+ ts->set_failed_test_info(cvtest::TS::OK);
+ }
+};
+
+TEST(Highgui_Video, ffmpeg_image) { CV_FFmpegReadImageTest test; test.safe_run(); }
+
+#endif
void CV_FramecountTest::run(int)
{
-#if defined WIN32 || (defined __linux__ && !defined ANDROID) || (defined __APPLE__ && defined HAVE_FFMPEG)
-#if !defined HAVE_GSTREAMER || defined HAVE_GSTREAMER_APP
-
const int time_sec = 5, fps = 25;
- const string ext[] = {"avi", "mov", "mp4", "mpg", "wmv"};
+ const string ext[] = {"avi", "mov", "mp4"};
const size_t n = sizeof(ext)/sizeof(ext[0]);
ts->printf(cvtest::TS::LOG, "\n\nSource files directory: %s\n", (src_dir+"video/").c_str());
- int failed = 0;
+ Ptr<CvCapture> cap;
for (size_t i = 0; i < n; ++i)
{
- int code = cvtest::TS::OK;
-
string file_path = src_dir+"video/big_buck_bunny."+ext[i];
- printf("\nReading video file in %s...\n", file_path.c_str());
-
- CvCapture *cap = cvCreateFileCapture(file_path.c_str());
- if (!cap)
+ cap = cvCreateFileCapture(file_path.c_str());
+ if (cap.empty())
{
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\nFAILED\n\n", i+1, ext[i].c_str());
ts->printf(cvtest::TS::LOG, "Error: cannot read source video file.\n");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
- failed++; continue;
+ return;
}
- cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, 0);
- IplImage* frame; int FrameCount = -1;
+ //cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, 0);
+ IplImage* frame; int FrameCount = 0;
- do
+ for(;;)
{
- FrameCount++;
frame = cvQueryFrame(cap);
+ if( !frame )
+ break;
+ FrameCount++;
}
- while (frame);
int framecount = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT);
"Frame count returned by cvGetCaptureProperty function: %d\n",
(int)i+1, ext[i].c_str(), time_sec*fps, FrameCount, framecount);
- code = FrameCount != time_sec*fps ? cvtest::TS::FAIL_INVALID_OUTPUT : FrameCount != framecount ? cvtest::TS::FAIL_INVALID_OUTPUT : code;
-
- if (code)
+ if( (FrameCount != cvRound(time_sec*fps) ||
+ FrameCount != framecount) && ext[i] != "mpg" )
{
ts->printf(cvtest::TS::LOG, "FAILED\n");
ts->printf(cvtest::TS::LOG, "\nError: actual frame count and returned frame count are not matched.\n");
- ts->set_failed_test_info(code);
- failed++;
- }
- else
- {
- ts->printf(cvtest::TS::LOG, "OK\n");
- ts->set_failed_test_info(ts->OK);
+ ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
+ return;
}
-
- cvReleaseImage(&frame);
- cvReleaseCapture(&cap);
}
-
- ts->printf(cvtest::TS::LOG, "\nSuccessfull experiments: %d (%d%%)\n", n-failed, (n - failed)*100/n);
- ts->printf(cvtest::TS::LOG, "Failed experiments: %d (%d%%)\n", failed, failed*100/n);
-
-#endif
-#endif
}
-
-TEST(HighguiFramecount, regression) {CV_FramecountTest test; test.safe_run();}
+#if BUILD_WITH_VIDEO_INPUT_SUPPORT
+TEST(Highgui_Video, framecount) {CV_FramecountTest test; test.safe_run();}
+#endif
}
};
-TEST(Highgui_Grfmt_WriteBigImage, regression) { CV_GrfmtWriteBigImageTest test; test.safe_run(); }
-TEST(Highgui_Grfmt_WriteSequenceImage, regression) { CV_GrfmtWriteSequenceImageTest test; test.safe_run(); }
-TEST(GrfmtReadBMPRLE8, regression) { CV_GrfmtReadBMPRLE8Test test; test.safe_run(); }
+#ifdef HAVE_PNG
+TEST(Highgui_Image, write_big) { CV_GrfmtWriteBigImageTest test; test.safe_run(); }
+#endif
+
+#if defined(HAVE_PNG) && defined(HAVE_TIFF) && defined(HAVE_JPEG)
+TEST(Highgui_Image, write_imageseq) { CV_GrfmtWriteSequenceImageTest test; test.safe_run(); }
+#endif
+
+TEST(Highgui_Image, read_bmp_rle8) { CV_GrfmtReadBMPRLE8Test test; test.safe_run(); }
using namespace cv;
using namespace std;
-enum NAVIGATION_METHOD {PROGRESSIVE, RANDOM};
-
class CV_VideoPositioningTest: public cvtest::BaseTest
{
public:
+ enum {PROGRESSIVE, RANDOM};
+
CV_VideoPositioningTest();
~CV_VideoPositioningTest();
virtual void run(int) = 0;
ts->printf(cvtest::TS::LOG, "\n\nSource files directory: %s\n", (src_dir+"video/").c_str());
- const string ext[] = {"avi", "mp4", "wmv"};
+ const string ext[] = {"avi", "mov", "mp4", "mpg"};
- size_t n = sizeof(ext)/sizeof(ext[0]);
+ int n = (int)(sizeof(ext)/sizeof(ext[0]));
int failed_videos = 0;
- for (size_t i = 0; i < n; ++i)
+ for (int i = 0; i < n; ++i)
{
+ // skip random positioning test in plain mpegs
+ if( method == RANDOM && ext[i] == "mpg" )
+ continue;
string file_path = src_dir + "video/big_buck_bunny." + ext[i];
- printf("\nReading video file in %s...\n", file_path.c_str());
+ ts->printf(cvtest::TS::LOG, "\nReading video file in %s...\n", file_path.c_str());
CvCapture* cap = cvCreateFileCapture(file_path.c_str());
ts->printf(cvtest::TS::LOG, "Required pos: %d\nReturned pos: %d\n", idx.at(j), val);
ts->printf(cvtest::TS::LOG, "Error: required and returned positions are not matched.\n");
ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
- if (!flag) flag = !flag;
+ flag = true;
}
- if (flag) failed_iterations++;
+ if (flag)
+ {
+ failed_iterations++;
+ failed_videos++;
+ break;
+ }
}
- ts->printf(cvtest::TS::LOG, "\nSuccessfull iterations: %d (%d%%)\n", idx.size()-failed_iterations, 100*(idx.size()-failed_iterations)/idx.size());
- ts->printf(cvtest::TS::LOG, "Failed iterations: %d (%d%%)\n", failed_iterations, 100*failed_iterations/idx.size());
-
- if (failed_frames||failed_positions)
- {
- ts->printf(cvtest::TS::LOG, "\nFAILED\n----------\n"); failed_videos++;
- }
-
cvReleaseCapture(&cap);
}
void CV_VideoProgressivePositioningTest::run(int)
{
-#if defined WIN32 || (defined __linux__ && !defined ANDROID) || (defined __APPLE__ && defined HAVE_FFMPEG)
-#if !defined HAVE_GSTREAMER || defined HAVE_GSTREAMER_APP
-
run_test(PROGRESSIVE);
-
-#endif
-#endif
}
void CV_VideoRandomPositioningTest::run(int)
{
-#if defined WIN32 || (defined __linux__ && !defined ANDROID) || (defined __APPLE__ && defined HAVE_FFMPEG)
-#if !defined HAVE_GSTREAMER || defined HAVE_GSTREAMER_APP
-
run_test(RANDOM);
-
-#endif
-#endif
}
-TEST (HighguiPositioning, progressive) { CV_VideoProgressivePositioningTest test; test.safe_run(); }
-TEST (HighguiPositioning, random) { CV_VideoRandomPositioningTest test; test.safe_run(); }
+#if BUILD_WITH_VIDEO_INPUT_SUPPORT
+TEST (Highgui_Video, seek_progressive) { CV_VideoProgressivePositioningTest test; test.safe_run(); }
+TEST (Highgui_Video, seek_random) { CV_VideoRandomPositioningTest test; test.safe_run(); }
+#endif
\ No newline at end of file
#include "opencv2/imgproc/imgproc_c.h"
#include <iostream>
+#if defined(HAVE_VIDEOINPUT) || \
+ defined(HAVE_TYZX) || \
+ defined(HAVE_VFW) || \
+ defined(HAVE_LIBV4L) || \
+ (defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2)) || \
+ defined(HAVE_GSTREAMER) || \
+ defined(HAVE_DC1394_2) || \
+ defined(HAVE_DC1394) || \
+ defined(HAVE_CMU1394) || \
+ defined(HAVE_MIL) || \
+ defined(HAVE_QUICKTIME) || \
+ defined(HAVE_UNICAP) || \
+ defined(HAVE_PVAPI) || \
+ defined(HAVE_OPENNI) || \
+ defined(HAVE_XIMEA) || \
+ defined(HAVE_AVFOUNDATION) || \
+ (0)
+ //defined(HAVE_ANDROID_NATIVE_CAMERA) || - enable after #1193
+# define BUILD_WITH_CAMERA_SUPPORT 1
+#else
+# define BUILD_WITH_CAMERA_SUPPORT 0
+#endif
+
+#if defined(HAVE_XINE) || \
+ defined(HAVE_GSTREAMER) || \
+ defined(HAVE_QUICKTIME) || \
+ defined(HAVE_AVFOUNDATION) || \
+ /*defined(HAVE_OPENNI) || too specialized */ \
+ defined(HAVE_FFMPEG) || \
+ defined(WIN32) /* assume that we have ffmpeg */
+
+# define BUILD_WITH_VIDEO_INPUT_SUPPORT 1
+#else
+# define BUILD_WITH_VIDEO_INPUT_SUPPORT 0
+#endif
+
+#if /*defined(HAVE_XINE) || */\
+ defined(HAVE_GSTREAMER) || \
+ defined(HAVE_QUICKTIME) || \
+ defined(HAVE_AVFOUNDATION) || \
+ defined(HAVE_FFMPEG) || \
+ defined(WIN32) /* assume that we have ffmpeg */
+# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 1
+#else
+# define BUILD_WITH_VIDEO_OUTPUT_SUPPORT 0
+#endif
+
+namespace cvtest
+{
+
+string fourccToString(int fourcc);
+
+struct VideoFormat
+{
+ VideoFormat() { fourcc = -1; }
+ VideoFormat(const string& _ext, int _fourcc) : ext(_ext), fourcc(_fourcc) {}
+ bool empty() const { return ext.empty(); }
+
+ string ext;
+ int fourcc;
+};
+
+extern const VideoFormat g_specific_fmt_list[];
+
+}
+
#endif
using namespace cv;
using namespace std;
+namespace cvtest
+{
+
+string fourccToString(int fourcc)
+{
+ return format("%c%c%c%c", fourcc & 255, (fourcc >> 8) & 255, (fourcc >> 16) & 255, (fourcc >> 24) & 255);
+}
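+
+// e.g. fourccToString(CV_FOURCC('X', 'V', 'I', 'D')) returns "XVID", since
+// CV_FOURCC packs the four characters into a single int, low byte first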
+
+const VideoFormat g_specific_fmt_list[] =
+{
+ VideoFormat("avi", CV_FOURCC('X', 'V', 'I', 'D')),
+ VideoFormat("avi", CV_FOURCC('M', 'P', 'E', 'G')),
+ VideoFormat("avi", CV_FOURCC('M', 'J', 'P', 'G')),
+ //VideoFormat("avi", CV_FOURCC('I', 'Y', 'U', 'V')),
+ VideoFormat("mkv", CV_FOURCC('X', 'V', 'I', 'D')),
+ VideoFormat("mkv", CV_FOURCC('M', 'P', 'E', 'G')),
+ VideoFormat("mkv", CV_FOURCC('M', 'J', 'P', 'G')),
+
+ VideoFormat("mov", CV_FOURCC('m', 'p', '4', 'v')),
+ VideoFormat()
+};
+
+}
+
class CV_HighGuiTest : public cvtest::BaseTest
{
protected:
void ImageTest(const string& dir);
- void VideoTest (const string& dir, int fourcc);
+ void VideoTest (const string& dir, const cvtest::VideoFormat& fmt);
void SpecificImageTest (const string& dir);
- void SpecificVideoFileTest (const string& dir, const char codecchars[4]);
- void SpecificVideoCameraTest (const string& dir, const char codecchars[4]);
+ void SpecificVideoTest (const string& dir, const cvtest::VideoFormat& fmt);
-public:
- CV_HighGuiTest();
- ~CV_HighGuiTest();
+ CV_HighGuiTest() {}
+ ~CV_HighGuiTest() {}
virtual void run(int) = 0;
};
class CV_ImageTest : public CV_HighGuiTest
{
public:
- CV_ImageTest();
- ~CV_ImageTest();
+ CV_ImageTest() {}
+ ~CV_ImageTest() {}
void run(int);
};
class CV_SpecificImageTest : public CV_HighGuiTest
{
public:
- CV_SpecificImageTest();
- ~CV_SpecificImageTest();
+ CV_SpecificImageTest() {}
+ ~CV_SpecificImageTest() {}
void run(int);
};
class CV_VideoTest : public CV_HighGuiTest
{
public:
- CV_VideoTest();
- ~CV_VideoTest();
+ CV_VideoTest() {}
+ ~CV_VideoTest() {}
void run(int);
};
-class CV_SpecificVideoFileTest : public CV_HighGuiTest
+class CV_SpecificVideoTest : public CV_HighGuiTest
{
public:
- CV_SpecificVideoFileTest();
- ~CV_SpecificVideoFileTest();
+ CV_SpecificVideoTest() {}
+ ~CV_SpecificVideoTest() {}
void run(int);
};
-class CV_SpecificVideoCameraTest : public CV_HighGuiTest
-{
-public:
- CV_SpecificVideoCameraTest();
- ~CV_SpecificVideoCameraTest();
- void run(int);
-};
-
-CV_HighGuiTest::CV_HighGuiTest() {}
-CV_HighGuiTest::~CV_HighGuiTest() {}
-
-CV_ImageTest::CV_ImageTest() : CV_HighGuiTest() {}
-CV_VideoTest::CV_VideoTest() : CV_HighGuiTest() {}
-CV_SpecificImageTest::CV_SpecificImageTest() : CV_HighGuiTest() {}
-CV_SpecificVideoFileTest::CV_SpecificVideoFileTest() : CV_HighGuiTest() {}
-CV_SpecificVideoCameraTest::CV_SpecificVideoCameraTest() : CV_HighGuiTest() {}
-
-CV_ImageTest::~CV_ImageTest() {}
-CV_VideoTest::~CV_VideoTest() {}
-CV_SpecificImageTest::~CV_SpecificImageTest() {}
-CV_SpecificVideoFileTest::~CV_SpecificVideoFileTest() {}
-CV_SpecificVideoCameraTest::~CV_SpecificVideoCameraTest() {}
-
-double PSNR(const Mat& m1, const Mat& m2)
-{
- Mat tmp;
- absdiff( m1.reshape(1), m2.reshape(1), tmp);
- multiply(tmp, tmp, tmp);
-
- double MSE = 1.0/(tmp.cols * tmp.rows) * sum(tmp)[0];
-
- return 20 * log10(255.0 / sqrt(MSE));
-}
void CV_HighGuiTest::ImageTest(const string& dir)
{
return;
}
- const string exts[] = {"png", "bmp", "tiff", "jpg", "jp2", "ppm", "ras" };
+ const string exts[] = {
+#ifdef HAVE_PNG
+ "png",
+#endif
+#ifdef HAVE_TIFF
+ "tiff",
+#endif
+#ifdef HAVE_JPEG
+ "jpg",
+#endif
+#ifdef HAVE_JASPER
+ "jp2",
+#endif
+#ifdef HAVE_OPENEXR
+ "exr",
+#endif
+ "bmp",
+ "ppm",
+ "ras"
+ };
const size_t ext_num = sizeof(exts)/sizeof(exts[0]);
for(size_t i = 0; i < ext_num; ++i)
ts->set_failed_test_info(ts->OK);
}
-void CV_HighGuiTest::VideoTest(const string& dir, int fourcc)
+
+void CV_HighGuiTest::VideoTest(const string& dir, const cvtest::VideoFormat& fmt)
{
string src_file = dir + "../cv/shared/video_for_test.avi";
- string tmp_name = "video.avi";
+ string tmp_name = format("video_%s.%s", cvtest::fourccToString(fmt.fourcc).c_str(), fmt.ext.c_str());
- ts->printf(ts->LOG, "reading video : %s\n", src_file.c_str());
+ ts->printf(ts->LOG, "reading video : %s and converting it to %s\n", src_file.c_str(), tmp_name.c_str());
CvCapture* cap = cvCaptureFromFile(src_file.c_str());
}
CvVideoWriter* writer = 0;
+ vector<Mat> frames;
for(;;)
{
if (!img)
break;
+
+ frames.push_back(Mat(img).clone());
if (writer == 0)
{
- writer = cvCreateVideoWriter(tmp_name.c_str(), fourcc, 24, cvGetSize(img));
+ writer = cvCreateVideoWriter(tmp_name.c_str(), fmt.fourcc, 24, cvGetSize(img));
if (writer == 0)
{
- ts->printf(ts->LOG, "can't create writer (with fourcc : %d)\n", fourcc);
+ ts->printf(ts->LOG, "can't create writer (with fourcc : %d)\n",
+ cvtest::fourccToString(fmt.fourcc).c_str());
cvReleaseCapture( &cap );
ts->set_failed_test_info(ts->FAIL_MISMATCH);
return;
cvReleaseVideoWriter( &writer );
cvReleaseCapture( &cap );
- cap = cvCaptureFromFile(src_file.c_str());
-
CvCapture *saved = cvCaptureFromFile(tmp_name.c_str());
if (!saved)
{
const double thresDbell = 20;
- for(;;)
+ for(int i = 0;; i++)
{
- IplImage* ipl = cvQueryFrame( cap );
IplImage* ipl1 = cvQueryFrame( saved );
- if (!ipl || !ipl1)
+ if (!ipl1)
break;
- Mat img(ipl);
+ Mat img = frames[i];
Mat img1(ipl1);
- if (PSNR(img1, img) < thresDbell)
+ double psnr = PSNR(img1, img);
+ if (psnr < thresDbell)
{
+ printf("Too low psnr = %gdb\n", psnr);
+ imwrite("img.png", img);
+ imwrite("img1.png", img1);
ts->set_failed_test_info(ts->FAIL_MISMATCH);
break;
}
}
- cvReleaseCapture( &cap );
cvReleaseCapture( &saved );
ts->printf(ts->LOG, "end test function : ImagesVideo \n");
return;
}
- cv::resize(image, image, cv::Size(968, 757), 0.0, 0.0, cv::INTER_CUBIC);
+ resize(image, image, Size(968, 757), 0.0, 0.0, INTER_CUBIC);
stringstream s_digit; s_digit << i;
ts->set_failed_test_info(ts->OK);
}
-void CV_HighGuiTest::SpecificVideoFileTest(const string& dir, const char codecchars[4])
+
+void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFormat& fmt)
{
- const string exts[] = {"avi", "mov", "mpg", "wmv"};
- const size_t n = sizeof(exts)/sizeof(exts[0]);
- int fourcc0 = CV_FOURCC(codecchars[0], codecchars[1], codecchars[2], codecchars[3]);
-
- for (size_t j = 0; j < n; ++j)
- {
- string ext = exts[j];
- int fourcc = fourcc0;
+ string ext = fmt.ext;
+ int fourcc = fmt.fourcc;
- if( (ext == "mov" && fourcc != CV_FOURCC('M', 'J', 'P', 'G')) ||
- (ext == "mpg" && fourcc != CV_FOURCC('m', 'p', 'e', 'g')) ||
- (ext == "wmv" && fourcc != CV_FOURCC('M', 'J', 'P', 'G')))
- continue;
- if( ext == "mov" )
- fourcc = CV_FOURCC('m', 'p', '4', 'v');
-
- string fourcc_str = format("%c%c%c%c", fourcc & 255, (fourcc >> 8) & 255, (fourcc >> 16) & 255, (fourcc >> 24) & 255);
- const string video_file = "video_" + fourcc_str + "." + ext;
-
- Size frame_size(968 & -2, 757 & -2);
- //Size frame_size(968 & -16, 757 & -16);
- //Size frame_size(640, 480);
- VideoWriter writer(video_file, fourcc, 25, frame_size, true);
-
- if (!writer.isOpened())
- {
- VideoWriter writer(video_file, fourcc, 25, frame_size, true);
- ts->printf(ts->LOG, "Creating a video in %s...\n", video_file.c_str());
- ts->printf(ts->LOG, "Cannot create VideoWriter object with codec %s.\n", fourcc_str.c_str());
- ts->set_failed_test_info(ts->FAIL_MISMATCH);
- continue;
- }
-
- const size_t IMAGE_COUNT = 30;
-
- for(size_t i = 0; i < IMAGE_COUNT; ++i)
- {
- stringstream s_digit;
- if (i < 10) {s_digit << "0"; s_digit << i;}
- else s_digit << i;
-
- const string file_path = dir+"../python/images/QCIF_"+s_digit.str()+".bmp";
-
- cv::Mat img = imread(file_path, CV_LOAD_IMAGE_COLOR);
-
- if (img.empty())
- {
- ts->printf(ts->LOG, "Creating a video in %s...\n", video_file.c_str());
- ts->printf(ts->LOG, "Error: cannot read frame from %s.\n", (ts->get_data_path()+"../python/images/QCIF_"+s_digit.str()+".bmp").c_str());
- ts->printf(ts->LOG, "Continue creating the video file...\n");
- ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);
- break;//continue;
- }
-
- cv::resize(img, img, frame_size, 0.0, 0.0, cv::INTER_CUBIC);
-
- for (int k = 0; k < img.rows; ++k)
- for (int l = 0; l < img.cols; ++l)
- if (img.at<Vec3b>(k, l) == Vec3b::all(0))
- img.at<Vec3b>(k, l) = Vec3b(0, 255, 0);
- else img.at<Vec3b>(k, l) = Vec3b(0, 0, 255);
+ string fourcc_str = cvtest::fourccToString(fourcc);
+ const string video_file = "video_" + fourcc_str + "." + ext;
- imwrite("QCIF_"+s_digit.str()+".bmp", img);
+ Size frame_size(968 & -2, 757 & -2);
+ VideoWriter writer(video_file, fourcc, 25, frame_size, true);
- writer << img;
- }
-
- writer.release();
- cv::VideoCapture cap(video_file);
+ if (!writer.isOpened())
+ {
+ // call it repeatedly for easier debugging
+ VideoWriter writer(video_file, fourcc, 25, frame_size, true);
+ ts->printf(ts->LOG, "Creating a video in %s...\n", video_file.c_str());
+ ts->printf(ts->LOG, "Cannot create VideoWriter object with codec %s.\n", fourcc_str.c_str());
+ ts->set_failed_test_info(ts->FAIL_MISMATCH);
+ return;
+ }
- size_t FRAME_COUNT = (size_t)cap.get(CV_CAP_PROP_FRAME_COUNT);
+ const size_t IMAGE_COUNT = 30;
+ vector<Mat> images;
+
+ for( size_t i = 0; i < IMAGE_COUNT; ++i )
+ {
+ string file_path = format("%s../python/images/QCIF_%02d.bmp", dir.c_str(), (int)i);
+ Mat img = imread(file_path, CV_LOAD_IMAGE_COLOR);
- if (FRAME_COUNT != IMAGE_COUNT && ext != "mpg" )
+ if (img.empty())
{
- ts->printf(ts->LOG, "\nFrame count checking for video_%s.%s...\n", fourcc_str.c_str(), ext.c_str());
- ts->printf(ts->LOG, "Video codec: %s\n", fourcc_str.c_str());
- ts->printf(ts->LOG, "Required frame count: %d; Returned frame count: %d\n", IMAGE_COUNT, FRAME_COUNT);
- ts->printf(ts->LOG, "Error: Incorrect frame count in the video.\n");
- ts->printf(ts->LOG, "Continue checking...\n");
- ts->set_failed_test_info(ts->FAIL_BAD_ACCURACY);
+ ts->printf(ts->LOG, "Creating a video in %s...\n", video_file.c_str());
+ ts->printf(ts->LOG, "Error: cannot read frame from %s.\n", file_path.c_str());
+ ts->printf(ts->LOG, "Continue creating the video file...\n");
+ ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);
+ break;
}
- //cap.set(CV_CAP_PROP_POS_FRAMES, -1);
-
- for (int i = 0; i < (int)std::min<size_t>(FRAME_COUNT, IMAGE_COUNT)-1; i++)
- {
- cv::Mat frame; cap >> frame;
- if (frame.empty())
- {
- ts->printf(ts->LOG, "\nVideo file directory: %s\n", ".");
- ts->printf(ts->LOG, "File name: video_%s.%s\n", fourcc_str.c_str(), ext.c_str());
- ts->printf(ts->LOG, "Video codec: %s\n", fourcc_str.c_str());
- ts->printf(ts->LOG, "Error: cannot read the next frame with index %d.\n", i+1);
- ts->set_failed_test_info(ts->FAIL_MISSING_TEST_DATA);
- break;
- }
-
- stringstream s_digit;
- if (i+1 < 10) {s_digit << "0"; s_digit << i+1;}
- else s_digit << i+1;
-
- cv::Mat img = imread("QCIF_"+s_digit.str()+".bmp", CV_LOAD_IMAGE_COLOR);
-
- if (img.empty())
- {
- ts->printf(ts->LOG, "\nError: cannot read an image from %s.\n", ("QCIF_"+s_digit.str()+".bmp").c_str());
- ts->set_failed_test_info(ts->FAIL_MISMATCH);
- continue;
- }
-
- const double thresDbell = 40;
-
- double psnr = PSNR(img, frame);
+ for (int k = 0; k < img.rows; ++k)
+ for (int l = 0; l < img.cols; ++l)
+ if (img.at<Vec3b>(k, l) == Vec3b::all(0))
+ img.at<Vec3b>(k, l) = Vec3b(0, 255, 0);
+ else img.at<Vec3b>(k, l) = Vec3b(0, 0, 255);
+
+ resize(img, img, frame_size, 0.0, 0.0, INTER_CUBIC);
- if (psnr > thresDbell)
- {
- ts->printf(ts->LOG, "\nReading frame from the file video_%s.%s...\n", fourcc_str.c_str(), ext.c_str());
- ts->printf(ts->LOG, "Frame index: %d\n", i+1);
- ts->printf(ts->LOG, "Difference between saved and original images: %g\n", psnr);
- ts->printf(ts->LOG, "Maximum allowed difference: %g\n", thresDbell);
- ts->printf(ts->LOG, "Error: too big difference between saved and original images.\n");
- break;
- }
- }
+ images.push_back(img);
+ writer << img;
}
-}
-
-void CV_HighGuiTest::SpecificVideoCameraTest(const string& dir, const char codecchars[4])
-{
- const string ext[] = {"avi", "mov", "mp4", "mpg", "wmv"};
-
- const size_t n = sizeof(ext)/sizeof(ext[0]);
- const int IMAGE_COUNT = 125;
+ writer.release();
+ VideoCapture cap(video_file);
- cv::VideoCapture cap(0);
+ size_t FRAME_COUNT = (size_t)cap.get(CV_CAP_PROP_FRAME_COUNT);
- if (!cap.isOpened())
+ if (FRAME_COUNT != IMAGE_COUNT )
{
- ts->printf(ts->LOG, "\nError: cannot start working with device.\n");
- ts->set_failed_test_info(ts->OK);
+ ts->printf(ts->LOG, "\nFrame count checking for video_%s.%s...\n", fourcc_str.c_str(), ext.c_str());
+ ts->printf(ts->LOG, "Video codec: %s\n", fourcc_str.c_str());
+ ts->printf(ts->LOG, "Required frame count: %d; Returned frame count: %d\n", IMAGE_COUNT, FRAME_COUNT);
+ ts->printf(ts->LOG, "Error: Incorrect frame count in the video.\n");
+ ts->printf(ts->LOG, "Continue checking...\n");
+ ts->set_failed_test_info(ts->FAIL_BAD_ACCURACY);
return;
}
- for (size_t i = 0; i < n; ++i)
- if ((ext[i]!="mp4")||(string(&codecchars[0], 4)!="IYUV"))
- #if defined WIN32 || defined _WIN32
- if (((ext[i]!="mov")||(string(&codecchars[0], 4)=="XVID"))&&(ext[i]!="mp4"))
- #endif
+ for (int i = 0; (size_t)i < FRAME_COUNT; i++)
{
- Mat frame; int framecount = 0;
- cv::VideoWriter writer;
-
- std::vector <cv::Mat> tmp_img(IMAGE_COUNT);
-
- writer.open("video_"+string(&codecchars[0], 4)+"."+ext[i], CV_FOURCC(codecchars[0], codecchars[1], codecchars[2], codecchars[3]), 25, Size(968, 757), true);
-
- if (!writer.isOpened())
+ Mat frame; cap >> frame;
+ if (frame.empty())
{
ts->printf(ts->LOG, "\nVideo file directory: %s\n", ".");
- ts->printf(ts->LOG, "Video codec: %s\n", std::string(&codecchars[0], 4).c_str());
- ts->printf(ts->LOG, "Error: cannot create VideoWriter object for video_%s.%s.\n", string(&codecchars[0]).c_str(), ext[i].c_str());
- ts->set_failed_test_info(ts->FAIL_EXCEPTION);
- continue;
- }
-
- for (;;)
- {
- cap >> frame;
-
- if (frame.empty())
- {
- ts->printf(ts->LOG, "\nVideo file directory: %s\n", ".");
- ts->printf(ts->LOG, "File name: video_%s.%s\n", string(&codecchars[0], 4).c_str(), ext[i].c_str());
- ts->printf(ts->LOG, "Video codec: %s\n", string(&codecchars[0], 4).c_str());
- ts->printf(ts->LOG, "Error: cannot read next frame with index %d from the device.\n", framecount);
- break;
- }
-
- cv::resize(frame, frame, Size(968, 757), 0, 0, INTER_CUBIC);
- writer << frame; tmp_img[framecount] = frame;
-
- framecount++;
- if (framecount == IMAGE_COUNT) break;
+ ts->printf(ts->LOG, "File name: video_%s.%s\n", fourcc_str.c_str(), ext.c_str());
+ ts->printf(ts->LOG, "Video codec: %s\n", fourcc_str.c_str());
+ ts->printf(ts->LOG, "Error: cannot read the next frame with index %d.\n", i+1);
+ ts->set_failed_test_info(ts->FAIL_MISSING_TEST_DATA);
+ break;
}
- cv::VideoCapture vcap(dir+"video_"+string(&codecchars[0], 4)+"."+ext[i]);
+ Mat img = images[i];
- if (!vcap.isOpened())
- {
- ts->printf(ts->LOG, "\nVideo file directory: %s\n", ".");
- ts->printf(ts->LOG, "File name: video_%s.%s\n", string(&codecchars[0], 4).c_str(), ext[i].c_str());
- ts->printf(ts->LOG, "Video codec: %s\n", string(&codecchars[0], 4).c_str());
- ts->printf(ts->LOG, "Error: cannot open video file.\n");
- continue;
- }
+ const double thresDbell = 40;
+ double psnr = PSNR(img, frame);
- int FRAME_COUNT = (int)vcap.get(CV_CAP_PROP_FRAME_COUNT);
-
- if (FRAME_COUNT != IMAGE_COUNT)
+ if (psnr < thresDbell)
{
- ts->printf(ts->LOG, "\nChecking frame count...\n");
- ts->printf(ts->LOG, "Video file directory: %s\n", ".");
- ts->printf(ts->LOG, "File name: video_%s.%s\n", string(&codecchars[0], 4).c_str(), ext[i].c_str());
- ts->printf(ts->LOG, "Video codec: %s\n", string(&codecchars[0], 4).c_str());
- ts->printf(ts->LOG, "Required frame count: %d Returned frame count: %d\n", IMAGE_COUNT, FRAME_COUNT);
- ts->printf(ts->LOG, "Error: required and returned frame count are not matched.\n");
- ts->printf(ts->LOG, "Continue checking...\n");
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
- }
-
- cv::Mat img; framecount = 0;
- vcap.set(CV_CAP_PROP_POS_FRAMES, 0);
-
- for ( ; framecount < std::min<int>(FRAME_COUNT, IMAGE_COUNT); framecount++ )
- {
- vcap >> img;
-
- if (img.empty())
- {
- ts->printf(ts->LOG, "\nVideo file directory: %s\n", ".");
- ts->printf(ts->LOG, "File name: video_%s.%s\n", string(&codecchars[0], 4).c_str(), ext[i].c_str());
- ts->printf(ts->LOG, "Video codec: %s\n", string(&codecchars[0], 4).c_str());
- ts->printf(ts->LOG, "Error: cannot read frame with index %d from the video.\n", framecount);
- break;
- }
-
- const double thresDbell = 20;
- double psnr = PSNR(img, tmp_img[framecount]);
-
- if (psnr > thresDbell)
- {
- ts->printf(ts->LOG, "\nReading frame from the file video_%s.%s...\n", string(&codecchars[0], 4).c_str(), ext[i].c_str());
- ts->printf(ts->LOG, "Frame index: %d\n", framecount);
- ts->printf(ts->LOG, "Difference between saved and original images: %g\n", psnr);
- ts->printf(ts->LOG, "Maximum allowed difference: %g\n", thresDbell);
- ts->printf(ts->LOG, "Error: too big difference between saved and original images.\n");
- continue;
- }
+ ts->printf(ts->LOG, "\nReading frame from the file video_%s.%s...\n", fourcc_str.c_str(), ext.c_str());
+ ts->printf(ts->LOG, "Frame index: %d\n", i+1);
+ ts->printf(ts->LOG, "Difference between saved and original images: %g\n", psnr);
+ ts->printf(ts->LOG, "Maximum allowed difference: %g\n", thresDbell);
+ ts->printf(ts->LOG, "Error: too big difference between saved and original images.\n");
+ break;
}
}
}
void CV_VideoTest::run(int)
{
-#if defined WIN32 || (defined __linux__ && !defined ANDROID) || (defined __APPLE__ && defined HAVE_FFMPEG)
-#if !defined HAVE_GSTREAMER || defined HAVE_GSTREAMER_APP
-
- const char codecs[][4] = { {'I', 'Y', 'U', 'V'},
- {'X', 'V', 'I', 'D'},
- {'m', 'p', 'e', 'g'},
- {'M', 'J', 'P', 'G'} };
-
- printf("%s", ts->get_data_path().c_str());
-
- int count = sizeof(codecs)/(4*sizeof(char));
-
- for (int i = 0; i < count; ++i)
+ for (int i = 0; ; ++i)
{
- VideoTest(ts->get_data_path(), CV_FOURCC(codecs[i][0], codecs[i][1], codecs[i][2], codecs[i][3]));
+ const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[i];
+ if( fmt.empty() )
+ break;
+ VideoTest(ts->get_data_path(), fmt);
}
-
-#endif
-#endif
}
-void CV_SpecificVideoFileTest::run(int)
+void CV_SpecificVideoTest::run(int)
{
-#if defined WIN32 || (defined __linux__ && !defined ANDROID) || (defined __APPLE__ && defined HAVE_FFMPEG)
-#if !defined HAVE_GSTREAMER || defined HAVE_GSTREAMER_APP
-
- const char codecs[][4] = { {'m', 'p', 'e', 'g'},
- {'X', 'V', 'I', 'D'},
- {'M', 'J', 'P', 'G'},
- {'I', 'Y', 'U', 'V'} };
-
- int count = sizeof(codecs)/(4*sizeof(char));
-
- for (int i = 0; i < count; ++i)
+ for (int i = 0; ; ++i)
{
- SpecificVideoFileTest(ts->get_data_path(), codecs[i]);
+ const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[i];
+ if( fmt.empty() )
+ break;
+ SpecificVideoTest(ts->get_data_path(), fmt);
}
-
-#endif
-#endif
}
-void CV_SpecificVideoCameraTest::run(int)
-{
-#if defined WIN32 || (defined __linux__ && !defined ANDROID)
-#if !defined HAVE_GSTREAMER || defined HAVE_GSTREAMER_APP
-
- const char codecs[][4] = { {'m', 'p', 'e', 'g'},
- {'X', 'V', 'I', 'D'},
- {'M', 'J', 'P', 'G'},
- {'I', 'Y', 'U', 'V'} };
-
- int count = sizeof(codecs)/(4*sizeof(char));
-
- for (int i = 0; i < count; ++i)
- {
- SpecificVideoCameraTest(ts->get_data_path(), codecs[i]);
- }
-
-#endif
+#ifdef HAVE_JPEG
+TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); }
#endif
-}
-TEST(Highgui_Image, regression) { CV_ImageTest test; test.safe_run(); }
+#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT
TEST(Highgui_Video, regression) { CV_VideoTest test; test.safe_run(); }
-TEST(Highgui_SpecificImage, regression) { CV_SpecificImageTest test; test.safe_run(); }
-TEST(Highgui_SpecificVideoFile, regression) { CV_SpecificVideoFileTest test; test.safe_run(); }
-TEST(Highgui_SpecificVideoCamera, regression) { CV_SpecificVideoCameraTest test; test.safe_run(); }
+TEST(Highgui_Video, write_read) { CV_SpecificVideoTest test; test.safe_run(); }
+#endif
+
+TEST(Highgui_Image, write_read) { CV_SpecificImageTest test; test.safe_run(); }
-/*M///////////////////////////////////////////////////////////////////////////////////////\r
-//\r
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
-//\r
-// By downloading, copying, installing or using the software you agree to this license.\r
-// If you do not agree to this license, do not download, install,\r
-// copy or use the software.\r
-//\r
-//\r
-// License Agreement\r
-// For Open Source Computer Vision Library\r
-//\r
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
-// Third party copyrights are property of their respective owners.\r
-//\r
-// Redistribution and use in source and binary forms, with or without modification,\r
-// are permitted provided that the following conditions are met:\r
-//\r
-// * Redistribution's of source code must retain the above copyright notice,\r
-// this list of conditions and the following disclaimer.\r
-//\r
-// * Redistribution's in binary form must reproduce the above copyright notice,\r
-// this list of conditions and the following disclaimer in the documentation\r
-// and/or other materials provided with the distribution.\r
-//\r
-// * The name of the copyright holders may not be used to endorse or promote products\r
-// derived from this software without specific prior written permission.\r
-//\r
-// This software is provided by the copyright holders and contributors "as is" and\r
-// any express or implied warranties, including, but not limited to, the implied\r
-// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
-// In no event shall the Intel Corporation or contributors be liable for any direct,\r
-// indirect, incidental, special, exemplary, or consequential damages\r
-// (including, but not limited to, procurement of substitute goods or services;\r
-// loss of use, data, or profits; or business interruption) however caused\r
-// and on any theory of liability, whether in contract, strict liability,\r
-// or tort (including negligence or otherwise) arising in any way out of\r
-// the use of this software, even if advised of the possibility of such damage.\r
-//\r
-//M*/\r
-\r
-#include "test_precomp.hpp"\r
-#include "opencv2/highgui/highgui.hpp"\r
-\r
-#ifdef HAVE_FFMPEG\r
-\r
-#include "ffmpeg_codecs.hpp"\r
-\r
-using namespace cv;\r
-using namespace std;\r
-\r
-class CV_PositioningTest : public cvtest::BaseTest\r
-{\r
-public:\r
- void CreateTestVideo(const string& format, int codec, int framecount = 125);\r
- void run(int);\r
-};\r
-\r
-void CV_PositioningTest::CreateTestVideo(const string& format, int codec, int framecount)\r
-{\r
- stringstream s; s << codec;\r
-\r
- //if( format == "mov" && codec == CV_FOURCC('m', 'p', 'e', 'g')\r
- // putchar('$');\r
-\r
- cv::VideoWriter writer("test_video_"+s.str()+"."+format, codec, 25, cv::Size(640, 480), false);\r
-\r
- for (int i = 0; i < framecount; ++i)\r
- {\r
- cv::Mat mat(480, 640, CV_8UC1);\r
- size_t n = 8, tmp = i;\r
-\r
- vector<char> tmp_code; tmp_code.clear();\r
-\r
- while ( tmp > 1 )\r
- {\r
- tmp_code.push_back(tmp%2);\r
- tmp /= 2;\r
- }\r
- tmp_code.push_back(tmp);\r
-\r
- vector<char> i_code;\r
-\r
- for (size_t j = 0; j < n; ++j)\r
- {\r
- char val = j < n - tmp_code.size() ? 0 : tmp_code.at(n-1-j);\r
- i_code.push_back(val);\r
- }\r
-\r
- const size_t w = 480/n;\r
-\r
- for (size_t j = 0; j < n; ++j)\r
- {\r
- cv::Scalar color = i_code[j] ? 255 : 0;\r
- rectangle(mat, Rect(0, w*j, 640, w), color, -1);\r
- }\r
-\r
- writer << mat;\r
- }\r
-}\r
-\r
-void CV_PositioningTest::run(int)\r
-{\r
-#if defined WIN32 || (defined __linux__ && !defined ANDROID) || (defined __APPLE__ && defined HAVE_FFMPEG)\r
-#if !defined HAVE_GSTREAMER || defined HAVE_GSTREAMER_APP\r
-\r
- const string format[] = {"avi", "mov", "mp4", "mpg", "wmv", "3gp"};\r
-\r
- const char codec[][4] = { {'X', 'V', 'I', 'D'},\r
- {'m', 'p', 'e', 'g'},\r
- {'M', 'J', 'P', 'G'} };\r
-\r
- size_t n_format = sizeof(format)/sizeof(format[0]),\r
- n_codec = sizeof(codec)/sizeof(codec[0]);\r
-\r
- int n_frames = 256;\r
-\r
- for (size_t i = 0; i < n_format; ++i)\r
- for (size_t j = 0; j < n_codec; ++j)\r
- {\r
- CreateTestVideo(format[i], CV_FOURCC(codec[j][0], codec[j][1], codec[j][2], codec[j][3]), n_frames);\r
-\r
- stringstream s; s << CV_FOURCC(codec[j][0], codec[j][1], codec[j][2], codec[j][3]); //codec_bmp_tags[j].tag;\r
-\r
- const string file_path = "test_video_"+s.str()+"."+format[i];\r
-\r
- bool error = false; int failed = 0;\r
-\r
- cv::VideoCapture cap(file_path);\r
-\r
- if (!cap.isOpened())\r
- {\r
- ts->printf(ts->LOG, "\n\nFile: %s\n", file_path.c_str());\r
- ts->printf(ts->LOG, "\nVideo codec: %s\n", string(&codec[j][0], 4).c_str());\r
- ts->printf(ts->LOG, "\nError: cannot read video file.");\r
- ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);\r
- error = true;\r
- }\r
-\r
- cap.set(CV_CAP_PROP_POS_FRAMES, 0);\r
-\r
- int N = cap.get(CV_CAP_PROP_FRAME_COUNT);\r
-\r
- if (N != n_frames)\r
- {\r
- if (!error)\r
- {\r
- ts->printf(ts->LOG, "\n\nFile: %s\n", file_path.c_str());\r
- ts->printf(ts->LOG, "\nVideo codec: %s\n", string(&codec[j][0], 4).c_str());\r
- error = true;\r
- }\r
- ts->printf(ts->LOG, "\nError: returned frame count in clip is incorrect.\n");\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- }\r
-\r
- vector <int> idx;\r
-\r
- RNG rng(N);\r
- idx.clear();\r
- for( int k = 0; k < N-1; k++ )\r
- idx.push_back(rng.uniform(0, N));\r
- idx.push_back(N-1);\r
- std::swap(idx.at(rng.uniform(0, N-1)), idx.at(N-1));\r
-\r
- for (int k = 0; k < N; ++k)\r
- {\r
- cap.set(CV_CAP_PROP_POS_FRAMES, (double)idx[k]);\r
-\r
- cv::Mat img; cap.retrieve(img);\r
-\r
- if (img.empty())\r
- {\r
- if (!error)\r
- {\r
- ts->printf(ts->LOG, "\n\nFile: %s\n", file_path.c_str());\r
- ts->printf(ts->LOG, "\nVideo codec: %s\n", string(&codec[j][0], 4).c_str());\r
- error = true;\r
- }\r
- ts->printf(ts->LOG, "\nError: cannot read a frame in position %d.\n", idx[k]);\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- }\r
-\r
- const double thresh = 100;\r
-\r
- const size_t n = 8, w = img.rows/n;\r
-\r
- int index = 0, deg = n-1;\r
-\r
- for (size_t l = 0; l < n; ++l)\r
- {\r
- cv::Mat mat = img.rowRange(w*l, w*(l+1)-1);\r
-\r
- Scalar mat_mean = cv::mean(mat);\r
-\r
- if (mat_mean[0] > thresh) index += (int)std::pow(2.0, 1.0*deg);\r
-\r
- deg--;\r
- }\r
-\r
- if (index != idx[k])\r
- {\r
- if (!error)\r
- {\r
- ts->printf(ts->LOG, "\n\nFile: %s\n", file_path.c_str());\r
- ts->printf(ts->LOG, "\nVideo codec: %s\n\n", string(&codec[j][0], 4).c_str());\r
- error = true;\r
- }\r
- ts->printf(ts->LOG, "Required position: %d Returned position: %d\n", idx[k], index);\r
- ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);\r
- failed++;\r
- }\r
- }\r
-\r
- if (!error) { ts->printf(ts->LOG, "\n\nFile: %s\n", file_path.c_str());\r
- ts->printf(ts->LOG, "\nVideo codec: %s\n", string(&codec[j][0], 4).c_str()); }\r
-\r
- const string status = failed ? "FAILED" : "OK";\r
- ts->printf(ts->LOG, "\nSuccessfull iterations: %d(%d%%) Failed iterations: %d(%d%%) %s\n", N-failed, (N-failed)*100/N, failed, failed*100/N, status.c_str());\r
- if( i < n_format-1 || j < n_codec-1 ) ts->printf(ts->LOG, "\n----------");\r
- }\r
-\r
-#endif\r
-#endif\r
-}\r
-\r
-TEST(Highgui_Positioning, regression) { CV_PositioningTest test; test.safe_run(); }\r
-\r
-#endif\r
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ // By downloading, copying, installing or using the software you agree to this license.
+ // If you do not agree to this license, do not download, install,
+ // copy or use the software.
+ //
+ //
+ // License Agreement
+ // For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+ // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ // * Redistribution's of source code must retain the above copyright notice,
+ // this list of conditions and the following disclaimer.
+ //
+ // * Redistribution's in binary form must reproduce the above copyright notice,
+ // this list of conditions and the following disclaimer in the documentation
+ // and/or other materials provided with the distribution.
+ //
+ // * The name of the copyright holders may not be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#include "test_precomp.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+using namespace cv;
+using namespace std;
+
+class CV_PositioningTest : public cvtest::BaseTest
+{
+public:
+ CV_PositioningTest()
+ {
+ framesize = Size(640, 480);
+ }
+
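+ // renders a frame whose appearance depends only on the index i, so a frame
+ // decoded after seeking can be checked (via PSNR) against a regenerated copy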
+ Mat drawFrame(int i)
+ {
+ Mat mat = Mat::zeros(framesize, CV_8UC3);
+
+ mat = Scalar(fabs(cos(i*0.08)*255), fabs(sin(i*0.05)*255), i);
+ putText(mat, format("%03d", i), Point(10, 350), 0, 10, Scalar(128, 255, 255), 15);
+ return mat;
+ }
+
+ string getFilename(const cvtest::VideoFormat& fmt)
+ {
+ return format("test_video_%s.%s", cvtest::fourccToString(fmt.fourcc).c_str(), fmt.ext.c_str());
+ }
+
+ bool CreateTestVideo(const cvtest::VideoFormat& fmt, int framecount)
+ {
+ string filename = getFilename(fmt);
+
+ VideoWriter writer(filename, fmt.fourcc, 25, framesize, true);
+ if( !writer.isOpened() )
+ return false;
+
+ for (int i = 0; i < framecount; ++i)
+ {
+ Mat img = drawFrame(i);
+ writer << img;
+ }
+ return true;
+ }
+
+ void run(int)
+ {
+ int n_frames = 100;
+
+ for( int testcase = 0; ; testcase++ )
+ {
+ const cvtest::VideoFormat& fmt = cvtest::g_specific_fmt_list[testcase];
+ if( fmt.empty() )
+ break;
+ string filename = getFilename(fmt);
+ ts->printf(ts->LOG, "\nFile: %s\n", filename.c_str());
+
+ if( !CreateTestVideo(fmt, n_frames) )
+ {
+ ts->printf(ts->LOG, "\nError: cannot create video file");
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ return;
+ }
+
+ VideoCapture cap(filename);
+
+ if (!cap.isOpened())
+ {
+ ts->printf(ts->LOG, "\nError: cannot read video file.");
+ ts->set_failed_test_info(ts->FAIL_INVALID_TEST_DATA);
+ return;
+ }
+
+ int N0 = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
+ cap.set(CV_CAP_PROP_POS_FRAMES, 0);
+ int N = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
+
+ if (N != n_frames || N != N0)
+ {
+ ts->printf(ts->LOG, "\nError: returned frame count (N0=%d, N=%d) is different from the reference number %d\n", N0, N, n_frames);
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ return;
+ }
+
+ for (int k = 0; k < N; ++k)
+ {
+ int idx = theRNG().uniform(0, N);
+
+ if( !cap.set(CV_CAP_PROP_POS_FRAMES, idx) )
+ {
+ ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx);
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ return;
+ }
+
+ int idx1 = (int)cap.get(CV_CAP_PROP_POS_FRAMES);
+
+ Mat img; cap >> img;
+ Mat img0 = drawFrame(idx);
+
+ if( idx != idx1 )
+ {
+ ts->printf(ts->LOG, "\nError: the current position (%d) after seek is different from specified (%d)\n",
+ idx1, idx);
+ ts->printf(ts->LOG, "Saving both frames ...\n");
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ imwrite("opencv_test_highgui_postest_actual.png", img);
+ imwrite("opencv_test_highgui_postest_expected.png", img0);
+ return;
+ }
+
+ if (img.empty())
+ {
+ ts->printf(ts->LOG, "\nError: cannot read a frame at position %d.\n", idx);
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ return;
+ }
+
+ double err = PSNR(img, img0);
+
+ if( err < 20 )
+ {
+ ts->printf(ts->LOG, "The frame read after positioning to %d is incorrect (PSNR=%g)\n", idx, err);
+ ts->printf(ts->LOG, "Saving both frames ...\n");
+ ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
+ imwrite("opencv_test_highgui_postest_actual.png", img);
+ imwrite("opencv_test_highgui_postest_expected.png", img0);
+ return;
+ }
+ }
+ }
+ }
+
+ Size framesize;
+};
+
+#if BUILD_WITH_VIDEO_INPUT_SUPPORT && BUILD_WITH_VIDEO_OUTPUT_SUPPORT
+TEST(Highgui_Video, seek_random_synthetic) { CV_PositioningTest test; test.safe_run(); }
+#endif
===============================
.. highlight:: cpp
-The functions in this section perform various geometrical transformations of 2D images. They do not change the image content but deform the pixel grid and map this deformed grid to the destination image. In fact, to avoid sampling artifacts, the mapping is done in the reverse order, from destination to the source. That is, for each pixel
-:math:`(x, y)` of the destination image, the functions compute coordinates of the corresponding "donor" pixel in the source image and copy the pixel value:
-
The functions in this section perform various geometrical transformations of 2D images. They do not change the image content but deform the pixel grid and map this deformed grid to the destination image. In fact, to avoid sampling artifacts, the mapping is done in the reverse order, from destination to the source. That is, for each pixel :math:`(x, y)` of the destination image, the functions compute coordinates of the corresponding "donor" pixel in the source image and copy the pixel value:
+
.. math::
\texttt{dst} (x,y)= \texttt{src} (f_x(x,y), f_y(x,y))
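
For illustration, a minimal sketch of such a reverse mapping built directly with :ocv:func:`remap` (assuming an OpenCV 2.x build and an input file ``lena.png``; the horizontal flip encoded in the maps is only an example)::

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        cv::Mat src = cv::imread("lena.png");
        if (src.empty())
            return 1;

        cv::Mat map_x(src.size(), CV_32FC1), map_y(src.size(), CV_32FC1);

        // for every destination pixel (x, y), store the source coordinates
        // (f_x(x,y), f_y(x,y)) of the "donor" pixel
        for (int y = 0; y < src.rows; y++)
            for (int x = 0; x < src.cols; x++)
            {
                map_x.at<float>(y, x) = (float)(src.cols - 1 - x); // f_x
                map_y.at<float>(y, x) = (float)y;                  // f_y
            }

        cv::Mat dst;
        cv::remap(src, dst, map_x, map_y, cv::INTER_LINEAR);
        return 0;
    }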
miscellaneous_transformations
histograms
structural_analysis_and_shape_descriptors
- planar_subdivisions
motion_analysis_and_object_tracking
feature_detection
object_detection
:param object2: Second contour or grayscale image.
- :param method: Comparison method: ``CV_CONTOUR_MATCH_I1`` , \ ``CV_CONTOURS_MATCH_I2`` \
+ :param method: Comparison method: ``CV_CONTOURS_MATCH_I1`` , \ ``CV_CONTOURS_MATCH_I2`` \
or ``CV_CONTOURS_MATCH_I3`` (see the details below).
:param parameter: Method-specific parameter (not supported now).
:ocv:func:`HuMoments` ) as follows (
:math:`A` denotes ``object1``, :math:`B` denotes ``object2`` ):
-* method=CV\_CONTOUR\_MATCH\_I1
+* method=CV_CONTOURS_MATCH_I1
.. math::
I_1(A,B) = \sum _{i=1...7} \left | \frac{1}{m^A_i} - \frac{1}{m^B_i} \right |
-* method=CV\_CONTOUR\_MATCH\_I2
+* method=CV_CONTOURS_MATCH_I2
.. math::
I_2(A,B) = \sum _{i=1...7} \left | m^A_i - m^B_i \right |
-* method=CV\_CONTOUR\_MATCH\_I3
+* method=CV_CONTOURS_MATCH_I3
.. math::
- I_3(A,B) = \sum _{i=1...7} \frac{ \left| m^A_i - m^B_i \right| }{ \left| m^A_i \right| }
+ I_3(A,B) = \max _{i=1...7} \frac{ \left| m^A_i - m^B_i \right| }{ \left| m^A_i \right| }
where
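
A minimal usage sketch for these methods (assuming an OpenCV 2.x build and two binary shape images ``shape_a.png`` and ``shape_b.png``, both names illustrative; lower return values mean more similar shapes)::

    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <vector>

    int main()
    {
        cv::Mat a = cv::imread("shape_a.png", 0);
        cv::Mat b = cv::imread("shape_b.png", 0);
        if (a.empty() || b.empty())
            return 1;

        // binarize, then take the first external contour of each image
        cv::threshold(a, a, 128, 255, cv::THRESH_BINARY);
        cv::threshold(b, b, 128, 255, cv::THRESH_BINARY);
        std::vector<std::vector<cv::Point> > ca, cb;
        cv::findContours(a, ca, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
        cv::findContours(b, cb, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
        if (ca.empty() || cb.empty())
            return 1;

        // 0 means identical Hu moments; larger values mean less similar shapes
        double dist = cv::matchShapes(ca[0], cb[0], CV_CONTOURS_MATCH_I1, 0);
        return dist < 0.1 ? 0 : 1;
    }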
CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
double alpha, InputArray mask=noArray() );
+//! computes PSNR image/video quality metric
+CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
+
CV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2, InputArray window = noArray());
CV_EXPORTS_W void createHanningWindow(OutputArray dst, Size winSize, int type);
TEST_CYCLE() blur(src, dst, Size(3,3), Point(-1,-1), btype);
- SANITY_CHECK(dst);
+ SANITY_CHECK(dst, 1e-3);
}
PERF_TEST_P(Size_MatType_BorderType, gaussianBlur5x5,
TEST_CYCLE() blur(src, dst, Size(5,5), Point(-1,-1), btype);
- SANITY_CHECK(dst);
+ SANITY_CHECK(dst, 1e-3);
}
TEST_CYCLE() cornerEigenValsAndVecs(src, dst, blockSize, apertureSize, borderType);
- SANITY_CHECK(dst);
+ SANITY_CHECK(dst, 2e-5);
}
\ No newline at end of file
\r
SANITY_CHECK(sum, 1e-6);\r
SANITY_CHECK(sqsum, 1e-6);\r
- SANITY_CHECK(tilted, 1e-6);\r
+ SANITY_CHECK(tilted, 1e-6, tilted.depth() > CV_32S ? ERROR_RELATIVE : ERROR_ABSOLUTE);\r
}\r
void cv::convexityDefects( InputArray _points, InputArray _hull, OutputArray _defects )
{
Mat points = _points.getMat();
- CV_Assert( points.isContinuous() && points.type() == CV_32SC2 );
+ int ptnum = points.checkVector(2, CV_32S);
+ CV_Assert( ptnum > 3 );
Mat hull = _hull.getMat();
+ CV_Assert( hull.checkVector(1, CV_32S) > 2 );
Ptr<CvMemStorage> storage = cvCreateMemStorage();
CvMat c_points = points, c_hull = hull;
- CvSeq* seq = cvConvexityDefects(&c_points, &c_hull);
+ CvSeq* seq = cvConvexityDefects(&c_points, &c_hull, storage);
int i, n = seq->total;
if( n == 0 )
int idx0 = (int)(d.start - ptorg);
int idx1 = (int)(d.end - ptorg);
int idx2 = (int)(d.depth_point - ptorg);
- CV_Assert( 0 <= idx0 && idx0 < n );
- CV_Assert( 0 <= idx1 && idx1 < n );
- CV_Assert( 0 <= idx2 && idx2 < n );
+ CV_Assert( 0 <= idx0 && idx0 < ptnum );
+ CV_Assert( 0 <= idx1 && idx1 < ptnum );
+ CV_Assert( 0 <= idx2 && idx2 < ptnum );
CV_Assert( d.depth >= 0 );
int idepth = cvRound(d.depth*256);
defects.at<Vec4i>(i) = Vec4i(idx0, idx1, idx2, idepth);
ksize.width = 1;
}
#ifdef HAVE_TEGRA_OPTIMIZATION
- if(tegra::box(src, dst, ksize, borderType))
- return;
- if ( tegra::boxFilter(src, dst, ksize, anchor, normalize, borderType) )
+ if ( tegra::box(src, dst, ksize, anchor, normalize, borderType) )
return;
#endif
// temporary copy of the image with borders for easy processing
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
+ patchNaNs(temp);
// allocate lookup tables
vector<float> _space_weight(d*d);
}
+double cv::PSNR(InputArray _src1, InputArray _src2)
+{
+ Mat src1 = _src1.getMat(), src2 = _src2.getMat();
+ CV_Assert( src1.depth() == CV_8U );
+ double diff = std::sqrt(norm(src1, src2, NORM_L2SQR)/(src1.total()*src1.channels()));
+ return 20*log10(255./(diff+DBL_EPSILON));
+}
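+
+// Usage sketch (file names are illustrative; per the assertion above the
+// inputs must be 8-bit and, implicitly, of matching size and type):
+//
+//     cv::Mat orig = cv::imread("original.png");
+//     cv::Mat comp = cv::imread("compressed.png");
+//     double psnr = cv::PSNR(orig, comp); // higher dB = more similar images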
+
+
CV_IMPL void
cvCopyMakeBorder( const CvArr* srcarr, CvArr* dstarr, CvPoint offset,
int borderType, CvScalar value )
endif()
set(the_description "The java bindings")
-ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_highgui opencv_ml opencv_calib3d opencv_photo)
+ocv_add_module(java BINDINGS opencv_core opencv_imgproc OPTIONAL opencv_objdetect opencv_features2d opencv_video opencv_highgui opencv_ml opencv_calib3d opencv_photo opencv_nonfree)
ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/src/cpp")
# get list of modules to wrap
endforeach()
# first run (to get list of generated files)
- file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out/")
- file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out")
- execute_process(COMMAND ${PYTHON_EXECUTABLE} "${GEN_JAVA}" "${HDR_PARSER}" ${module} ${module_headers}
- WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out"
- OUTPUT_QUIET ERROR_QUIET)
- file(GLOB_RECURSE ${module}_generated_java_sources RELATIVE "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out/" "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out/*.java")
- ocv_list_add_prefix(${module}_generated_java_sources "${CMAKE_CURRENT_BINARY_DIR}/")
-
- # second run (at build time)
- add_custom_command(OUTPUT ${${module}_generated_java_sources} "${CMAKE_CURRENT_BINARY_DIR}/${module}.cpp"
- COMMAND ${PYTHON_EXECUTABLE} "${GEN_JAVA}" "${HDR_PARSER}" ${module} ${module_headers}
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- DEPENDS "${GEN_JAVA}" "${HDR_PARSER}" ${module_headers})
-
- list(APPEND java_hdr_deps ${module_headers})
- list(APPEND generated_cpp_sources "${CMAKE_CURRENT_BINARY_DIR}/${module}.cpp")
- list(APPEND generated_java_sources ${${module}_generated_java_sources})
+ if(module_headers)
+ file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out/")
+ file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out")
+ execute_process(COMMAND ${PYTHON_EXECUTABLE} "${GEN_JAVA}" "${HDR_PARSER}" ${module} ${module_headers}
+ WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out"
+ OUTPUT_QUIET ERROR_QUIET)
+ file(GLOB_RECURSE ${module}_generated_java_sources RELATIVE "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out/" "${CMAKE_CURRENT_BINARY_DIR}/gen_java_out/*.java")
+ ocv_list_add_prefix(${module}_generated_java_sources "${CMAKE_CURRENT_BINARY_DIR}/")
+
+ # second run (at build time)
+ add_custom_command(OUTPUT ${${module}_generated_java_sources} "${CMAKE_CURRENT_BINARY_DIR}/${module}.cpp"
+ COMMAND ${PYTHON_EXECUTABLE} "${GEN_JAVA}" "${HDR_PARSER}" ${module} ${module_headers}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ DEPENDS "${GEN_JAVA}" "${HDR_PARSER}" ${module_headers})
+
+ list(APPEND java_hdr_deps ${module_headers})
+ list(APPEND generated_cpp_sources "${CMAKE_CURRENT_BINARY_DIR}/${module}.cpp")
+ list(APPEND generated_java_sources ${${module}_generated_java_sources})
+ endif()
endforeach()
# get handwritten files used for wrappers generation
# manifest, jni, Eclipse project
file(GLOB_RECURSE android_lib_project_files RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/android/" "${CMAKE_CURRENT_SOURCE_DIR}/android/*")
+ list(REMOVE_ITEM android_lib_project_files "${ANDROID_MANIFEST_FILE}")
foreach(f ${android_lib_project_files})
if(NOT f MATCHES "\\.svn")
add_custom_command(
OUTPUT "${OpenCV_BINARY_DIR}/${f}"
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/android/${f}" "${OpenCV_BINARY_DIR}/${f}"
- DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/android/${f}"
+ MAIN_DEPENDENCY "${CMAKE_CURRENT_SOURCE_DIR}/android/${f}"
COMMENT "Generating ${f}"
)
android_get_compatible_target(lib_target_sdk_target ${ANDROID_NATIVE_API_LEVEL} ${ANDROID_SDK_TARGET})
add_custom_command(
- OUTPUT ${lib_target_files}
+ OUTPUT ${lib_target_files} "${OpenCV_BINARY_DIR}/${ANDROID_MANIFEST_FILE}"
COMMAND ${CMAKE_COMMAND} -E remove ${lib_target_files}
+ COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/android/${ANDROID_MANIFEST_FILE}" "${OpenCV_BINARY_DIR}/${ANDROID_MANIFEST_FILE}"
COMMAND ${ANDROID_EXECUTABLE} --silent create lib-project --path \"${OpenCV_BINARY_DIR}\" --target \"${lib_target_sdk_target}\" --name OpenCV --package org.opencv 2>\"${CMAKE_CURRENT_BINARY_DIR}/create_lib_project.log\"
- MAIN_DEPENDENCY "${OpenCV_BINARY_DIR}/${ANDROID_MANIFEST_FILE}"
+ COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/android/${ANDROID_MANIFEST_FILE}" "${OpenCV_BINARY_DIR}/${ANDROID_MANIFEST_FILE}"
+ MAIN_DEPENDENCY "${CMAKE_CURRENT_SOURCE_DIR}/android/${ANDROID_MANIFEST_FILE}"
DEPENDS ${lib_proj_files}
COMMENT "Generating OpenCV Android library project. SDK target: ${lib_target_sdk_target}"
)
install(FILES "${OpenCV_BINARY_DIR}/${ANDROID_PROJECT_PROPERTIES_FILE}" DESTINATION . COMPONENT main)
+ install(FILES "${OpenCV_BINARY_DIR}/${ANDROID_MANIFEST_FILE}" DESTINATION . COMPONENT main)
if(ANT_EXECUTABLE AND ANDROID_TOOLS_Pkg_Revision GREATER 13)
# build the library project
list(APPEND lib_target_files "${OpenCV_BINARY_DIR}/bin/classes.jar")
endif()
- add_custom_target(${lib_target}
- SOURCES ${lib_proj_files} ${lib_target_files}
- )
+ add_custom_target(${lib_target} SOURCES ${lib_proj_files} ${lib_target_files} "${OpenCV_BINARY_DIR}/${ANDROID_MANIFEST_FILE}")
add_dependencies(${lib_target} ${api_target})
add_dependencies(${the_module} ${lib_target})
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
- <name>OpenCV-2.4.beta</name>
+ <name>OpenCV-2.4.0</name>
<comment></comment>
<projects>
</projects>
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.opencv"
- android:versionCode="1"
- android:versionName="1.0">
+ android:versionCode="240"
+ android:versionName="2.4.0">
</manifest>
Bitmap bmp16 = BitmapFactory.decodeFile(OpenCVTestRunner.LENA_PATH, opt16);\r
Mat m16 = new Mat();\r
Utils.bitmapToMat(bmp16, m16);\r
+ assertTrue(m16.rows() == 512 && m16.cols() == 512 && m16.type() == CvType.CV_8UC4);\r
\r
- BitmapFactory.Options opt32 = new BitmapFactory.Options();\r
+ /*BitmapFactory.Options opt32 = new BitmapFactory.Options();\r
opt32.inPreferredConfig = Bitmap.Config.ARGB_8888;\r
- Bitmap bmp32 = BitmapFactory.decodeFile(OpenCVTestRunner.LENA_PATH, opt32);\r
+ Bitmap bmp32 = BitmapFactory.decodeFile(OpenCVTestRunner.LENA_PATH, opt32);*/\r
+ Bitmap bmp32 = bmp16.copy(Bitmap.Config.ARGB_8888, false);\r
Mat m32 = new Mat();\r
Utils.bitmapToMat(bmp32, m32);\r
\r
\r
double maxDiff = Core.norm(m16, m32, Core.NORM_INF);\r
Log.d("Bmp->Mat", "bmp16->Mat vs bmp32->Mat diff = " + maxDiff);\r
+\r
assertTrue(maxDiff <= 8 /* 8 == 2^8 / 2^5 */);\r
}\r
\r
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
+import org.opencv.core.MatOfDouble;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.MatOfPoint3f;
import org.opencv.core.Point;
public void testFindChessboardCornersMatSizeMat() {
Size patternSize = new Size(9, 6);
- Calib3d.findChessboardCorners(grayChess, patternSize, dst);
- assertTrue(!dst.empty());
+ MatOfPoint2f corners = new MatOfPoint2f();
+ Calib3d.findChessboardCorners(grayChess, patternSize, corners);
+ assertTrue(!corners.empty());
}
public void testFindChessboardCornersMatSizeMatInt() {
Size patternSize = new Size(9, 6);
- Calib3d.findChessboardCorners(grayChess, patternSize, dst, Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
+ MatOfPoint2f corners = new MatOfPoint2f();
+ Calib3d.findChessboardCorners(grayChess, patternSize, corners, Calib3d.CALIB_CB_ADAPTIVE_THRESH + Calib3d.CALIB_CB_NORMALIZE_IMAGE
+ Calib3d.CALIB_CB_FAST_CHECK);
- assertTrue(!dst.empty());
+ assertTrue(!corners.empty());
}
public void testFindCirclesGridDefaultMatSizeMat() {
Mat rvec = new Mat();
Mat tvec = new Mat();
- Calib3d.solvePnP(points3d, points2d, intrinsics, new Mat(), rvec, tvec);
+ Calib3d.solvePnP(points3d, points2d, intrinsics, new MatOfDouble(), rvec, tvec);
Mat truth_rvec = new Mat(3, 1, CvType.CV_64F);
truth_rvec.put(0, 0, 0, Math.PI / 2, 0);
rgba0.setTo(new Scalar(10, 20, 30, 40));
List<Mat> src = Arrays.asList(rgba0);
List<Mat> dst = Arrays.asList(gray3, gray2, gray1, gray0, getMat(CvType.CV_8UC3, 0, 0, 0));
- MatOfInt fromTo = new MatOfInt(1, new int[]
- { 3, 0,
+ MatOfInt fromTo = new MatOfInt(
+ 3, 0,
3, 1,
2, 2,
0, 3,
2, 4,
1, 5,
- 0, 6 }
+ 0, 6
);
Core.mixChannels(src, dst, fromTo);
package org.opencv.test.core;
+import java.util.Arrays;
+
import org.opencv.core.Core;
import org.opencv.core.CvException;
import org.opencv.core.CvType;
import org.opencv.core.Size;
import org.opencv.test.OpenCVTestCase;
-import java.util.Arrays;
-
public class MatTest extends OpenCVTestCase {
public void testAdjustROI() {
assertEquals(5, Core.countNonZero(eye));
}
-
- public void testGetIntInt() {
- Mat src = new Mat(3, 3, CvType.CV_8U, new Scalar(2));
- double[] actualArray = src.get(1, 1);
-
- assertTrue(Arrays.equals(new double[] { 2 }, actualArray));
+
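+ // fills channel k of element (i, j) with 100*i + 10*j + k (values saturate
+ // for integer types, which the expected 255s in the tests below rely on)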
+ public Mat getTestMat(int size, int type) {
+ Mat m = new Mat(size, size, type);
+ final int ch = CvType.channels(type);
+ double buff[] = new double[size*size * ch];
+ for(int i=0; i<size; i++)
+ for(int j=0; j<size; j++)
+ for(int k=0; k<ch; k++) {
+ buff[i*size*ch + j*ch + k] = 100*i + 10*j + k;
+ }
+ m.put(0, 0, buff);
+ return m;
+ }
+
+ public void testGetIntInt_8U() {
+ Mat m = getTestMat(5, CvType.CV_8UC2);
+
+ // whole Mat
+ assertTrue(Arrays.equals(new double[] {0, 1}, m.get(0, 0)));
+ assertTrue(Arrays.equals(new double[] {240, 241}, m.get(2, 4)));
+ assertTrue(Arrays.equals(new double[] {255, 255}, m.get(4, 4)));
+
+ // sub-Mat
+ Mat sm = m.submat(2, 4, 3, 5);
+ assertTrue(Arrays.equals(new double[] {230, 231}, sm.get(0, 0)));
+ assertTrue(Arrays.equals(new double[] {255, 255}, sm.get(1, 1)));
+ }
+
+ public void testGetIntInt_32S() {
+ Mat m = getTestMat(5, CvType.CV_32SC3);
+
+ // whole Mat
+ assertTrue(Arrays.equals(new double[] {0, 1, 2}, m.get(0, 0)));
+ assertTrue(Arrays.equals(new double[] {240, 241, 242}, m.get(2, 4)));
+ assertTrue(Arrays.equals(new double[] {440, 441, 442}, m.get(4, 4)));
+
+ // sub-Mat
+ Mat sm = m.submat(2, 4, 3, 5);
+ assertTrue(Arrays.equals(new double[] {230, 231, 232}, sm.get(0, 0)));
+ assertTrue(Arrays.equals(new double[] {340, 341, 342}, sm.get(1, 1)));
+ }
+
+ public void testGetIntInt_64F() {
+ Mat m = getTestMat(5, CvType.CV_64FC1);
+
+ // whole Mat
+ assertTrue(Arrays.equals(new double[] {0}, m.get(0, 0)));
+ assertTrue(Arrays.equals(new double[] {240}, m.get(2, 4)));
+ assertTrue(Arrays.equals(new double[] {440}, m.get(4, 4)));
+
+ // sub-Mat
+ Mat sm = m.submat(2, 4, 3, 5);
+ assertTrue(Arrays.equals(new double[] {230}, sm.get(0, 0)));
+ assertTrue(Arrays.equals(new double[] {340}, sm.get(1, 1)));
}
public void testGetIntIntByteArray() {
- Mat m = new Mat(5, 5, CvType.CV_8UC3, new Scalar(1, 2, 3));
+ Mat m = getTestMat(5, CvType.CV_8UC3);
byte[] goodData = new byte[9];
byte[] badData = new byte[7];
- m.get(1, 1, goodData);
+
+ // whole Mat
+ int bytesNum = m.get(1, 1, goodData);
- assertTrue(Arrays.equals(new byte[] { 1, 2, 3, 1, 2, 3, 1, 2, 3 }, goodData));
+ assertEquals(9, bytesNum);
+ assertTrue(Arrays.equals(new byte[] { 110, 111, 112, 120, 121, 122, (byte) 130, (byte) 131, (byte) 132 }, goodData));
try {
m.get(2, 2, badData);
} catch (UnsupportedOperationException e) {
// expected
}
+
+ // sub-Mat
+ Mat sm = m.submat(2, 4, 3, 5);
+ byte buff00[] = new byte[3];
+ bytesNum = sm.get(0, 0, buff00);
+ assertEquals(3, bytesNum);
+ assertTrue(Arrays.equals(new byte[] {(byte) 230, (byte) 231, (byte) 232}, buff00));
+ byte buff11[] = new byte[3];
+ bytesNum = sm.get(1, 1, buff11);
+ assertEquals(3, bytesNum);
+ assertTrue(Arrays.equals(new byte[] {(byte) 255, (byte) 255, (byte) 255}, buff11));
}
public void testGetIntIntDoubleArray() {
- Mat src = new Mat(2, 2, CvType.CV_64F);
- double[] doubleArray = { 1.0, 2.0, 3.0 };
-
- int numOfBytes = src.get(0, 0, doubleArray);
- assertEquals(24, numOfBytes);
- }
+ Mat m = getTestMat(5, CvType.CV_64F);
+ double buff[] = new double[4];
+
+ // whole Mat
+ int bytesNum = m.get(1, 1, buff);
+
+ assertEquals(32, bytesNum);
+ assertTrue(Arrays.equals(new double[] { 110, 120, 130, 140 }, buff));
+
+ // sub-Mat
+ Mat sm = m.submat(2, 4, 3, 5);
+ double buff00[] = new double[2];
+ bytesNum = sm.get(0, 0, buff00);
+ assertEquals(16, bytesNum);
+ assertTrue(Arrays.equals(new double[] {230, 240}, buff00));
+ double buff11[] = new double[] {0, 0};
+ bytesNum = sm.get(1, 1, buff11);
+ assertEquals(8, bytesNum);
+ assertTrue(Arrays.equals(new double[] {340, 0}, buff11));
+ }
public void testGetIntIntFloatArray() {
- Mat src = new Mat(2, 2, CvType.CV_32F);
- float[] floatArray = { 3.0f, 1.0f, 4.0f };
+ Mat m = getTestMat(5, CvType.CV_32F);
+ float buff[] = new float[4];
+
+ // whole Mat
+ int bytesNum = m.get(1, 1, buff);
- int numOfBytes = src.get(0, 0, floatArray);
- assertEquals(12, numOfBytes);
+ assertEquals(16, bytesNum);
+ assertTrue(Arrays.equals(new float[] { 110, 120, 130, 140 }, buff));
+
+ // sub-Mat
+ Mat sm = m.submat(2, 4, 3, 5);
+ float buff00[] = new float[2];
+ bytesNum = sm.get(0, 0, buff00);
+ assertEquals(8, bytesNum);
+ assertTrue(Arrays.equals(new float[] {230, 240}, buff00));
+ float buff11[] = new float[] {0, 0};
+ bytesNum = sm.get(1, 1, buff11);
+ assertEquals(4, bytesNum);
+ assertTrue(Arrays.equals(new float[] {340, 0}, buff11));
}
public void testGetIntIntIntArray() {
- Mat src = new Mat(2, 2, CvType.CV_32S);
- int[] intArray = { 3, 1, 4, 7 };
-
- int numOfBytes = src.get(0, 0, intArray);
- assertEquals(16, numOfBytes);
+ Mat m = getTestMat(5, CvType.CV_32SC2);
+ int[] buff = new int[6];
+
+ // whole Mat
+ int bytesNum = m.get(1, 1, buff);
+
+ assertEquals(24, bytesNum);
+ assertTrue(Arrays.equals(new int[] { 110, 111, 120, 121, 130, 131 }, buff));
+
+ // sub-Mat
+ Mat sm = m.submat(2, 4, 3, 5);
+ int buff00[] = new int[4];
+ bytesNum = sm.get(0, 0, buff00);
+ assertEquals(16, bytesNum);
+ assertTrue(Arrays.equals(new int[] {230, 231, 240, 241}, buff00));
+ int buff11[] = new int[]{0, 0, 0, 0};
+ bytesNum = sm.get(1, 1, buff11);
+ assertEquals(8, bytesNum);
+ assertTrue(Arrays.equals(new int[] {340, 341, 0, 0}, buff11));
}
public void testGetIntIntShortArray() {
- Mat src = new Mat(2, 2, CvType.CV_16U);
- short[] data = { 3, 1, 4, 7 };
-
- int numOfBytes = src.get(1, 1, data);
- assertEquals(2, numOfBytes);
+ Mat m = getTestMat(5, CvType.CV_16SC2);
+ short[] buff = new short[6];
+
+ // whole Mat
+ int bytesNum = m.get(1, 1, buff);
+
+ assertEquals(12, bytesNum);
+ assertTrue(Arrays.equals(new short[] { 110, 111, 120, 121, 130, 131 }, buff));
+
+ // sub-Mat
+ Mat sm = m.submat(2, 4, 3, 5);
+ short buff00[] = new short[4];
+ bytesNum = sm.get(0, 0, buff00);
+ assertEquals(8, bytesNum);
+ assertTrue(Arrays.equals(new short[] {230, 231, 240, 241}, buff00));
+ short buff11[] = new short[]{0, 0, 0, 0};
+ bytesNum = sm.get(1, 1, buff11);
+ assertEquals(4, bytesNum);
+ assertTrue(Arrays.equals(new short[] {340, 341, 0, 0}, buff11));
}
public void testGetNativeObjAddr() {
}
public void testPutIntIntByteArray() {
- Mat m = new Mat(5, 5, CvType.CV_8UC3);
- byte[] bytes = new byte[] { 10, 20, 30, 40, 50, 60 };
- m.put(1, 1, bytes);
+ Mat m = new Mat(5, 5, CvType.CV_8UC3, new Scalar(1, 2, 3));
+ Mat sm = m.submat(2, 4, 3, 5);
+ byte[] buff = new byte[] { 0, 0, 0, 0, 0, 0 };
+ byte[] buff0 = new byte[] { 10, 20, 30, 40, 50, 60 };
+ byte[] buff1 = new byte[] { -1, -2, -3, -4, -5, -6 };
+
+ int bytesNum = m.put(1, 2, buff0);
+
+ assertEquals(6, bytesNum);
+ bytesNum = m.get(1, 2, buff);
+ assertEquals(6, bytesNum);
+ assertTrue(Arrays.equals(buff, buff0));
+
+ bytesNum = sm.put(0, 0, buff1);
+
+ assertEquals(6, bytesNum);
+ bytesNum = sm.get(0, 0, buff);
+ assertEquals(6, bytesNum);
+ assertTrue(Arrays.equals(buff, buff1));
+ bytesNum = m.get(2, 3, buff);
+ assertEquals(6, bytesNum);
+ assertTrue(Arrays.equals(buff, buff1));
+
+ Mat m1 = m.row(1);
+ bytesNum = m1.get(0, 2, buff);
+ assertEquals(6, bytesNum);
+ assertTrue(Arrays.equals(buff, buff0));
try {
byte[] bytes2 = new byte[] { 10, 20, 30, 40, 50 };
}
public void testPutIntIntDoubleArray() {
- Mat m = new Mat(5, 5, CvType.CV_8UC3);
- m.put(1, 1, 10, 20, 30, 40, 50, 60);
-
- try {
- m.put(2, 2, 11, 22, 33, 44, 55);
- fail("Expected UnsupportedOperationException (data.length % CvType.channels(t) != 0)");
- } catch (UnsupportedOperationException e) {
- // expected
- }
+ Mat m = new Mat(5, 5, CvType.CV_8UC3, new Scalar(1, 2, 3));
+ Mat sm = m.submat(2, 4, 3, 5);
+ byte[] buff = new byte[] { 0, 0, 0, 0, 0, 0 };
+
+ int bytesNum = m.put(1, 2, 10, 20, 30, 40, 50, 60);
+
+ assertEquals(6, bytesNum);
+ bytesNum = m.get(1, 2, buff);
+ assertEquals(6, bytesNum);
+ assertTrue(Arrays.equals(buff, new byte[]{10, 20, 30, 40, 50, 60}));
+
+ bytesNum = sm.put(0, 0, 255, 254, 253, 252, 251, 250);
+
+ assertEquals(6, bytesNum);
+ bytesNum = sm.get(0, 0, buff);
+ assertEquals(6, bytesNum);
+ assertTrue(Arrays.equals(buff, new byte[]{-1, -2, -3, -4, -5, -6}));
+ bytesNum = m.get(2, 3, buff);
+ assertEquals(6, bytesNum);
+ assertTrue(Arrays.equals(buff, new byte[]{-1, -2, -3, -4, -5, -6}));
}
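The expected buffer {-1, -2, -3, ...} above is not a mistake: the Mat stores unsigned CV_8U values, but Java's byte is signed, so the bit pattern written as 255 reads back as -1. A one-line illustration of the reinterpretation:

    byte b = (byte) 255;       // same bits as the CV_8U value 255
    int unsigned = b & 0xFF;   // b prints as -1; masking recovers 255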
public void testPutIntIntFloatArray() {
- Mat m = new Mat(5, 5, CvType.CV_32FC3);
+ Mat m = new Mat(5, 5, CvType.CV_32FC3, new Scalar(1, 2, 3));
float[] elements = new float[] { 10, 20, 30, 40, 50, 60 };
- m.put(1, 1, elements);
+
+ int bytesNum = m.put(4, 3, elements);
+
+ assertEquals(elements.length * 4, bytesNum);
+ Mat m1 = m.row(4);
+ float buff[] = new float[3];
+ bytesNum = m1.get(0, 4, buff);
+ assertEquals(buff.length * 4, bytesNum);
+ assertTrue(Arrays.equals(new float[]{40, 50, 60}, buff));
+ assertArrayEquals(new double[]{10, 20, 30}, m.get(4, 3), EPS);
try {
float[] elements2 = new float[] { 10, 20, 30, 40, 50 };
}
public void testPutIntIntIntArray() {
- Mat m = new Mat(5, 5, CvType.CV_32SC3);
+ Mat m = new Mat(5, 5, CvType.CV_32SC3, new Scalar(-1, -2, -3));
int[] elements = new int[] { 10, 20, 30, 40, 50, 60 };
- m.put(1, 1, elements);
+
+ int bytesNum = m.put(0, 4, elements);
+
+ assertEquals(elements.length * 4, bytesNum);
+ Mat m1 = m.col(4);
+ int buff[] = new int[3];
+ bytesNum = m1.get(0, 0, buff);
+ assertEquals(buff.length * 4, bytesNum);
+ assertTrue(Arrays.equals(new int[]{10, 20, 30}, buff));
+ assertArrayEquals(new double[]{40, 50, 60}, m.get(1, 0), EPS);
try {
int[] elements2 = new int[] { 10, 20, 30, 40, 50 };
}
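put() treats a dense Mat as one row-major sequence of elements, which is why the six ints written at (0, 4) of a 3-channel Mat fill element (0, 4) and then spill into (1, 0), where the test reads them back. A sketch of the index rule this relies on (dense, continuous Mat assumed):

    // Element i of a dense 5-column Mat lives at row i / 5, col i % 5.
    int i = 5;                       // (0, 4) is element 4; the next is 5
    assert i / 5 == 1 && i % 5 == 0; // ...which lands at (1, 0)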
public void testPutIntIntShortArray() {
- Mat m = new Mat(5, 5, CvType.CV_16SC3);
+ Mat m = new Mat(5, 5, CvType.CV_16SC3, new Scalar(-1, -2, -3));
short[] elements = new short[] { 10, 20, 30, 40, 50, 60 };
- m.put(1, 1, elements);
+
+ int bytesNum = m.put(2, 3, elements);
+
+ assertEquals(elements.length * 2, bytesNum);
+ Mat m1 = m.col(3);
+ short buff[] = new short[3];
+ bytesNum = m1.get(2, 0, buff);
+ assertEquals(buff.length * 2, bytesNum);
+ assertTrue(Arrays.equals(new short[]{10, 20, 30}, buff));
+ assertArrayEquals(new double[]{40, 50, 60}, m.get(2, 4), EPS);
try {
short[] elements2 = new short[] { 10, 20, 30, 40, 50 };
assertMatEqual(gray127, gray0);
}
+ public void testSetToScalarMask() {
+ Mat mask = gray0.clone();
+ mask.put(1, 1, 1, 2, 3);
+ gray0.setTo(new Scalar(1), mask);
+ assertEquals(3, Core.countNonZero(gray0));
+ Core.subtract(gray0, mask, gray0);
+ assertEquals(0, Core.countNonZero(gray0));
+ }
+
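The final assertion works even though the mask holds 2 and 3 while gray0 holds 1: CV_8U arithmetic saturates, so 1 - 2 and 1 - 3 both clamp to 0 instead of going negative. A tiny demonstration of the saturating subtract:

    Mat one   = new Mat(1, 1, CvType.CV_8U, new Scalar(1));
    Mat three = new Mat(1, 1, CvType.CV_8U, new Scalar(3));
    Mat dst   = new Mat();
    Core.subtract(one, three, dst); // 1 - 3 saturates to 0, not -2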
public void testSize() {
assertEquals(new Size(matSize, matSize), gray0.size());
matSize = 100;
truth = new DMatch[] {
- /*
- new DMatch(0, 0, 0, 0.643284f),
- new DMatch(1, 1, 0, 0.92945856f),
- new DMatch(2, 1, 0, 0.2841479f),
- new DMatch(3, 1, 0, 0.9194034f),
- new DMatch(4, 1, 0, 0.3006621f)
- */
- new DMatch(0, 0, 0, 1.049694f),
- new DMatch(1, 0, 0, 1.083795f),
- new DMatch(2, 1, 0, 0.484352f),
- new DMatch(3, 0, 0, 1.098605f),
- new DMatch(4, 1, 0, 0.494587f)
+ new DMatch(0, 0, 0, 1.049694f),
+ new DMatch(1, 0, 0, 1.098605f),
+ new DMatch(2, 1, 0, 0.494587f),
+ new DMatch(3, 1, 0, 0.484352f),
+ new DMatch(4, 0, 0, 1.083795f)
};
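For readability of these truth tables, the constructor argument order is (queryIdx, trainIdx, imgIdx, distance); e.g. the third entry says query descriptor 2 matched train descriptor 1 at distance about 0.49:

    DMatch dm = new DMatch(2, 1, 0, 0.494587f);
    // dm.queryIdx == 2, dm.trainIdx == 1, dm.imgIdx == 0, dm.distance ~ 0.49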
super.setUp();
Log.d("knnMatch", "train = " + train);
Log.d("knnMatch", "query = " + query);
/*
+ Log.d("knnMatch", "train = " + train);
+ Log.d("knnMatch", "query = " + query);
+
matcher.add(train);
matcher.knnMatch(query, matches, k);
*/
for(int i = 0; i<matches.size(); i++)
{
MatOfDMatch vdm = matches.get(i);
- Log.d("knn", "vdm["+i+"]="+vdm.dump());
+ //Log.d("knn", "vdm["+i+"]="+vdm.dump());
assertTrue(Math.min(k, train.rows()) >= vdm.total());
for(DMatch dm : vdm.toArray())
{
matcher.match(query, train, matches);
+ /*
OpenCVTestRunner.Log("matches found: " + matches.size());
for (DMatch m : matches.toArray())
OpenCVTestRunner.Log(m.toString());
+ */
assertArrayDMatchEquals(truth, matches.toArray(), EPS);
}
matSize = 100;
truth = new DMatch[] {
- /*
- new DMatch(0, 0, 0, 3.175296f),
- new DMatch(1, 1, 0, 3.5954158f),
- new DMatch(2, 1, 0, 1.2537984f),
- new DMatch(3, 1, 0, 3.5761614f),
- new DMatch(4, 1, 0, 1.3250958f)
- */
- new DMatch(0, 1, 0, 6.920234f),
- new DMatch(1, 0, 0, 6.1294847f),
- new DMatch(2, 1, 0, 2.6545324f),
- new DMatch(3, 1, 0, 6.1675916f),
- new DMatch(4, 1, 0, 2.679859f)
+ new DMatch(0, 1, 0, 6.9202342f),
+ new DMatch(1, 1, 0, 6.1675916f),
+ new DMatch(2, 1, 0, 2.6798589f),
+ new DMatch(3, 1, 0, 2.6545324f),
+ new DMatch(4, 0, 0, 6.1294847f)
};
super.setUp();
}
matSize = 100;
truth = new DMatch[] {
- /*
- new DMatch(0, 0, 0, sqr(0.643284f)),
- new DMatch(1, 1, 0, sqr(0.92945856f)),
- new DMatch(2, 1, 0, sqr(0.2841479f)),
- new DMatch(3, 1, 0, sqr(0.9194034f)),
- new DMatch(4, 1, 0, sqr(0.3006621f))
- */
new DMatch(0, 0, 0, 1.1018577f),
- new DMatch(1, 0, 0, 1.1746116f),
- new DMatch(2, 1, 0, 0.23459719f),
- new DMatch(3, 0, 0, 1.2069331f),
- new DMatch(4, 1, 0, 0.2446168f)
+ new DMatch(1, 0, 0, 1.2069331f),
+ new DMatch(2, 1, 0, 0.2446168f),
+ new DMatch(3, 1, 0, 0.2345972f),
+ new DMatch(4, 0, 0, 1.1746116f)
};
super.setUp();
matSize = 100;
truth = new DMatch[] {
- /*
- new DMatch(0, 0, 0, 0.643284f),
- new DMatch(1, 1, 0, 0.92945856f),
- new DMatch(2, 1, 0, 0.2841479f),
- new DMatch(3, 1, 0, 0.9194034f),
- new DMatch(4, 1, 0, 0.3006621f)
- */
new DMatch(0, 0, 0, 1.049694f),
- new DMatch(1, 0, 0, 1.083795f),
- new DMatch(2, 1, 0, 0.484352f),
- new DMatch(3, 0, 0, 1.098605f),
- new DMatch(4, 1, 0, 0.494587f)
+ new DMatch(1, 0, 0, 1.098605f),
+ new DMatch(2, 1, 0, 0.494587f),
+ new DMatch(3, 1, 0, 0.484352f),
+ new DMatch(4, 0, 0, 1.083795f)
};
super.setUp();
DescriptorExtractor extractor;
int matSize;
+ public static void assertDescriptorsClose(Mat expected, Mat actual, int allowedDistance) {
+ double distance = Core.norm(expected, actual, Core.NORM_HAMMING);
+ assertTrue("expected:<" + allowedDistance + "> but was:<" + distance + ">", distance <= allowedDistance);
+ }
+
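assertDescriptorsClose relaxes exact matching: Core.norm with NORM_HAMMING counts the differing bits between the two byte matrices, so allowedDistance = 1 tolerates a single flipped bit anywhere in the descriptors. For instance:

    Mat a = new Mat(1, 1, CvType.CV_8U, new Scalar(0x01));
    Mat b = new Mat(1, 1, CvType.CV_8U, new Scalar(0x03));
    double bits = Core.norm(a, b, Core.NORM_HAMMING); // one differing bit -> 1.0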
private Mat getTestImg() {
Mat cross = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Core.line(cross, new Point(20, matSize / 2), new Point(matSize - 21, matSize / 2), new Scalar(100), 2);
6, 74, 6, 129, 2, 130, 56, 0, 36, 132, 66, 165, 172, 6, 3, 72, 102, 61, 163, 214, 0, 144, 65, 232, 4, 32, 138, 129, 4, 21, 37, 88);
}
};
- assertMatEqual(truth, descriptors);
+ assertDescriptorsClose(truth, descriptors, 1);
}
public void testCreate() {
6, 10, 22, 5, 2, 130, 56, 0, 44, 164, 66, 165, 140, 6, 1, 72, 38, 61, 163, 210, 0, 208, 1, 104, 4, 32, 10, 131, 0, 37, 37, 67);
}
};
- assertMatEqual(truth, descriptors);
+ assertDescriptorsClose(truth, descriptors, 1);
}
public void testWrite() {
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
+import org.opencv.core.MatOfInt4;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
public void testCalcBackProject() {
List<Mat> images = Arrays.asList(grayChess);
- MatOfInt channels = new MatOfInt(1, new int[]{0});
- MatOfInt histSize = new MatOfInt(1, new int[]{10});
- MatOfFloat ranges = new MatOfFloat(1, 0f, 256f);
+ MatOfInt channels = new MatOfInt(0);
+ MatOfInt histSize = new MatOfInt(10);
+ MatOfFloat ranges = new MatOfFloat(0f, 256f);
Mat hist = new Mat();
Imgproc.calcHist(images, channels, new Mat(), hist, histSize, ranges);
public void testCalcHistListOfMatListOfIntegerMatMatListOfIntegerListOfFloat() {
List<Mat> images = Arrays.asList(gray128);
- MatOfInt channels = new MatOfInt(1, new int[]{0});
- MatOfInt histSize = new MatOfInt(1, new int[]{10});
- MatOfFloat ranges = new MatOfFloat(1, 0f, 256f);
+ MatOfInt channels = new MatOfInt(0);
+ MatOfInt histSize = new MatOfInt(10);
+ MatOfFloat ranges = new MatOfFloat(0f, 256f);
Mat hist = new Mat();
Imgproc.calcHist(images, channels, new Mat(), hist, histSize, ranges);
assertMatEqual(truth, hist, EPS);
}
- public void testCalcHistListOfMatListOfIntegerMatMatListOfIntegerListOfFloat2d() {
+ public void testCalcHistListOfMatListOfIntegerMatMatListOfIntegerListOfFloat2D() {
List<Mat> images = Arrays.asList(gray255, gray128);
- MatOfInt channels = new MatOfInt(1, 0, 1);
- MatOfInt histSize = new MatOfInt(1, 10, 10);
- MatOfFloat ranges = new MatOfFloat(1, 0f, 256f, 0f, 256f);
+ MatOfInt channels = new MatOfInt(0, 1);
+ MatOfInt histSize = new MatOfInt(10, 10);
+ MatOfFloat ranges = new MatOfFloat(0f, 256f, 0f, 256f);
Mat hist = new Mat();
Imgproc.calcHist(images, channels, new Mat(), hist, histSize, ranges);
assertMatEqual(truth, hist, EPS);
}
+ public void testCalcHistListOfMatListOfIntegerMatMatListOfIntegerListOfFloat3D() {
+ List<Mat> images = Arrays.asList(rgbLena);
+
+ Mat hist3D = new Mat();
+ List<Mat> histList = Arrays.asList( new Mat[] {new Mat(), new Mat(), new Mat()} );
+
+ MatOfInt histSize = new MatOfInt(10);
+ MatOfFloat ranges = new MatOfFloat(0f, 256f);
+
+ for(int i=0; i<rgbLena.channels(); i++)
+ {
+ Imgproc.calcHist(images, new MatOfInt(i), new Mat(), histList.get(i), histSize, ranges);
+
+ assertEquals(10, histList.get(i).checkVector(1));
+ }
+
+ Core.merge(histList, hist3D);
+
+ assertEquals(CvType.CV_32FC3, hist3D.type());
+ assertEquals(10, hist3D.checkVector(3));
+
+ Mat truth = new Mat(10, 1, CvType.CV_32FC3);
+ truth.put(0, 0,
+ 0, 24870, 0,
+ 1863, 31926, 1,
+ 56682, 37677, 2260,
+ 77278, 44751, 32436,
+ 69397, 41343, 18526,
+ 27180, 40407, 18658,
+ 21101, 15993, 32042,
+ 8343, 18585, 47786,
+ 300, 6567, 80988,
+ 0, 25, 29447
+ );
+
+ assertMatEqual(truth, hist3D, EPS);
+ }
+
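The checkVector(n) calls above return the element count when the Mat is a single row or column of n-channel entries (and -1 otherwise), which is why the merged 10-bin, 3-channel histogram satisfies checkVector(3) == 10. A minimal illustration:

    Mat v = new Mat(10, 1, CvType.CV_32FC3);
    assert v.checkVector(3) == 10; // 10 three-channel entries in a column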
public void testCalcHistListOfMatListOfIntegerMatMatListOfIntegerListOfFloatBoolean() {
List<Mat> images = Arrays.asList(gray255, gray128);
- MatOfInt channels = new MatOfInt(1, 0, 1);
- MatOfInt histSize = new MatOfInt(1, 10, 10);
- MatOfFloat ranges = new MatOfFloat(1, 0f, 256f, 0f, 256f);
+ MatOfInt channels = new MatOfInt(0, 1);
+ MatOfInt histSize = new MatOfInt(10, 10);
+ MatOfFloat ranges = new MatOfFloat(0f, 256f, 0f, 256f);
Mat hist = new Mat();
Imgproc.calcHist(images, channels, new Mat(), hist, histSize, ranges, true);
}
public void testConvexHullMatMat() {
- Mat points = new Mat(1, 6, CvType.CV_32FC2);
- points.put(0, 0, 2, 0, 4, 0, 3, 2, 0, 2, 2, 1, 3, 1);
+ MatOfPoint points = new MatOfPoint(
+ new Point(20, 0),
+ new Point(40, 0),
+ new Point(30, 20),
+ new Point(0, 20),
+ new Point(20, 10),
+ new Point(30, 10)
+ );
+
+ MatOfInt hull = new MatOfInt();
- Imgproc.convexHull(points, dst);
+ Imgproc.convexHull(points, hull);
- Mat expHull = new Mat(4, 1, CvType.CV_32FC2);
- expHull.put(0, 0, 4, 0, 3, 2, 0, 2, 2, 0);
- assertMatEqual(expHull, dst, EPS);
+ MatOfInt expHull = new MatOfInt(
+ 1, 2, 3, 0
+ );
+ assertMatEqual(expHull, hull, EPS);
}
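The new signature returns hull vertex indices into the input point set rather than the points themselves. A sketch of recovering the actual hull points, continuing from the test above:

    // Map the returned indices {1, 2, 3, 0} back onto `points`:
    Point[] pts = points.toArray();
    int[] idx = hull.toArray();
    Point[] hullPts = new Point[idx.length];
    for (int i = 0; i < idx.length; i++)
        hullPts[i] = pts[idx[i]];
    // yields (40,0), (30,20), (0,20), (20,0) for this input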
public void testConvexHullMatMatBooleanBoolean() {
- Mat points = new Mat(1, 6, CvType.CV_32FC2);
- points.put(0, 0, 2, 0, 4, 0, 3, 2, 0, 2, 2, 1, 3, 1);
+ MatOfPoint points = new MatOfPoint(
+ new Point(2, 0),
+ new Point(4, 0),
+ new Point(3, 2),
+ new Point(0, 2),
+ new Point(2, 1),
+ new Point(3, 1)
+ );
+
+ MatOfInt hull = new MatOfInt();
- Imgproc.convexHull(points, dst, true, true);
- // TODO_: write better test (last param == false)
+ Imgproc.convexHull(points, hull, true);
- Mat expHull = new Mat(4, 1, CvType.CV_32FC2);
- expHull.put(0, 0, 0, 2, 3, 2, 4, 0, 2, 0);
- assertMatEqual(expHull, dst, EPS);
+ MatOfInt expHull = new MatOfInt(
+ 3, 2, 1, 0
+ );
+ assertMatEqual(expHull, hull, EPS);
+ }
+
+ public void testConvexityDefects() {
+ MatOfPoint points = new MatOfPoint(
+ new Point(20, 0),
+ new Point(40, 0),
+ new Point(30, 20),
+ new Point(0, 20),
+ new Point(20, 10),
+ new Point(30, 10)
+ );
+
+ MatOfInt hull = new MatOfInt();
+ Imgproc.convexHull(points, hull);
+
+ MatOfInt4 convexityDefects = new MatOfInt4();
+ Imgproc.convexityDefects(points, hull, convexityDefects);
+
+ assertMatEqual(new MatOfInt4(3, 0, 5, 3620), convexityDefects);
}
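Each convexity defect is encoded as four ints: the hull-edge start index, end index, the index of the farthest contour point, and a fixed-point depth equal to distance * 256. Decoding the single defect asserted above, continuing from the test:

    int[] d = convexityDefects.toArray();    // {3, 0, 5, 3620}
    Point farthest = points.toArray()[d[2]]; // (30, 10)
    double depth = d[3] / 256.0;             // ~14.14 px from the hull edge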
public void testCopyMakeBorderMatMatIntIntIntIntInt() {
}
public void testIsContourConvex() {
- MatOfPoint2f contour1 = new MatOfPoint2f(new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(5, 4));
+ MatOfPoint contour1 = new MatOfPoint(new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(5, 4));
assertFalse(Imgproc.isContourConvex(contour1));
- MatOfPoint2f contour2 = new MatOfPoint2f(new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(5, 6));
+ MatOfPoint contour2 = new MatOfPoint(new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(5, 6));
assertTrue(Imgproc.isContourConvex(contour2));
}
package org.opencv.test.imgproc;
-import org.opencv.core.MatOfFloat;
+import org.opencv.core.MatOfFloat6;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.imgproc.Subdiv2D;
s2d.insert( new Point(20, 10) );
s2d.insert( new Point(20, 20) );
s2d.insert( new Point(10, 20) );
- MatOfFloat triangles = new MatOfFloat();
+ MatOfFloat6 triangles = new MatOfFloat6();
s2d.getTriangleList(triangles);
assertEquals(10, triangles.rows());
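Each MatOfFloat6 row packs one Delaunay triangle as (x1, y1, x2, y2, x3, y3); toArray() therefore returns six floats per triangle. A sketch of unpacking the first one, continuing from the test:

    float[] t = triangles.toArray();
    Point p1 = new Point(t[0], t[1]);
    Point p2 = new Point(t[2], t[3]);
    Point p3 = new Point(t[4], t[5]);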
/*
include/opencv2/core/core.hpp
+../java/src/cpp/core_manual.hpp
-import sys, re, os.path\r
-from string import Template\r
-\r
-try:\r
- from cStringIO import StringIO\r
-except:\r
- from StringIO import StringIO\r
-\r
-class_ignore_list = (\r
- #core\r
- "FileNode", "FileStorage", "KDTree",\r
- #highgui\r
- "VideoWriter", "VideoCapture",\r
- #features2d\r
- #"KeyPoint", "MSER", "StarDetector", "SURF", "DMatch",\r
- #ml\r
- "EM",\r
-)\r
-\r
-const_ignore_list = (\r
- "CV_CAP_OPENNI",\r
- "CV_CAP_PROP_OPENNI_",\r
- "WINDOW_AUTOSIZE",\r
- "CV_WND_PROP_",\r
- "CV_WINDOW_",\r
- "CV_EVENT_",\r
- "CV_GUI_",\r
- "CV_PUSH_BUTTON",\r
- "CV_CHECKBOX",\r
- "CV_RADIOBOX",\r
-\r
- #attention!\r
- #the following constants are added to this list using code automatic generation\r
- #TODO: should be checked\r
- "CV_CAP_ANY",\r
- "CV_CAP_MIL",\r
- "CV_CAP_VFW",\r
- "CV_CAP_V4L",\r
- "CV_CAP_V4L2",\r
- "CV_CAP_FIREWARE",\r
- "CV_CAP_FIREWIRE",\r
- "CV_CAP_IEEE1394",\r
- "CV_CAP_DC1394",\r
- "CV_CAP_CMU1394",\r
- "CV_CAP_STEREO",\r
- "CV_CAP_TYZX",\r
- "CV_TYZX_LEFT",\r
- "CV_TYZX_RIGHT",\r
- "CV_TYZX_COLOR",\r
- "CV_TYZX_Z",\r
- "CV_CAP_QT",\r
- "CV_CAP_UNICAP",\r
- "CV_CAP_DSHOW",\r
- "CV_CAP_PVAPI",\r
- "CV_CAP_PROP_DC1394_OFF",\r
- "CV_CAP_PROP_DC1394_MODE_MANUAL",\r
- "CV_CAP_PROP_DC1394_MODE_AUTO",\r
- "CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO",\r
- "CV_CAP_PROP_POS_MSEC",\r
- "CV_CAP_PROP_POS_FRAMES",\r
- "CV_CAP_PROP_POS_AVI_RATIO",\r
- "CV_CAP_PROP_FPS",\r
- "CV_CAP_PROP_FOURCC",\r
- "CV_CAP_PROP_FRAME_COUNT",\r
- "CV_CAP_PROP_FORMAT",\r
- "CV_CAP_PROP_MODE",\r
- "CV_CAP_PROP_BRIGHTNESS",\r
- "CV_CAP_PROP_CONTRAST",\r
- "CV_CAP_PROP_SATURATION",\r
- "CV_CAP_PROP_HUE",\r
- "CV_CAP_PROP_GAIN",\r
- "CV_CAP_PROP_EXPOSURE",\r
- "CV_CAP_PROP_CONVERT_RGB",\r
- "CV_CAP_PROP_WHITE_BALANCE_BLUE_U",\r
- "CV_CAP_PROP_RECTIFICATION",\r
- "CV_CAP_PROP_MONOCROME",\r
- "CV_CAP_PROP_SHARPNESS",\r
- "CV_CAP_PROP_AUTO_EXPOSURE",\r
- "CV_CAP_PROP_GAMMA",\r
- "CV_CAP_PROP_TEMPERATURE",\r
- "CV_CAP_PROP_TRIGGER",\r
- "CV_CAP_PROP_TRIGGER_DELAY",\r
- "CV_CAP_PROP_WHITE_BALANCE_RED_V",\r
- "CV_CAP_PROP_MAX_DC1394",\r
- "CV_CAP_GSTREAMER_QUEUE_LENGTH",\r
- "CV_CAP_PROP_PVAPI_MULTICASTIP",\r
- "CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING",\r
- "EVENT_.*",\r
- "CV_L?(BGRA?|RGBA?|GRAY|XYZ|YCrCb|Luv|Lab|HLS|YUV|HSV)\d*2L?(BGRA?|RGBA?|GRAY|XYZ|YCrCb|Luv|Lab|HLS|YUV|HSV).*",\r
- "CV_COLORCVT_MAX",\r
- "CV_.*Bayer.*",\r
- "CV_YUV420(i|sp|p)2.+",\r
- "CV_TM_.+",\r
- "CV_FLOODFILL_.+",\r
- "CV_ADAPTIVE_THRESH_.+",\r
- "WINDOW_.+",\r
- "WND_PROP_.+",\r
-)\r
-\r
-const_private_list = (\r
- "CV_MOP_.+",\r
- "CV_INTER_.+",\r
- "CV_THRESH_.+",\r
- "CV_INPAINT_.+",\r
- "CV_RETR_.+",\r
- "CV_CHAIN_APPROX_.+",\r
- "OPPONENTEXTRACTOR",\r
- "GRIDRETECTOR",\r
- "PYRAMIDDETECTOR",\r
- "DYNAMICDETECTOR",\r
-)\r
-\r
-# { Module : { public : [[name, val],...], private : [[]...] } }\r
-missing_consts = \\r
-{\r
- 'Core' :\r
- {\r
- 'private' :\r
- (\r
- ('CV_8U', 0 ), ('CV_8S', 1 ),\r
- ('CV_16U', 2 ), ('CV_16S', 3 ),\r
- ('CV_32S', 4 ),\r
- ('CV_32F', 5 ), ('CV_64F', 6 ),\r
- ('CV_USRTYPE1', 7 ),\r
- ), # private\r
- 'public' :\r
- (\r
- ('SVD_MODIFY_A', 1), ('SVD_NO_UV', 2), ('SVD_FULL_UV', 4),\r
- ('FILLED', -1),\r
- ('LINE_AA', 16), ('LINE_8', 8), ('LINE_4', 4),\r
- ('REDUCE_SUM', 0), ('REDUCE_AVG', 1), ('REDUCE_MAX', 2), ('REDUCE_MIN', 3),\r
- ) #public\r
- }, # Core\r
-\r
- "Imgproc":\r
- {\r
- 'private' :\r
- (\r
- ('IPL_BORDER_CONSTANT', 0 ),\r
- ('IPL_BORDER_REPLICATE', 1 ),\r
- ('IPL_BORDER_REFLECT', 2 ),\r
- ('IPL_BORDER_WRAP', 3 ),\r
- ('IPL_BORDER_REFLECT_101', 4 ),\r
- ('IPL_BORDER_TRANSPARENT', 5 ),\r
- ) # private\r
- }, # Imgproc\r
-\r
- "Calib3d":\r
- {\r
- 'private' :\r
- (\r
- ('CV_LMEDS', 4),\r
- ('CV_RANSAC', 8),\r
- ('CV_FM_LMEDS', 'CV_LMEDS'),\r
- ('CV_FM_RANSAC','CV_RANSAC'),\r
- ('CV_FM_7POINT', 1),\r
- ('CV_FM_8POINT', 2),\r
- ('CV_CALIB_USE_INTRINSIC_GUESS', 1),\r
- ('CV_CALIB_FIX_ASPECT_RATIO', 2),\r
- ('CV_CALIB_FIX_PRINCIPAL_POINT', 4),\r
- ('CV_CALIB_ZERO_TANGENT_DIST', 8),\r
- ('CV_CALIB_FIX_FOCAL_LENGTH', 16),\r
- ('CV_CALIB_FIX_K1', 32),\r
- ('CV_CALIB_FIX_K2', 64),\r
- ('CV_CALIB_FIX_K3', 128),\r
- ('CV_CALIB_FIX_K4', 2048),\r
- ('CV_CALIB_FIX_K5', 4096),\r
- ('CV_CALIB_FIX_K6', 8192),\r
- ('CV_CALIB_RATIONAL_MODEL', 16384),\r
- ('CV_CALIB_FIX_INTRINSIC', 256),\r
- ('CV_CALIB_SAME_FOCAL_LENGTH', 512),\r
- ('CV_CALIB_ZERO_DISPARITY', 1024),\r
- ) # public\r
- }, # Calib3d\r
-\r
- "Video":\r
- {\r
- 'private' :\r
- (\r
- ('CV_LKFLOW_INITIAL_GUESSES', 4 ),\r
- ('CV_LKFLOW_GET_MIN_EIGENVALS', 8 ),\r
- ) # private\r
- }, # Video\r
-\r
-}\r
-\r
-\r
-# c_type : { java/jni correspondence }\r
-type_dict = {\r
-# "simple" : { j_type : "?", jn_type : "?", jni_type : "?", suffix : "?" },\r
- "" : { "j_type" : "", "jn_type" : "long", "jni_type" : "jlong" }, # c-tor ret_type\r
- "void" : { "j_type" : "void", "jn_type" : "void", "jni_type" : "void" },\r
- "env" : { "j_type" : "", "jn_type" : "", "jni_type" : "JNIEnv*"},\r
- "cls" : { "j_type" : "", "jn_type" : "", "jni_type" : "jclass"},\r
- "bool" : { "j_type" : "boolean", "jn_type" : "boolean", "jni_type" : "jboolean", "suffix" : "Z" },\r
- "int" : { "j_type" : "int", "jn_type" : "int", "jni_type" : "jint", "suffix" : "I" },\r
- "long" : { "j_type" : "int", "jn_type" : "int", "jni_type" : "jint", "suffix" : "I" },\r
- "float" : { "j_type" : "float", "jn_type" : "float", "jni_type" : "jfloat", "suffix" : "F" },\r
- "double" : { "j_type" : "double", "jn_type" : "double", "jni_type" : "jdouble", "suffix" : "D" },\r
- "size_t" : { "j_type" : "long", "jn_type" : "long", "jni_type" : "jlong", "suffix" : "J" },\r
- "__int64" : { "j_type" : "long", "jn_type" : "long", "jni_type" : "jlong", "suffix" : "J" },\r
- "int64" : { "j_type" : "long", "jn_type" : "long", "jni_type" : "jlong", "suffix" : "J" },\r
- "double[]": { "j_type" : "double[]", "jn_type" : "double[]", "jni_type" : "jdoubleArray", "suffix" : "_3D" },\r
-\r
-# "complex" : { j_type : "?", jn_args : (("", ""),), jn_name : "", jni_var : "", jni_name : "", "suffix" : "?" },\r
-\r
- "vector_Point" : { "j_type" : "MatOfPoint", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point> %(n)s", "suffix" : "J" },\r
- "vector_Point2f" : { "j_type" : "MatOfPoint2f", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point2f> %(n)s", "suffix" : "J" },\r
- #"vector_Point2d" : { "j_type" : "MatOfPoint2d", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point2d> %(n)s", "suffix" : "J" },\r
- "vector_Point3i" : { "j_type" : "MatOfPoint3", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point3i> %(n)s", "suffix" : "J" },\r
- "vector_Point3f" : { "j_type" : "MatOfPoint3f", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point3f> %(n)s", "suffix" : "J" },\r
- #"vector_Point3d" : { "j_type" : "MatOfPoint3d", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point3d> %(n)s", "suffix" : "J" },\r
- "vector_KeyPoint" : { "j_type" : "MatOfKeyPoint", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<KeyPoint> %(n)s", "suffix" : "J" },\r
- "vector_DMatch" : { "j_type" : "MatOfDMatch", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<DMatch> %(n)s", "suffix" : "J" },\r
- "vector_Rect" : { "j_type" : "MatOfRect", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Rect> %(n)s", "suffix" : "J" },\r
- "vector_uchar" : { "j_type" : "MatOfByte", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<uchar> %(n)s", "suffix" : "J" },\r
- "vector_char" : { "j_type" : "MatOfByte", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<char> %(n)s", "suffix" : "J" },\r
- "vector_int" : { "j_type" : "MatOfInt", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<int> %(n)s", "suffix" : "J" },\r
- "vector_float" : { "j_type" : "MatOfFloat", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<float> %(n)s", "suffix" : "J" },\r
- "vector_double" : { "j_type" : "MatOfDouble", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<double> %(n)s", "suffix" : "J" },\r
- "vector_Vec4f" : { "j_type" : "MatOfFloat", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Vec4f> %(n)s", "suffix" : "J" },\r
- "vector_Vec6f" : { "j_type" : "MatOfFloat", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Vec6f> %(n)s", "suffix" : "J" },\r
-\r
- "vector_Mat" : { "j_type" : "List<Mat>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Mat> %(n)s", "suffix" : "J" },\r
-\r
- "vector_vector_KeyPoint": { "j_type" : "List<MatOfKeyPoint>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<KeyPoint> > %(n)s" },\r
- "vector_vector_DMatch" : { "j_type" : "List<MatOfDMatch>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<DMatch> > %(n)s" },\r
- "vector_vector_char" : { "j_type" : "List<MatOfByte>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<char> > %(n)s" },\r
- "vector_vector_Point" : { "j_type" : "List<MatOfPoint>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<Point> > %(n)s" },\r
- "vector_vector_Point2f" : { "j_type" : "List<MatOfPoint2f>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<Point2f> > %(n)s" },\r
-\r
- "Mat" : { "j_type" : "Mat", "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),\r
- "jni_var" : "Mat& %(n)s = *((Mat*)%(n)s_nativeObj)",\r
- "jni_type" : "jlong", #"jni_name" : "*%(n)s",\r
- "suffix" : "J" },\r
-\r
- "Point" : { "j_type" : "Point", "jn_args" : (("double", ".x"), ("double", ".y")),\r
- "jni_var" : "Point %(n)s((int)%(n)s_x, (int)%(n)s_y)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DD"},\r
- "Point2f" : { "j_type" : "Point", "jn_args" : (("double", ".x"), ("double", ".y")),\r
- "jni_var" : "Point2f %(n)s((float)%(n)s_x, (float)%(n)s_y)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DD"},\r
- "Point2d" : { "j_type" : "Point", "jn_args" : (("double", ".x"), ("double", ".y")),\r
- "jni_var" : "Point2d %(n)s(%(n)s_x, %(n)s_y)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DD"},\r
- "Point3i" : { "j_type" : "Point3", "jn_args" : (("double", ".x"), ("double", ".y"), ("double", ".z")),\r
- "jni_var" : "Point3i %(n)s((int)%(n)s_x, (int)%(n)s_y, (int)%(n)s_z)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DDD"},\r
- "Point3f" : { "j_type" : "Point3", "jn_args" : (("double", ".x"), ("double", ".y"), ("double", ".z")),\r
- "jni_var" : "Point3f %(n)s((float)%(n)s_x, (float)%(n)s_y, (float)%(n)s_z)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DDD"},\r
- "Point3d" : { "j_type" : "Point3", "jn_args" : (("double", ".x"), ("double", ".y"), ("double", ".z")),\r
- "jni_var" : "Point3d %(n)s(%(n)s_x, %(n)s_y, %(n)s_z)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DDD"},\r
- "KeyPoint": { "j_type" : "KeyPoint", "jn_args" : (("float", ".x"), ("float", ".y"), ("float", ".size"),\r
- ("float", ".angle"), ("float", ".response"), ("int", ".octave"), ("int", ".class_id")),\r
- "jni_var" : "KeyPoint %(n)s(%(n)s_x, %(n)s_y, %(n)s_size, %(n)s_angle, %(n)s_response, %(n)s_octave, %(n)s_class_id)",\r
- "jni_type" : "jdoubleArray",\r
- "suffix" : "FFFFFII"},\r
- "DMatch" : { "j_type" : "DMatch", "jn_args" : ( ('int', 'queryIdx'), ('int', 'trainIdx'),\r
- ('int', 'imgIdx'), ('float', 'distance'), ),\r
- "jni_var" : "DMatch %(n)s(%(n)s_queryIdx, %(n)s_trainIdx, %(n)s_imgIdx, %(n)s_distance)",\r
- "jni_type" : "jdoubleArray",\r
- "suffix" : "IIIF"},\r
- "Rect" : { "j_type" : "Rect", "jn_args" : (("int", ".x"), ("int", ".y"), ("int", ".width"), ("int", ".height")),\r
- "jni_var" : "Rect %(n)s(%(n)s_x, %(n)s_y, %(n)s_width, %(n)s_height)", "jni_type" : "jdoubleArray",\r
- "suffix" : "IIII"},\r
- "Size" : { "j_type" : "Size", "jn_args" : (("double", ".width"), ("double", ".height")),\r
- "jni_var" : "Size %(n)s((int)%(n)s_width, (int)%(n)s_height)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DD"},\r
- "Size2f" : { "j_type" : "Size", "jn_args" : (("double", ".width"), ("double", ".height")),\r
- "jni_var" : "Size2f %(n)s((float)%(n)s_width, (float)%(n)s_height)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DD"},\r
- "RotatedRect": { "j_type" : "RotatedRect", "jn_args" : (("double", ".center.x"), ("double", ".center.y"), ("double", ".size.width"), ("double", ".size.height"), ("double", ".angle")),\r
- "jni_var" : "RotatedRect %(n)s(cv::Point2f(%(n)s_center_x, %(n)s_center_y), cv::Size2f(%(n)s_size_width, %(n)s_size_height), %(n)s_angle)",\r
- "jni_type" : "jdoubleArray", "suffix" : "DDDDD"},\r
- "Scalar" : { "j_type" : "Scalar", "jn_args" : (("double", ".val[0]"), ("double", ".val[1]"), ("double", ".val[2]"), ("double", ".val[3]")),\r
- "jni_var" : "Scalar %(n)s(%(n)s_val0, %(n)s_val1, %(n)s_val2, %(n)s_val3)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DDDD"},\r
- "Range" : { "j_type" : "Range", "jn_args" : (("int", ".start"), ("int", ".end")),\r
- "jni_var" : "Range %(n)s(%(n)s_start, %(n)s_end)", "jni_type" : "jdoubleArray",\r
- "suffix" : "II"},\r
- "CvSlice" : { "j_type" : "Range", "jn_args" : (("int", ".start"), ("int", ".end")),\r
- "jni_var" : "Range %(n)s(%(n)s_start, %(n)s_end)", "jni_type" : "jdoubleArray",\r
- "suffix" : "II"},\r
- "string" : { "j_type" : "String", "jn_type" : "String",\r
- "jni_type" : "jstring", "jni_name" : "n_%(n)s",\r
- "jni_var" : 'const char* utf_%(n)s = env->GetStringUTFChars(%(n)s, 0); std::string n_%(n)s( utf_%(n)s ? utf_%(n)s : "" ); env->ReleaseStringUTFChars(%(n)s, utf_%(n)s)',\r
- "suffix" : "Ljava_lang_String_2"},\r
- "String" : { "j_type" : "String", "jn_type" : "String",\r
- "jni_type" : "jstring", "jni_name" : "n_%(n)s",\r
- "jni_var" : 'const char* utf_%(n)s = env->GetStringUTFChars(%(n)s, 0); String n_%(n)s( utf_%(n)s ? utf_%(n)s : "" ); env->ReleaseStringUTFChars(%(n)s, utf_%(n)s)',\r
- "suffix" : "Ljava_lang_String_2"},\r
- "c_string": { "j_type" : "String", "jn_type" : "String",\r
- "jni_type" : "jstring", "jni_name" : "n_%(n)s.c_str()",\r
- "jni_var" : 'const char* utf_%(n)s = env->GetStringUTFChars(%(n)s, 0); std::string n_%(n)s( utf_%(n)s ? utf_%(n)s : "" ); env->ReleaseStringUTFChars(%(n)s, utf_%(n)s)',\r
- "suffix" : "Ljava_lang_String_2"},\r
-"TermCriteria": { "j_type" : "TermCriteria", "jn_args" : (("int", ".type"), ("int", ".maxCount"), ("double", ".epsilon")),\r
- "jni_var" : "TermCriteria %(n)s(%(n)s_type, %(n)s_maxCount, %(n)s_epsilon)",\r
- "suffix" : "IID"},\r
- "Vec3d" : { "j_type" : "double[]", "jn_args" : (("double", ".val[0]"), ("double", ".val[1]"), ("double", ".val[2]")),\r
- "jn_type" : "double[]",\r
- "jni_var" : "Vec3d %(n)s(%(n)s_val0, %(n)s_val1, %(n)s_val2)", "jni_type" : "jdoubleArray",\r
- "suffix" : "DDD"},\r
-\r
-}\r
-\r
-# { class : { func : {j_code, jn_code, cpp_code} } }\r
-ManualFuncs = {\r
- 'Core' :\r
- {\r
- 'minMaxLoc' : {\r
- 'j_code' : """\r
- // manual port\r
- public static class MinMaxLocResult {\r
- public double minVal;\r
- public double maxVal;\r
- public Point minLoc;\r
- public Point maxLoc;\r
-\r
- public MinMaxLocResult() {\r
- minVal=0; maxVal=0;\r
- minLoc=new Point();\r
- maxLoc=new Point();\r
- }\r
- }\r
-\r
- // C++: minMaxLoc(Mat src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray())\r
-\r
- //javadoc: minMaxLoc(src, mask)\r
- public static MinMaxLocResult minMaxLoc(Mat src, Mat mask) {\r
- MinMaxLocResult res = new MinMaxLocResult();\r
- long maskNativeObj=0;\r
- if (mask != null) {\r
- maskNativeObj=mask.nativeObj;\r
- }\r
- double resarr[] = n_minMaxLocManual(src.nativeObj, maskNativeObj);\r
- res.minVal=resarr[0];\r
- res.maxVal=resarr[1];\r
- res.minLoc.x=resarr[2];\r
- res.minLoc.y=resarr[3];\r
- res.maxLoc.x=resarr[4];\r
- res.maxLoc.y=resarr[5];\r
- return res;\r
- }\r
-\r
- //javadoc: minMaxLoc(src)\r
- public static MinMaxLocResult minMaxLoc(Mat src) {\r
- return minMaxLoc(src, null);\r
- }\r
-\r
-""",\r
- 'jn_code' :\r
-""" private static native double[] n_minMaxLocManual(long src_nativeObj, long mask_nativeObj);\n""",\r
- 'cpp_code' :\r
-"""\r
-// C++: minMaxLoc(Mat src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray())\r
-\r
-JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Core_n_1minMaxLocManual\r
- (JNIEnv* env, jclass cls, jlong src_nativeObj, jlong mask_nativeObj)\r
-{\r
- try {\r
- LOGD("Core::n_1minMaxLoc()");\r
- jdoubleArray result;\r
- result = env->NewDoubleArray(6);\r
- if (result == NULL) {\r
- return NULL; /* out of memory error thrown */\r
- }\r
-\r
- Mat& src = *((Mat*)src_nativeObj);\r
-\r
- double minVal, maxVal;\r
- Point minLoc, maxLoc;\r
- if (mask_nativeObj != 0) {\r
- Mat& mask = *((Mat*)mask_nativeObj);\r
- minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc, mask);\r
- } else {\r
- minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc);\r
- }\r
-\r
- jdouble fill[6];\r
- fill[0]=minVal;\r
- fill[1]=maxVal;\r
- fill[2]=minLoc.x;\r
- fill[3]=minLoc.y;\r
- fill[4]=maxLoc.x;\r
- fill[5]=maxLoc.y;\r
-\r
- env->SetDoubleArrayRegion(result, 0, 6, fill);\r
-\r
- return result;\r
-\r
- } catch(cv::Exception e) {\r
- LOGD("Core::n_1minMaxLoc() catched cv::Exception: %s", e.what());\r
- jclass je = env->FindClass("org/opencv/core/CvException");\r
- if(!je) je = env->FindClass("java/lang/Exception");\r
- env->ThrowNew(je, e.what());\r
- return NULL;\r
- } catch (...) {\r
- LOGD("Core::n_1minMaxLoc() catched unknown exception (...)");\r
- jclass je = env->FindClass("java/lang/Exception");\r
- env->ThrowNew(je, "Unknown exception in JNI code {core::minMaxLoc()}");\r
- return NULL;\r
- }\r
-}\r
-\r
-""",\r
- }, # minMaxLoc\r
-\r
- 'getTextSize' :\r
- {\r
- 'j_code' :\r
-"""\r
- // C++: Size getTextSize(const string& text, int fontFace, double fontScale, int thickness, int* baseLine);\r
- //javadoc:getTextSize(text, fontFace, fontScale, thickness, baseLine)\r
- public static Size getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine) {\r
- if(baseLine != null && baseLine.length != 1)\r
- throw new java.lang.IllegalArgumentException("'baseLine' must be 'int[1]' or 'null'.");\r
- Size retVal = new Size(n_getTextSize(text, fontFace, fontScale, thickness, baseLine));\r
- return retVal;\r
- }\r
-""",\r
- 'jn_code' :\r
-""" private static native double[] n_getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine);\n""",\r
- 'cpp_code' :\r
-"""\r
-// C++: Size getTextSize(const string& text, int fontFace, double fontScale, int thickness, int* baseLine);\r
-\r
-JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Core_n_1getTextSize\r
- (JNIEnv* env, jclass cls, jstring text, jint fontFace, jdouble fontScale, jint thickness, jintArray baseLine)\r
-{\r
- try {\r
- LOGD("Core::n_1getTextSize()");\r
- jdoubleArray result;\r
- result = env->NewDoubleArray(2);\r
- if (result == NULL) {\r
- return NULL; /* out of memory error thrown */\r
- }\r
-\r
- const char* utf_text = env->GetStringUTFChars(text, 0);\r
- std::string n_text( utf_text ? utf_text : "" );\r
- env->ReleaseStringUTFChars(text, utf_text);\r
-\r
- int _baseLine;\r
- int* pbaseLine = 0;\r
-\r
- if (baseLine != NULL)\r
- pbaseLine = &_baseLine;\r
-\r
- cv::Size rsize = cv::getTextSize(n_text, (int)fontFace, (double)fontScale, (int)thickness, pbaseLine);\r
-\r
- jdouble fill[2];\r
- fill[0]=rsize.width;\r
- fill[1]=rsize.height;\r
-\r
- env->SetDoubleArrayRegion(result, 0, 2, fill);\r
-\r
- if (baseLine != NULL)\r
- env->SetIntArrayRegion(baseLine, 0, 1, pbaseLine);\r
-\r
- return result;\r
-\r
- } catch(cv::Exception e) {\r
- LOGD("Core::n_1getTextSize() catched cv::Exception: %s", e.what());\r
- jclass je = env->FindClass("org/opencv/core/CvException");\r
- if(!je) je = env->FindClass("java/lang/Exception");\r
- env->ThrowNew(je, e.what());\r
- return NULL;\r
- } catch (...) {\r
- LOGD("Core::n_1getTextSize() catched unknown exception (...)");\r
- jclass je = env->FindClass("java/lang/Exception");\r
- env->ThrowNew(je, "Unknown exception in JNI code {core::getTextSize()}");\r
- return NULL;\r
- }\r
-}\r
-\r
-""",\r
- }, # getTextSize\r
-## "checkRange" : #TBD\r
-## {'j_code' : '/* TBD: checkRange() */', 'jn_code' : '', 'cpp_code' : '' },\r
-\r
- "checkHardwareSupport" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "setUseOptimized" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "useOptimized" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
-\r
- }, # Core\r
-\r
- 'Highgui' :\r
- {\r
- "namedWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "destroyWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "destroyAllWindows" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "startWindowThread" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "setWindowProperty" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "getWindowProperty" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "getTrackbarPos" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "setTrackbarPos" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "imshow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "waitKey" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "moveWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- "resizeWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },\r
- }, # Highgui\r
-\r
-}\r
-\r
-# { class : { func : {arg_name : ctype} } }\r
-func_arg_fix = {\r
- '' : {\r
- 'randu' : { 'low' : 'double', 'high' : 'double', },\r
- 'randn' : { 'mean' : 'double', 'stddev' : 'double', },\r
- 'inRange' : { 'lowerb' : 'Scalar', 'upperb' : 'Scalar', },\r
- 'goodFeaturesToTrack' : { 'corners' : 'vector_Point', },\r
- 'findFundamentalMat' : { 'points1' : 'vector_Point2f', 'points2' : 'vector_Point2f', },\r
- 'cornerSubPix' : { 'corners' : 'vector_Point2f', },\r
- 'minEnclosingCircle' : { 'points' : 'vector_Point2f', },\r
- 'findHomography' : { 'srcPoints' : 'vector_Point2f', 'dstPoints' : 'vector_Point2f', },\r
- 'solvePnP' : { 'objectPoints' : 'vector_Point3f', 'imagePoints' : 'vector_Point2f', },\r
- 'solvePnPRansac' : { 'objectPoints' : 'vector_Point3f', 'imagePoints' : 'vector_Point2f', },\r
- 'calcOpticalFlowPyrLK' : { 'prevPts' : 'vector_Point2f', 'nextPts' : 'vector_Point2f',\r
- 'status' : 'vector_uchar', 'err' : 'vector_float', },\r
- 'fitEllipse' : { 'points' : 'vector_Point2f', },\r
- 'fillPoly' : { 'pts' : 'vector_vector_Point', },\r
- 'polylines' : { 'pts' : 'vector_vector_Point', },\r
- 'fillConvexPoly' : { 'points' : 'vector_Point', },\r
- 'boundingRect' : { 'points' : 'vector_Point', },\r
- 'approxPolyDP' : { 'curve' : 'vector_Point2f', 'approxCurve' : 'vector_Point2f', },\r
- 'arcLength' : { 'curve' : 'vector_Point2f', },\r
- 'pointPolygonTest' : { 'contour' : 'vector_Point2f', },\r
- 'minAreaRect' : { 'points' : 'vector_Point2f', },\r
- 'getAffineTransform' : { 'src' : 'vector_Point2f', 'dst' : 'vector_Point2f', },\r
- 'hconcat' : { 'src' : 'vector_Mat', },\r
- 'vconcat' : { 'src' : 'vector_Mat', },\r
- 'undistortPoints' : { 'src' : 'vector_Point2f', 'dst' : 'vector_Point2f' },\r
- 'checkRange' : {'pos' : '*'},\r
- 'meanStdDev' : {'mean' : 'vector_double', 'stddev' : 'vector_double'},\r
- 'drawContours' : {'contours' : 'vector_vector_Point'},\r
- 'findContours' : {'contours' : 'vector_vector_Point'},\r
- 'convexityDefects' : {'contour' : 'vector_Point'},\r
- 'isContourConvex' : { 'contour' : 'vector_Point2f', },\r
- }, # '', i.e. no class\r
-} # func_arg_fix\r
-\r
-class ConstInfo(object):\r
- def __init__(self, cname, name, val, addedManually=False):\r
- self.cname = cname\r
- self.name = re.sub(r"^Cv", "", name)\r
- self.value = val\r
- self.addedManually = addedManually\r
-\r
-\r
-class ClassPropInfo(object):\r
- def __init__(self, decl): # [f_ctype, f_name, '', '/RW']\r
- self.ctype = decl[0]\r
- self.name = decl[1]\r
- self.rw = "/RW" in decl[3]\r
-\r
-class ClassInfo(object):\r
- def __init__(self, decl): # [ 'class/struct cname', ': base', [modlist] ]\r
- name = decl[0]\r
- name = name[name.find(" ")+1:].strip()\r
- self.cname = self.name = self.jname = re.sub(r"^cv\.", "", name)\r
- self.cname = self.cname.replace(".", "::")\r
- self.methods = {}\r
- self.methods_suffixes = {}\r
- self.consts = [] # using a list to save the occurence order\r
- self.private_consts = []\r
- self.imports = set()\r
- self.props= []\r
- self.jname = self.name\r
- for m in decl[2]:\r
- if m.startswith("="):\r
- self.jname = m[1:]\r
- self.base = ''\r
- if decl[1]:\r
- self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()\r
-\r
-\r
-\r
-class ArgInfo(object):\r
- def __init__(self, arg_tuple): # [ ctype, name, def val, [mod], argno ]\r
- self.pointer = False\r
- ctype = arg_tuple[0]\r
- if ctype.endswith("*"):\r
- ctype = ctype[:-1]\r
- self.pointer = True\r
- if ctype == 'vector_Point2d':\r
- ctype = 'vector_Point2f'\r
- elif ctype == 'vector_Point3d':\r
- ctype = 'vector_Point3f'\r
- self.ctype = ctype\r
- self.name = arg_tuple[1]\r
- self.defval = arg_tuple[2]\r
- self.out = ""\r
- if "/O" in arg_tuple[3]:\r
- self.out = "O"\r
- if "/IO" in arg_tuple[3]:\r
- self.out = "IO"\r
-\r
-\r
-class FuncInfo(object):\r
- def __init__(self, decl): # [ funcname, return_ctype, [modifiers], [args] ]\r
- name = re.sub(r"^cv\.", "", decl[0])\r
- self.cname = name.replace(".", "::")\r
- classname = ""\r
- dpos = name.rfind(".")\r
- if dpos >= 0:\r
- classname = name[:dpos]\r
- name = name[dpos+1:]\r
- self.classname = classname\r
- self.jname = self.name = name\r
- if "[" in name:\r
- self.jname = "getelem"\r
- for m in decl[2]:\r
- if m.startswith("="):\r
- self.jname = m[1:]\r
- self.static = ["","static"][ "/S" in decl[2] ]\r
- self.ctype = decl[1] or ""\r
- self.args = []\r
- arg_fix_map = func_arg_fix.get(classname, {}).get(self.jname, {})\r
- for a in decl[3]:\r
- arg = a[:]\r
- arg[0] = arg_fix_map.get(arg[1], arg[0])\r
- ai = ArgInfo(arg)\r
- self.args.append(ai)\r
-\r
-\r
-\r
-class FuncFamilyInfo(object):\r
- def __init__(self, decl): # [ funcname, return_ctype, [modifiers], [args] ]\r
- self.funcs = []\r
- self.funcs.append( FuncInfo(decl) )\r
- self.jname = self.funcs[0].jname\r
- self.isconstructor = self.funcs[0].name == self.funcs[0].classname\r
-\r
-\r
-\r
- def add_func(self, fi):\r
- self.funcs.append( fi )\r
-\r
-\r
-class JavaWrapperGenerator(object):\r
- def __init__(self):\r
- self.clear()\r
-\r
- def clear(self):\r
- self.classes = { "Mat" : ClassInfo([ 'class Mat', '', [], [] ]) }\r
- self.module = ""\r
- self.Module = ""\r
- self.java_code= {} # { class : {j_code, jn_code} }\r
- self.cpp_code = None\r
- self.ported_func_list = []\r
- self.skipped_func_list = []\r
- self.def_args_hist = {} # { def_args_cnt : funcs_cnt }\r
- self.classes_map = []\r
- self.classes_simple = []\r
-\r
- def add_class_code_stream(self, class_name, cls_base = ''):\r
- jname = self.classes[class_name].jname\r
- self.java_code[class_name] = { "j_code" : StringIO(), "jn_code" : StringIO(), }\r
- if class_name != self.Module:\r
- if cls_base:\r
- self.java_code[class_name]["j_code"].write("""\r
-//\r
-// This file is auto-generated. Please don't modify it!\r
-//\r
-package org.opencv.%(m)s;\r
-\r
-$imports\r
-\r
-// C++: class %(c)s\r
-//javadoc: %(c)s\r
-public class %(jc)s extends %(base)s {\r
-\r
- protected %(jc)s(long addr) { super(addr); }\r
-\r
-""" % { 'm' : self.module, 'c' : class_name, 'jc' : jname, 'base' : cls_base })\r
- else: # not cls_base\r
- self.java_code[class_name]["j_code"].write("""\r
-//\r
-// This file is auto-generated. Please don't modify it!\r
-//\r
-package org.opencv.%(m)s;\r
-\r
-$imports\r
-\r
-// C++: class %(c)s\r
-//javadoc: %(c)s\r
-public class %(jc)s {\r
-\r
- protected final long nativeObj;\r
- protected %(jc)s(long addr) { nativeObj = addr; }\r
-\r
-""" % { 'm' : self.module, 'c' : class_name, 'jc' : jname })\r
- else: # class_name == self.Module\r
- self.java_code[class_name]["j_code"].write("""\r
-//\r
-// This file is auto-generated. Please don't modify it!\r
-//\r
-package org.opencv.%(m)s;\r
-\r
-$imports\r
-\r
-public class %(jc)s {\r
-""" % { 'm' : self.module, 'jc' : jname } )\r
-\r
- self.java_code[class_name]["jn_code"].write("""\r
- //\r
- // native stuff\r
- //\r
- static { System.loadLibrary("opencv_java"); }\r
-""" )\r
-\r
-\r
-\r
- def add_class(self, decl):\r
- classinfo = ClassInfo(decl)\r
- if classinfo.name in class_ignore_list:\r
- return\r
- name = classinfo.name\r
- if name in self.classes:\r
- print "Generator error: class %s (%s) is duplicated" % \\r
- (name, classinfo.cname)\r
- return\r
- self.classes[name] = classinfo\r
- if name in type_dict:\r
- print "Duplicated class: " + name\r
- return\r
- if '/Simple' in decl[2]:\r
- self.classes_simple.append(name)\r
- if ('/Map' in decl[2]):\r
- self.classes_map.append(name)\r
- #adding default c-tor\r
- ffi = FuncFamilyInfo(['cv.'+name+'.'+name, '', [], []])\r
- classinfo.methods[ffi.jname] = ffi\r
- type_dict[name] = \\r
- { "j_type" : classinfo.jname,\r
- "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),\r
- "jni_name" : "(*("+name+"*)%(n)s_nativeObj)", "jni_type" : "jlong",\r
- "suffix" : "J" }\r
- type_dict[name+'*'] = \\r
- { "j_type" : classinfo.jname,\r
- "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),\r
- "jni_name" : "("+name+"*)%(n)s_nativeObj", "jni_type" : "jlong",\r
- "suffix" : "J" }\r
-\r
- # missing_consts { Module : { public : [[name, val],...], private : [[]...] } }\r
- if name in missing_consts:\r
- if 'private' in missing_consts[name]:\r
- for (n, val) in missing_consts[name]['private']:\r
- classinfo.private_consts.append( ConstInfo(n, n, val, True) )\r
- if 'public' in missing_consts[name]:\r
- for (n, val) in missing_consts[name]['public']:\r
- classinfo.consts.append( ConstInfo(n, n, val, True) )\r
-\r
- # class props\r
- for p in decl[3]:\r
- if True: #"vector" not in p[0]:\r
- classinfo.props.append( ClassPropInfo(p) )\r
- else:\r
- print "Skipped property: [%s]" % name, p\r
-\r
- self.add_class_code_stream(name, classinfo.base)\r
-\r
-\r
- def add_const(self, decl): # [ "const cname", val, [], [] ]\r
- name = decl[0].replace("const ", "").strip()\r
- name = re.sub(r"^cv\.", "", name)\r
- cname = name.replace(".", "::")\r
- for c in const_ignore_list:\r
- if re.match(c, name):\r
- return\r
- # class member?\r
- dpos = name.rfind(".")\r
- if dpos >= 0:\r
- classname = name[:dpos]\r
- name = name[dpos+1:]\r
- else:\r
- classname = self.Module\r
- if classname not in self.classes:\r
- # this class isn't wrapped\r
- # skipping this const\r
- return\r
-\r
- consts = self.classes[classname].consts\r
- for c in const_private_list:\r
- if re.match(c, name):\r
- consts = self.classes[classname].private_consts\r
- break\r
-\r
- constinfo = ConstInfo(cname, name, decl[1])\r
- # checking duplication\r
- for list in self.classes[classname].consts, self.classes[classname].private_consts:\r
- for c in list:\r
- if c.name == constinfo.name:\r
- if c.addedManually:\r
- return\r
- print "Generator error: constant %s (%s) is duplicated" \\r
- % (constinfo.name, constinfo.cname)\r
- sys.exit(-1)\r
-\r
- consts.append(constinfo)\r
-\r
- def add_func(self, decl):\r
- ffi = FuncFamilyInfo(decl)\r
- classname = ffi.funcs[0].classname or self.Module\r
- if classname in class_ignore_list:\r
- return\r
- if classname in ManualFuncs and ffi.jname in ManualFuncs[classname]:\r
- return\r
- if classname not in self.classes:\r
- print "Generator error: the class %s for method %s is missing" % \\r
- (classname, ffi.jname)\r
- sys.exit(-1)\r
- func_map = self.classes[classname].methods\r
- if ffi.jname in func_map:\r
- func_map[ffi.jname].add_func(ffi.funcs[0])\r
- else:\r
- func_map[ffi.jname] = ffi\r
- # calc args with def val\r
- cnt = len([a for a in ffi.funcs[0].args if a.defval])\r
- self.def_args_hist[cnt] = self.def_args_hist.get(cnt, 0) + 1\r
-\r
- def save(self, path, buf):\r
- f = open(path, "wt")\r
- f.write(buf)\r
- f.close()\r
-\r
- def gen(self, srcfiles, module, output_path):\r
- self.clear()\r
- self.module = module\r
- self.Module = module.capitalize()\r
- parser = hdr_parser.CppHeaderParser()\r
-\r
- self.add_class( ['class ' + self.Module, '', [], []] ) # [ 'class/struct cname', ':bases', [modlist] [props] ]\r
-\r
- # scan the headers and build more descriptive maps of classes, consts, functions\r
- for hdr in srcfiles:\r
- decls = parser.parse(hdr)\r
- for decl in decls:\r
- name = decl[0]\r
- if name.startswith("struct") or name.startswith("class"):\r
- self.add_class(decl)\r
- elif name.startswith("const"):\r
- self.add_const(decl)\r
- else: # function\r
- self.add_func(decl)\r
-\r
- self.cpp_code = StringIO()\r
- self.cpp_code.write("""\r
-//\r
-// This file is auto-generated, please don't edit!\r
-//\r
-\r
-#include <jni.h>\r
-\r
-#include "converters.h"\r
-\r
-#ifdef DEBUG\r
-#include <android/log.h>\r
-#define MODULE_LOG_TAG "OpenCV.%(m)s"\r
-#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, MODULE_LOG_TAG, __VA_ARGS__))\r
-#else //DEBUG\r
-#define LOGD(...)\r
-#endif //DEBUG\r
-\r
-#include "opencv2/%(m)s/%(m)s.hpp"\r
-\r
-using namespace cv;\r
-\r
-extern "C" {\r
-\r
-""" % {'m' : module} )\r
-\r
- # generate code for the classes\r
- for name in self.classes.keys():\r
- if name == "Mat":\r
- continue\r
- self.gen_class(name)\r
- # saving code streams\r
- imports = "\n".join([ "import %s;" % c for c in \\r
- sorted(self.classes[name].imports) if not c.startswith('org.opencv.'+self.module) ])\r
- self.java_code[name]["j_code"].write("\n\n%s\n}\n" % self.java_code[name]["jn_code"].getvalue())\r
- java_code = self.java_code[name]["j_code"].getvalue()\r
- java_code = Template(java_code).substitute(imports = imports)\r
- self.save("%s/%s+%s.java" % (output_path, module, self.classes[name].jname), java_code)\r
-\r
- self.cpp_code.write( '\n} // extern "C"\n' )\r
- self.save(output_path+"/"+module+".cpp", self.cpp_code.getvalue())\r
-\r
- # report\r
- report = StringIO()\r
- report.write("PORTED FUNCs LIST (%i of %i):\n\n" % \\r
- (len(self.ported_func_list), len(self.ported_func_list)+ len(self.skipped_func_list))\r
- )\r
- report.write("\n".join(self.ported_func_list))\r
- report.write("\n\nSKIPPED FUNCs LIST (%i of %i):\n\n" % \\r
- (len(self.skipped_func_list), len(self.ported_func_list)+ len(self.skipped_func_list))\r
- )\r
- report.write("".join(self.skipped_func_list))\r
-\r
- for i in self.def_args_hist.keys():\r
- report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i]))\r
-\r
- report.write("\n\nclass as MAP:\n\t" + "\n\t".join(self.classes_map))\r
- report.write("\n\nclass SIMPLE:\n\t" + "\n\t".join(self.classes_simple))\r
-\r
- self.save(output_path+"/"+module+".txt", report.getvalue())\r
-\r
- print "Done %i of %i funcs." % (len(self.ported_func_list), len(self.ported_func_list)+ len(self.skipped_func_list))\r
-\r
-\r
-\r
- def get_imports(self, scope_classname, ctype):\r
- imports = self.classes[scope_classname or self.Module].imports\r
- if ctype.startswith('vector'):\r
- imports.add("org.opencv.core.Mat")\r
- if type_dict[ctype]['j_type'].startswith('MatOf'):\r
- imports.add("org.opencv.core." + type_dict[ctype]['j_type'])\r
- return #TMP\r
- else:\r
- imports.add("java.util.List")\r
- imports.add("org.opencv.utils.Converters")\r
- ctype = ctype.replace('vector_', '')\r
- j_type = ''\r
- if ctype in type_dict:\r
- j_type = type_dict[ctype]['j_type']\r
- if j_type in ( "CvType", "Mat", "Point", "Point3", "Range", "Rect", "RotatedRect", "Scalar", "Size", "TermCriteria" ):\r
- imports.add("org.opencv.core." + j_type)\r
- if j_type == 'String':\r
- imports.add("java.lang.String")\r
-\r
-\r
-\r
- def gen_func(self, fi, prop_name=''):\r
- j_code = self.java_code[fi.classname or self.Module]["j_code"]\r
- jn_code = self.java_code[fi.classname or self.Module]["jn_code"]\r
- cpp_code = self.cpp_code\r
-\r
- # c_decl\r
- # e.g: void add(Mat src1, Mat src2, Mat dst, Mat mask = Mat(), int dtype = -1)\r
- if prop_name:\r
- c_decl = "%s %s::%s" % (fi.ctype, fi.classname, prop_name)\r
- else:\r
- decl_args = []\r
- for a in fi.args:\r
- s = a.ctype or ' _hidden_ '\r
- if a.pointer:\r
- s += "*"\r
- elif a.out:\r
- s += "&"\r
- s += " " + a.name\r
- if a.defval:\r
- s += " = "+a.defval\r
- decl_args.append(s)\r
- c_decl = "%s %s %s(%s)" % ( fi.static, fi.ctype, fi.cname, ", ".join(decl_args) )\r
-\r
- # java comment\r
- j_code.write( "\n //\n // C++: %s\n //\n\n" % c_decl )\r
- # check if we 'know' all the types\r
- if fi.ctype not in type_dict: # unsupported ret type\r
- msg = "// Return type '%s' is not supported, skipping the function\n\n" % fi.ctype\r
- self.skipped_func_list.append(c_decl + "\n" + msg)\r
- j_code.write( " "*4 + msg )\r
- print "SKIP:", c_decl, "\n\tdue to RET type", fi.ctype\r
- return\r
- for a in fi.args:\r
- if a.ctype not in type_dict:\r
- msg = "// Unknown type '%s' (%s), skipping the function\n\n" % (a.ctype, a.out or "I")\r
- self.skipped_func_list.append(c_decl + "\n" + msg)\r
- j_code.write( " "*4 + msg )\r
- print "SKIP:", c_decl, "\n\tdue to ARG type", a.ctype, "/" + (a.out or "I")\r
- return\r
-\r
- self.ported_func_list.append(c_decl)\r
-\r
- # jn & cpp comment\r
- jn_code.write( "\n // C++: %s\n" % c_decl )\r
- cpp_code.write( "\n//\n// %s\n//\n" % c_decl )\r
-\r
- # java args\r
- args = fi.args[:] # copy\r
- suffix_counter = int( self.classes[fi.classname or self.Module].methods_suffixes.get(fi.jname, -1) )\r
- while True:\r
- suffix_counter += 1\r
- self.classes[fi.classname or self.Module].methods_suffixes[fi.jname] = suffix_counter\r
- # java native method args\r
- jn_args = []\r
- # jni (cpp) function args\r
- jni_args = [ArgInfo([ "env", "env", "", [], "" ]), ArgInfo([ "cls", "cls", "", [], "" ])]\r
- j_prologue = []\r
- j_epilogue = []\r
- c_prologue = []\r
- c_epilogue = []\r
- if type_dict[fi.ctype]["jni_type"] == "jdoubleArray":\r
- fields = type_dict[fi.ctype]["jn_args"]\r
- c_epilogue.append( \\r
- ("jdoubleArray _da_retval_ = env->NewDoubleArray(%(cnt)i); " +\r
- "jdouble _tmp_retval_[%(cnt)i] = {%(args)s}; " +\r
- "env->SetDoubleArrayRegion(_da_retval_, 0, %(cnt)i, _tmp_retval_);") %\r
- { "cnt" : len(fields), "args" : ", ".join(["_retval_" + f[1] for f in fields]) } )\r
- if fi.classname and fi.ctype and not fi.static: # non-static class method except c-tor\r
- # adding 'self'\r
- jn_args.append ( ArgInfo([ "__int64", "nativeObj", "", [], "" ]) )\r
- jni_args.append( ArgInfo([ "__int64", "self", "", [], "" ]) )\r
- self.get_imports(fi.classname, fi.ctype)\r
- for a in args:\r
- if not a.ctype: # hidden\r
- continue\r
- self.get_imports(fi.classname, a.ctype)\r
- if "vector" in a.ctype: # pass as Mat\r
- jn_args.append ( ArgInfo([ "__int64", "%s_mat.nativeObj" % a.name, "", [], "" ]) )\r
- jni_args.append ( ArgInfo([ "__int64", "%s_mat_nativeObj" % a.name, "", [], "" ]) )\r
- c_prologue.append( type_dict[a.ctype]["jni_var"] % {"n" : a.name} + ";" )\r
- c_prologue.append( "Mat& %(n)s_mat = *((Mat*)%(n)s_mat_nativeObj)" % {"n" : a.name} + ";" )\r
- if "I" in a.out or not a.out:\r
- if a.ctype.startswith("vector_vector_"):\r
- self.classes[fi.classname or self.Module].imports.add("java.util.ArrayList")\r
- j_prologue.append( "List<Mat> %(n)s_tmplm = new ArrayList<Mat>((%(n)s != null) ? %(n)s.size() : 0);" % {"n" : a.name } )\r
- j_prologue.append( "Mat %(n)s_mat = Converters.%(t)s_to_Mat(%(n)s, %(n)s_tmplm);" % {"n" : a.name, "t" : a.ctype} )\r
- else:\r
- if not type_dict[a.ctype]["j_type"].startswith("MatOf"):\r
- j_prologue.append( "Mat %(n)s_mat = Converters.%(t)s_to_Mat(%(n)s);" % {"n" : a.name, "t" : a.ctype} )\r
- else:\r
- j_prologue.append( "Mat %s_mat = %s;" % (a.name, a.name) )\r
- c_prologue.append( "Mat_to_%(t)s( %(n)s_mat, %(n)s );" % {"n" : a.name, "t" : a.ctype} )\r
- else:\r
- if not type_dict[a.ctype]["j_type"].startswith("MatOf"):\r
- j_prologue.append( "Mat %s_mat = new Mat();" % a.name )\r
- else:\r
- j_prologue.append( "Mat %s_mat = %s;" % (a.name, a.name) )\r
- if "O" in a.out:\r
- if not type_dict[a.ctype]["j_type"].startswith("MatOf"):\r
- j_epilogue.append("Converters.Mat_to_%(t)s(%(n)s_mat, %(n)s);" % {"t" : a.ctype, "n" : a.name})\r
- c_epilogue.append( "%(t)s_to_Mat( %(n)s, %(n)s_mat );" % {"n" : a.name, "t" : a.ctype} )\r
- else:\r
- fields = type_dict[a.ctype].get("jn_args", ((a.ctype, ""),))\r
- if "I" in a.out or not a.out or a.ctype in self.classes: # input arg, pass by primitive fields\r
- for f in fields:\r
- jn_args.append ( ArgInfo([ f[0], a.name + f[1], "", [], "" ]) )\r
- jni_args.append( ArgInfo([ f[0], a.name + f[1].replace(".","_").replace("[","").replace("]",""), "", [], "" ]) )\r
- if a.out and a.ctype not in self.classes: # out arg, pass as double[]\r
- jn_args.append ( ArgInfo([ "double[]", "%s_out" % a.name, "", [], "" ]) )\r
- jni_args.append ( ArgInfo([ "double[]", "%s_out" % a.name, "", [], "" ]) )\r
- j_prologue.append( "double[] %s_out = new double[%i];" % (a.name, len(fields)) )\r
- c_epilogue.append( \\r
- "jdouble tmp_%(n)s[%(cnt)i] = {%(args)s}; env->SetDoubleArrayRegion(%(n)s_out, 0, %(cnt)i, tmp_%(n)s);" %\r
- { "n" : a.name, "cnt" : len(fields), "args" : ", ".join([a.name + f[1] for f in fields]) } )\r
- if a.ctype in ('bool', 'int', 'long', 'float', 'double'):\r
- j_epilogue.append('if(%(n)s!=null) %(n)s[0] = (%(t)s)%(n)s_out[0];' % {'n':a.name,'t':a.ctype})\r
- else:\r
- set_vals = []\r
- i = 0\r
- for f in fields:\r
- set_vals.append( "%(n)s%(f)s = %(t)s%(n)s_out[%(i)i]" %\r
- {"n" : a.name, "t": ("("+type_dict[f[0]]["j_type"]+")", "")[f[0]=="double"], "f" : f[1], "i" : i}\r
- )\r
- i += 1\r
- j_epilogue.append( "if("+a.name+"!=null){ " + "; ".join(set_vals) + "; } ")\r
-\r
-\r
- # java part:\r
- # private java NATIVE method decl\r
- # e.g.\r
- # private static native void add_0(long src1, long src2, long dst, long mask, int dtype);\r
- jn_code.write( Template(\\r
- " private static native $type $name($args);\n").substitute(\\r
- type = type_dict[fi.ctype].get("jn_type", "double[]"), \\r
- name = fi.jname + '_' + str(suffix_counter), \\r
- args = ", ".join(["%s %s" % (type_dict[a.ctype]["jn_type"], a.name.replace(".","_").replace("[","").replace("]","")) for a in jn_args])\r
- ) );\r
-\r
- # java part:\r
-\r
- #java doc comment\r
- f_name = fi.name\r
- if fi.classname:\r
- f_name = fi.classname + "::" + fi.name\r
- java_doc = "//javadoc: " + f_name + "(%s)" % ", ".join([a.name for a in args])\r
- j_code.write(" "*4 + java_doc + "\n")\r
-\r
- # public java wrapper method impl (calling native one above)\r
- # e.g.\r
- # public static void add( Mat src1, Mat src2, Mat dst, Mat mask, int dtype )\r
- # { add_0( src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj, dtype ); }\r
- ret_type = fi.ctype\r
- if fi.ctype.endswith('*'):\r
- ret_type = ret_type[:-1]\r
- ret_val = type_dict[ret_type]["j_type"] + " retVal = "\r
- tail = ""\r
- ret = "return retVal;"\r
- if ret_type.startswith('vector'):\r
- tail = ")"\r
- j_type = type_dict[ret_type]["j_type"]\r
- if j_type.startswith('MatOf'):\r
- ret_val += "new " + j_type + "("\r
- m_t = re.match('vector_(\w+)', ret_type)\r
- m_ch = re.match('vector_Vec(\d+)', ret_type)\r
- if m_ch:\r
- ret_val += m_ch.group(1) + ', '\r
- elif m_t.group(1) in ('char', 'uchar', 'int', 'float', 'double'):\r
- ret_val += '1, '\r
- else:\r
- ret_val = "Mat retValMat = new Mat("\r
- j_prologue.append( j_type + ' retVal = new Array' + j_type+'();')\r
- self.classes[fi.classname or self.Module].imports.add('java.util.ArrayList')\r
- j_epilogue.append('Converters.Mat_to_' + ret_type + '(retValMat, retVal);')\r
- elif ret_type == "void":\r
- ret_val = ""\r
- ret = "return;"\r
- elif ret_type == "": # c-tor\r
- if fi.classname and self.classes[fi.classname].base:\r
- ret_val = "super( "\r
- tail = " )"\r
- else:\r
- ret_val = "nativeObj = "\r
- ret = "return;"\r
- elif ret_type in self.classes: # wrapped class\r
- ret_val = type_dict[ret_type]["j_type"] + " retVal = new " + self.classes[ret_type].jname + "("\r
- tail = ")"\r
- elif "jn_type" not in type_dict[ret_type]:\r
- ret_val = type_dict[fi.ctype]["j_type"] + " retVal = new " + type_dict[ret_type]["j_type"] + "("\r
- tail = ")"\r
-\r
- static = "static"\r
- if fi.classname:\r
- static = fi.static\r
-\r
- j_args = []\r
- for a in args:\r
- if not a.ctype: #hidden\r
- continue\r
- jt = type_dict[a.ctype]["j_type"]\r
- if a.out and a.ctype in ('bool', 'int', 'long', 'float', 'double'):\r
- jt += '[]'\r
- j_args.append( jt + ' ' + a.name )\r
-\r
- j_code.write( Template(\\r
-""" public $static $j_type $j_name($j_args)\r
- {\r
- $prologue\r
- $ret_val$jn_name($jn_args_call)$tail;\r
- $epilogue\r
- $ret\r
- }\r
-\r
-"""\r
- ).substitute(\\r
- ret = ret, \\r
- ret_val = ret_val, \\r
- tail = tail, \\r
- prologue = "\n ".join(j_prologue), \\r
- epilogue = "\n ".join(j_epilogue), \\r
- static=static, \\r
- j_type=type_dict[fi.ctype]["j_type"], \\r
- j_name=fi.jname, \\r
- j_args=", ".join(j_args), \\r
- jn_name=fi.jname + '_' + str(suffix_counter), \\r
- jn_args_call=", ".join( [a.name for a in jn_args] ),\\r
- )\r
- )\r
-\r
-\r
- # cpp part:\r
- # jni_func(..) { _retval_ = cv_func(..); return _retval_; }\r
- ret = "return _retval_;"\r
- default = "return 0;"\r
- if fi.ctype == "void":\r
- ret = "return;"\r
- default = "return;"\r
- elif not fi.ctype: # c-tor\r
- ret = "return (jlong) _retval_;"\r
- elif fi.ctype.startswith('vector'): # c-tor\r
- ret = "return (jlong) _retval_;"\r
- elif fi.ctype == "string":\r
- ret = "return env->NewStringUTF(_retval_.c_str());"\r
- default = 'return env->NewStringUTF("");'\r
- elif fi.ctype in self.classes: # wrapped class:\r
- ret = "return (jlong) new %s(_retval_);" % fi.ctype\r
- elif ret_type in self.classes: # pointer to wrapped class:\r
- ret = "return (jlong) _retval_;"\r
- elif type_dict[fi.ctype]["jni_type"] == "jdoubleArray":\r
- ret = "return _da_retval_;"\r
-\r
- # hack: replacing func call with property set/get\r
- name = fi.name\r
- if prop_name:\r
- if args:\r
- name = prop_name + " = "\r
- else:\r
- name = prop_name + ";//"\r
-\r
- cvname = "cv::" + name\r
- retval = fi.ctype + " _retval_ = "\r
- if fi.ctype == "void":\r
- retval = ""\r
- elif fi.ctype.startswith('vector'):\r
- retval = type_dict[fi.ctype]['jni_var'] % {"n" : '_ret_val_vector_'} + " = "\r
- c_epilogue.append("Mat* _retval_ = new Mat();")\r
- c_epilogue.append(fi.ctype+"_to_Mat(_ret_val_vector_, *_retval_);")\r
- if fi.classname:\r
- if not fi.ctype: # c-tor\r
- retval = fi.classname + "* _retval_ = "\r
- cvname = "new " + fi.classname\r
- elif fi.static:\r
- cvname = "%s::%s" % (fi.classname, name)\r
- else:\r
- cvname = "me->" + name\r
- c_prologue.append(\\r
- "%(cls)s* me = (%(cls)s*) self; //TODO: check for NULL" \\r
- % { "cls" : fi.classname} \\r
- )\r
- cvargs = []\r
- for a in args:\r
- if a.pointer:\r
- jni_name = "&%(n)s"\r
- else:\r
- jni_name = "%(n)s"\r
- if not a.ctype: # hidden\r
- jni_name = a.defval\r
- cvargs.append( type_dict[a.ctype].get("jni_name", jni_name) % {"n" : a.name})\r
- if "vector" not in a.ctype :\r
- if ("I" in a.out or not a.out or a.ctype in self.classes) and "jni_var" in type_dict[a.ctype]: # complex type\r
- c_prologue.append(type_dict[a.ctype]["jni_var"] % {"n" : a.name} + ";")\r
- if a.out and "I" not in a.out and a.ctype not in self.classes and a.ctype:\r
- c_prologue.append("%s %s;" % (a.ctype, a.name))\r
-\r
- rtype = type_dict[fi.ctype].get("jni_type", "jdoubleArray")\r
- clazz = self.Module\r
- if fi.classname:\r
- clazz = self.classes[fi.classname].jname\r
- cpp_code.write ( Template( \\r
-"""\r
-\r
-JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname\r
- ($args)\r
-{\r
- try {\r
- LOGD("$module::$fname()");\r
- $prologue\r
- $retval$cvname( $cvargs );\r
- $epilogue\r
- $ret\r
- } catch(cv::Exception e) {\r
- LOGD("$module::$fname() catched cv::Exception: %s", e.what());\r
- jclass je = env->FindClass("org/opencv/core/CvException");\r
- if(!je) je = env->FindClass("java/lang/Exception");\r
- env->ThrowNew(je, e.what());\r
- $default\r
- } catch (...) {\r
- LOGD("$module::$fname() catched unknown exception (...)");\r
- jclass je = env->FindClass("java/lang/Exception");\r
- env->ThrowNew(je, "Unknown exception in JNI code {$module::$fname()}");\r
- $default\r
- }\r
-}\r
-\r
-\r
-""" ).substitute( \\r
- rtype = rtype, \\r
- module = self.module, \\r
- clazz = clazz.replace('_', '_1'), \\r
- fname = (fi.jname + '_' + str(suffix_counter)).replace('_', '_1'), \\r
- args = ", ".join(["%s %s" % (type_dict[a.ctype].get("jni_type"), a.name) for a in jni_args]), \\r
- prologue = "\n ".join(c_prologue), \\r
- epilogue = " ".join(c_epilogue), \\r
- ret = ret, \\r
- cvname = cvname, \\r
- cvargs = ", ".join(cvargs), \\r
- default = default, \\r
- retval = retval, \\r
- ) )\r
-\r
- # processing args with default values\r
- if not args or not args[-1].defval:\r
- break\r
- while args and args[-1].defval:\r
- # 'smart' overloads filtering\r
- a = args.pop()\r
- if a.name in ('mask', 'dtype', 'ddepth', 'lineType', 'borderType', 'borderMode', 'criteria'):\r
- break\r
-\r
-\r
-\r
- def gen_class(self, name):\r
- # generate code for the class\r
- ci = self.classes[name]\r
- # constants\r
- if ci.private_consts:\r
- self.java_code[name]['j_code'].write("""\r
- private static final int\r
- %s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in ci.private_consts])\r
- )\r
- if ci.consts:\r
- self.java_code[name]['j_code'].write("""\r
- public static final int\r
- %s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in ci.consts])\r
- )\r
- # c-tors\r
- fflist = ci.methods.items()\r
- fflist.sort()\r
- for n, ffi in fflist:\r
- if ffi.isconstructor:\r
- for fi in ffi.funcs:\r
- fi.jname = ci.jname\r
- self.gen_func(fi)\r
- # other methods\r
- for n, ffi in fflist:\r
- if not ffi.isconstructor:\r
- for fi in ffi.funcs:\r
- self.gen_func(fi)\r
- # props\r
- for pi in ci.props:\r
- # getter\r
- getter_name = name + ".get_" + pi.name\r
- #print getter_name\r
- fi = FuncInfo( [getter_name, pi.ctype, [], []] ) # [ funcname, return_ctype, [modifiers], [args] ]\r
- self.gen_func(fi, pi.name)\r
- if pi.rw:\r
- #setter\r
- setter_name = name + ".set_" + pi.name\r
- #print setter_name\r
- fi = FuncInfo( [ setter_name, "void", [], [ [pi.ctype, pi.name, "", [], ""] ] ] )\r
- self.gen_func(fi, pi.name)\r
-\r
- # manual ports\r
- if name in ManualFuncs:\r
- for func in ManualFuncs[name].keys():\r
- self.java_code[name]["j_code"].write ( ManualFuncs[name][func]["j_code"] )\r
- self.java_code[name]["jn_code"].write( ManualFuncs[name][func]["jn_code"] )\r
- self.cpp_code.write( ManualFuncs[name][func]["cpp_code"] )\r
-\r
- if name != self.Module:\r
- # finalize()\r
- self.java_code[name]["j_code"].write(\r
-"""\r
- @Override\r
- protected void finalize() throws Throwable {\r
- delete(nativeObj);\r
- }\r
-""" )\r
-\r
- self.java_code[name]["jn_code"].write(\r
-"""\r
- // native support for java finalize()\r
- private static native void delete(long nativeObj);\r
-""" )\r
-\r
- # native support for java finalize()\r
- self.cpp_code.write( \\r
-"""\r
-//\r
-// native support for java finalize()\r
-// static void %(cls)s::delete( __int64 self )\r
-//\r
-\r
-JNIEXPORT void JNICALL Java_org_opencv_%(module)s_%(j_cls)s_delete\r
- (JNIEnv* env, jclass cls, jlong self)\r
-{\r
- delete (%(cls)s*) self;\r
-}\r
-\r
-""" % {"module" : module, "cls" : name, "j_cls" : ci.jname}\r
- )\r
-\r
-\r
-if __name__ == "__main__":\r
- if len(sys.argv) < 4:\r
- print "Usage:\n", \\r
- os.path.basename(sys.argv[0]), \\r
- "<full path to hdr_parser.py> <module name> <C++ header> [<C++ header>...]"\r
- print "Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv])\r
- exit(0)\r
-\r
- dstdir = "."\r
- hdr_parser_path = os.path.abspath(sys.argv[1])\r
- if hdr_parser_path.endswith(".py"):\r
- hdr_parser_path = os.path.dirname(hdr_parser_path)\r
- sys.path.append(hdr_parser_path)\r
- import hdr_parser\r
- module = sys.argv[2]\r
- srcfiles = sys.argv[3:]\r
- print "Generating module '" + module + "' from headers:\n\t" + "\n\t".join(srcfiles)\r
- generator = JavaWrapperGenerator()\r
- generator.gen(srcfiles, module, dstdir)\r
-\r
+import sys, re, os.path
+from string import Template
+
+try:
+ from cStringIO import StringIO
+except:
+ from StringIO import StringIO
+
+class_ignore_list = (
+ #core
+ "FileNode", "FileStorage", "KDTree",
+ #highgui
+ "VideoWriter", "VideoCapture",
+ #features2d
+ #"KeyPoint", "MSER", "StarDetector", "SURF", "DMatch",
+ #ml
+ #"EM",
+)
+
+const_ignore_list = (
+ "CV_CAP_OPENNI",
+ "CV_CAP_PROP_OPENNI_",
+ "WINDOW_AUTOSIZE",
+ "CV_WND_PROP_",
+ "CV_WINDOW_",
+ "CV_EVENT_",
+ "CV_GUI_",
+ "CV_PUSH_BUTTON",
+ "CV_CHECKBOX",
+ "CV_RADIOBOX",
+
+    # Attention!
+    # The following constants were added to this list by automatic code generation.
+    # TODO: they should be checked.
+ "CV_CAP_ANY",
+ "CV_CAP_MIL",
+ "CV_CAP_VFW",
+ "CV_CAP_V4L",
+ "CV_CAP_V4L2",
+ "CV_CAP_FIREWARE",
+ "CV_CAP_FIREWIRE",
+ "CV_CAP_IEEE1394",
+ "CV_CAP_DC1394",
+ "CV_CAP_CMU1394",
+ "CV_CAP_STEREO",
+ "CV_CAP_TYZX",
+ "CV_TYZX_LEFT",
+ "CV_TYZX_RIGHT",
+ "CV_TYZX_COLOR",
+ "CV_TYZX_Z",
+ "CV_CAP_QT",
+ "CV_CAP_UNICAP",
+ "CV_CAP_DSHOW",
+ "CV_CAP_PVAPI",
+ "CV_CAP_PROP_DC1394_OFF",
+ "CV_CAP_PROP_DC1394_MODE_MANUAL",
+ "CV_CAP_PROP_DC1394_MODE_AUTO",
+ "CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO",
+ "CV_CAP_PROP_POS_MSEC",
+ "CV_CAP_PROP_POS_FRAMES",
+ "CV_CAP_PROP_POS_AVI_RATIO",
+ "CV_CAP_PROP_FPS",
+ "CV_CAP_PROP_FOURCC",
+ "CV_CAP_PROP_FRAME_COUNT",
+ "CV_CAP_PROP_FORMAT",
+ "CV_CAP_PROP_MODE",
+ "CV_CAP_PROP_BRIGHTNESS",
+ "CV_CAP_PROP_CONTRAST",
+ "CV_CAP_PROP_SATURATION",
+ "CV_CAP_PROP_HUE",
+ "CV_CAP_PROP_GAIN",
+ "CV_CAP_PROP_EXPOSURE",
+ "CV_CAP_PROP_CONVERT_RGB",
+ "CV_CAP_PROP_WHITE_BALANCE_BLUE_U",
+ "CV_CAP_PROP_RECTIFICATION",
+ "CV_CAP_PROP_MONOCROME",
+ "CV_CAP_PROP_SHARPNESS",
+ "CV_CAP_PROP_AUTO_EXPOSURE",
+ "CV_CAP_PROP_GAMMA",
+ "CV_CAP_PROP_TEMPERATURE",
+ "CV_CAP_PROP_TRIGGER",
+ "CV_CAP_PROP_TRIGGER_DELAY",
+ "CV_CAP_PROP_WHITE_BALANCE_RED_V",
+ "CV_CAP_PROP_MAX_DC1394",
+ "CV_CAP_GSTREAMER_QUEUE_LENGTH",
+ "CV_CAP_PROP_PVAPI_MULTICASTIP",
+ "CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING",
+ "EVENT_.*",
+ "CV_L?(BGRA?|RGBA?|GRAY|XYZ|YCrCb|Luv|Lab|HLS|YUV|HSV)\d*2L?(BGRA?|RGBA?|GRAY|XYZ|YCrCb|Luv|Lab|HLS|YUV|HSV).*",
+ "CV_COLORCVT_MAX",
+ "CV_.*Bayer.*",
+ "CV_YUV420(i|sp|p)2.+",
+ "CV_TM_.+",
+ "CV_FLOODFILL_.+",
+ "CV_ADAPTIVE_THRESH_.+",
+ "WINDOW_.+",
+ "WND_PROP_.+",
+)
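
Entries in const_ignore_list are regular expressions, not plain names: add_const() below tests each incoming constant with re.match, which anchors at the start of the string, so plain strings act as prefixes. A minimal sketch of the filtering (the reduced pattern list here is illustrative):

import re

ignore_patterns = ("CV_CAP_PROP_OPENNI_", "EVENT_.*")  # reduced, for illustration

def is_ignored(const_name):
    # re.match anchors at the beginning of const_name
    return any(re.match(p, const_name) for p in ignore_patterns)

assert is_ignored("CV_CAP_PROP_OPENNI_BASELINE")
assert is_ignored("EVENT_LBUTTONDOWN")
assert not is_ignored("CV_CAP_PROP_FRAME_WIDTH")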
+
+const_private_list = (
+ "CV_MOP_.+",
+ "CV_INTER_.+",
+ "CV_THRESH_.+",
+ "CV_INPAINT_.+",
+ "CV_RETR_.+",
+ "CV_CHAIN_APPROX_.+",
+ "OPPONENTEXTRACTOR",
+ "GRIDRETECTOR",
+ "PYRAMIDDETECTOR",
+ "DYNAMICDETECTOR",
+)
+
+# { Module : { public : [[name, val],...], private : [[]...] } }
+missing_consts = \
+{
+ 'Core' :
+ {
+ 'private' :
+ (
+ ('CV_8U', 0 ), ('CV_8S', 1 ),
+ ('CV_16U', 2 ), ('CV_16S', 3 ),
+ ('CV_32S', 4 ),
+ ('CV_32F', 5 ), ('CV_64F', 6 ),
+ ('CV_USRTYPE1', 7 ),
+ ), # private
+ 'public' :
+ (
+ ('SVD_MODIFY_A', 1), ('SVD_NO_UV', 2), ('SVD_FULL_UV', 4),
+ ('FILLED', -1),
+ ('LINE_AA', 16), ('LINE_8', 8), ('LINE_4', 4),
+ ('REDUCE_SUM', 0), ('REDUCE_AVG', 1), ('REDUCE_MAX', 2), ('REDUCE_MIN', 3),
+ ) #public
+ }, # Core
+
+ "Imgproc":
+ {
+ 'private' :
+ (
+ ('IPL_BORDER_CONSTANT', 0 ),
+ ('IPL_BORDER_REPLICATE', 1 ),
+ ('IPL_BORDER_REFLECT', 2 ),
+ ('IPL_BORDER_WRAP', 3 ),
+ ('IPL_BORDER_REFLECT_101', 4 ),
+ ('IPL_BORDER_TRANSPARENT', 5 ),
+ ) # private
+ }, # Imgproc
+
+ "Calib3d":
+ {
+ 'private' :
+ (
+ ('CV_LMEDS', 4),
+ ('CV_RANSAC', 8),
+ ('CV_FM_LMEDS', 'CV_LMEDS'),
+ ('CV_FM_RANSAC','CV_RANSAC'),
+ ('CV_FM_7POINT', 1),
+ ('CV_FM_8POINT', 2),
+ ('CV_CALIB_USE_INTRINSIC_GUESS', 1),
+ ('CV_CALIB_FIX_ASPECT_RATIO', 2),
+ ('CV_CALIB_FIX_PRINCIPAL_POINT', 4),
+ ('CV_CALIB_ZERO_TANGENT_DIST', 8),
+ ('CV_CALIB_FIX_FOCAL_LENGTH', 16),
+ ('CV_CALIB_FIX_K1', 32),
+ ('CV_CALIB_FIX_K2', 64),
+ ('CV_CALIB_FIX_K3', 128),
+ ('CV_CALIB_FIX_K4', 2048),
+ ('CV_CALIB_FIX_K5', 4096),
+ ('CV_CALIB_FIX_K6', 8192),
+ ('CV_CALIB_RATIONAL_MODEL', 16384),
+ ('CV_CALIB_FIX_INTRINSIC', 256),
+ ('CV_CALIB_SAME_FOCAL_LENGTH', 512),
+ ('CV_CALIB_ZERO_DISPARITY', 1024),
+        ) # private
+ }, # Calib3d
+
+}
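
These hand-maintained constants are attached to their classes in add_class() and written out by gen_class() as grouped Java declarations. A sketch of the string formatting involved, using the Core 'public' entries above:

consts = (('SVD_MODIFY_A', 1), ('SVD_NO_UV', 2), ('SVD_FULL_UV', 4))
# gen_class() joins the pairs with ",\n" plus a 12-space indent:
body = (",\n" + " " * 12).join(["%s = %s" % (n, v) for (n, v) in consts])
java = "    public static final int\n            %s;\n" % body
assert "SVD_NO_UV = 2," in java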
+
+
+# c_type : { java/jni correspondence }
+type_dict = {
+# "simple" : { j_type : "?", jn_type : "?", jni_type : "?", suffix : "?" },
+ "" : { "j_type" : "", "jn_type" : "long", "jni_type" : "jlong" }, # c-tor ret_type
+ "void" : { "j_type" : "void", "jn_type" : "void", "jni_type" : "void" },
+ "env" : { "j_type" : "", "jn_type" : "", "jni_type" : "JNIEnv*"},
+ "cls" : { "j_type" : "", "jn_type" : "", "jni_type" : "jclass"},
+ "bool" : { "j_type" : "boolean", "jn_type" : "boolean", "jni_type" : "jboolean", "suffix" : "Z" },
+ "int" : { "j_type" : "int", "jn_type" : "int", "jni_type" : "jint", "suffix" : "I" },
+ "long" : { "j_type" : "int", "jn_type" : "int", "jni_type" : "jint", "suffix" : "I" },
+ "float" : { "j_type" : "float", "jn_type" : "float", "jni_type" : "jfloat", "suffix" : "F" },
+ "double" : { "j_type" : "double", "jn_type" : "double", "jni_type" : "jdouble", "suffix" : "D" },
+ "size_t" : { "j_type" : "long", "jn_type" : "long", "jni_type" : "jlong", "suffix" : "J" },
+ "__int64" : { "j_type" : "long", "jn_type" : "long", "jni_type" : "jlong", "suffix" : "J" },
+ "int64" : { "j_type" : "long", "jn_type" : "long", "jni_type" : "jlong", "suffix" : "J" },
+ "double[]": { "j_type" : "double[]", "jn_type" : "double[]", "jni_type" : "jdoubleArray", "suffix" : "_3D" },
+
+# "complex" : { j_type : "?", jn_args : (("", ""),), jn_name : "", jni_var : "", jni_name : "", "suffix" : "?" },
+
+ "vector_Point" : { "j_type" : "MatOfPoint", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point> %(n)s", "suffix" : "J" },
+ "vector_Point2f" : { "j_type" : "MatOfPoint2f", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point2f> %(n)s", "suffix" : "J" },
+ #"vector_Point2d" : { "j_type" : "MatOfPoint2d", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point2d> %(n)s", "suffix" : "J" },
+ "vector_Point3i" : { "j_type" : "MatOfPoint3", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point3i> %(n)s", "suffix" : "J" },
+ "vector_Point3f" : { "j_type" : "MatOfPoint3f", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point3f> %(n)s", "suffix" : "J" },
+ #"vector_Point3d" : { "j_type" : "MatOfPoint3d", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Point3d> %(n)s", "suffix" : "J" },
+ "vector_KeyPoint" : { "j_type" : "MatOfKeyPoint", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<KeyPoint> %(n)s", "suffix" : "J" },
+ "vector_DMatch" : { "j_type" : "MatOfDMatch", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<DMatch> %(n)s", "suffix" : "J" },
+ "vector_Rect" : { "j_type" : "MatOfRect", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Rect> %(n)s", "suffix" : "J" },
+ "vector_uchar" : { "j_type" : "MatOfByte", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<uchar> %(n)s", "suffix" : "J" },
+ "vector_char" : { "j_type" : "MatOfByte", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<char> %(n)s", "suffix" : "J" },
+ "vector_int" : { "j_type" : "MatOfInt", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<int> %(n)s", "suffix" : "J" },
+ "vector_float" : { "j_type" : "MatOfFloat", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<float> %(n)s", "suffix" : "J" },
+ "vector_double" : { "j_type" : "MatOfDouble", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<double> %(n)s", "suffix" : "J" },
+ "vector_Vec4i" : { "j_type" : "MatOfInt4", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Vec4i> %(n)s", "suffix" : "J" },
+ "vector_Vec4f" : { "j_type" : "MatOfFloat4", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Vec4f> %(n)s", "suffix" : "J" },
+ "vector_Vec6f" : { "j_type" : "MatOfFloat6", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Vec6f> %(n)s", "suffix" : "J" },
+
+ "vector_Mat" : { "j_type" : "List<Mat>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector<Mat> %(n)s", "suffix" : "J" },
+
+ "vector_vector_KeyPoint": { "j_type" : "List<MatOfKeyPoint>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<KeyPoint> > %(n)s" },
+ "vector_vector_DMatch" : { "j_type" : "List<MatOfDMatch>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<DMatch> > %(n)s" },
+ "vector_vector_char" : { "j_type" : "List<MatOfByte>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<char> > %(n)s" },
+ "vector_vector_Point" : { "j_type" : "List<MatOfPoint>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<Point> > %(n)s" },
+ "vector_vector_Point2f" : { "j_type" : "List<MatOfPoint2f>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<Point2f> > %(n)s" },
+ "vector_vector_Point3f" : { "j_type" : "List<MatOfPoint3f>", "jn_type" : "long", "jni_type" : "jlong", "jni_var" : "vector< vector<Point3f> > %(n)s" },
+
+ "Mat" : { "j_type" : "Mat", "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),
+ "jni_var" : "Mat& %(n)s = *((Mat*)%(n)s_nativeObj)",
+ "jni_type" : "jlong", #"jni_name" : "*%(n)s",
+ "suffix" : "J" },
+
+ "Point" : { "j_type" : "Point", "jn_args" : (("double", ".x"), ("double", ".y")),
+ "jni_var" : "Point %(n)s((int)%(n)s_x, (int)%(n)s_y)", "jni_type" : "jdoubleArray",
+ "suffix" : "DD"},
+ "Point2f" : { "j_type" : "Point", "jn_args" : (("double", ".x"), ("double", ".y")),
+ "jni_var" : "Point2f %(n)s((float)%(n)s_x, (float)%(n)s_y)", "jni_type" : "jdoubleArray",
+ "suffix" : "DD"},
+ "Point2d" : { "j_type" : "Point", "jn_args" : (("double", ".x"), ("double", ".y")),
+ "jni_var" : "Point2d %(n)s(%(n)s_x, %(n)s_y)", "jni_type" : "jdoubleArray",
+ "suffix" : "DD"},
+ "Point3i" : { "j_type" : "Point3", "jn_args" : (("double", ".x"), ("double", ".y"), ("double", ".z")),
+ "jni_var" : "Point3i %(n)s((int)%(n)s_x, (int)%(n)s_y, (int)%(n)s_z)", "jni_type" : "jdoubleArray",
+ "suffix" : "DDD"},
+ "Point3f" : { "j_type" : "Point3", "jn_args" : (("double", ".x"), ("double", ".y"), ("double", ".z")),
+ "jni_var" : "Point3f %(n)s((float)%(n)s_x, (float)%(n)s_y, (float)%(n)s_z)", "jni_type" : "jdoubleArray",
+ "suffix" : "DDD"},
+ "Point3d" : { "j_type" : "Point3", "jn_args" : (("double", ".x"), ("double", ".y"), ("double", ".z")),
+ "jni_var" : "Point3d %(n)s(%(n)s_x, %(n)s_y, %(n)s_z)", "jni_type" : "jdoubleArray",
+ "suffix" : "DDD"},
+ "KeyPoint": { "j_type" : "KeyPoint", "jn_args" : (("float", ".x"), ("float", ".y"), ("float", ".size"),
+ ("float", ".angle"), ("float", ".response"), ("int", ".octave"), ("int", ".class_id")),
+ "jni_var" : "KeyPoint %(n)s(%(n)s_x, %(n)s_y, %(n)s_size, %(n)s_angle, %(n)s_response, %(n)s_octave, %(n)s_class_id)",
+ "jni_type" : "jdoubleArray",
+ "suffix" : "FFFFFII"},
+ "DMatch" : { "j_type" : "DMatch", "jn_args" : ( ('int', 'queryIdx'), ('int', 'trainIdx'),
+ ('int', 'imgIdx'), ('float', 'distance'), ),
+ "jni_var" : "DMatch %(n)s(%(n)s_queryIdx, %(n)s_trainIdx, %(n)s_imgIdx, %(n)s_distance)",
+ "jni_type" : "jdoubleArray",
+ "suffix" : "IIIF"},
+ "Rect" : { "j_type" : "Rect", "jn_args" : (("int", ".x"), ("int", ".y"), ("int", ".width"), ("int", ".height")),
+ "jni_var" : "Rect %(n)s(%(n)s_x, %(n)s_y, %(n)s_width, %(n)s_height)", "jni_type" : "jdoubleArray",
+ "suffix" : "IIII"},
+ "Size" : { "j_type" : "Size", "jn_args" : (("double", ".width"), ("double", ".height")),
+ "jni_var" : "Size %(n)s((int)%(n)s_width, (int)%(n)s_height)", "jni_type" : "jdoubleArray",
+ "suffix" : "DD"},
+ "Size2f" : { "j_type" : "Size", "jn_args" : (("double", ".width"), ("double", ".height")),
+ "jni_var" : "Size2f %(n)s((float)%(n)s_width, (float)%(n)s_height)", "jni_type" : "jdoubleArray",
+ "suffix" : "DD"},
+ "RotatedRect": { "j_type" : "RotatedRect", "jn_args" : (("double", ".center.x"), ("double", ".center.y"), ("double", ".size.width"), ("double", ".size.height"), ("double", ".angle")),
+ "jni_var" : "RotatedRect %(n)s(cv::Point2f(%(n)s_center_x, %(n)s_center_y), cv::Size2f(%(n)s_size_width, %(n)s_size_height), %(n)s_angle)",
+ "jni_type" : "jdoubleArray", "suffix" : "DDDDD"},
+ "Scalar" : { "j_type" : "Scalar", "jn_args" : (("double", ".val[0]"), ("double", ".val[1]"), ("double", ".val[2]"), ("double", ".val[3]")),
+ "jni_var" : "Scalar %(n)s(%(n)s_val0, %(n)s_val1, %(n)s_val2, %(n)s_val3)", "jni_type" : "jdoubleArray",
+ "suffix" : "DDDD"},
+ "Range" : { "j_type" : "Range", "jn_args" : (("int", ".start"), ("int", ".end")),
+ "jni_var" : "Range %(n)s(%(n)s_start, %(n)s_end)", "jni_type" : "jdoubleArray",
+ "suffix" : "II"},
+ "CvSlice" : { "j_type" : "Range", "jn_args" : (("int", ".start"), ("int", ".end")),
+ "jni_var" : "Range %(n)s(%(n)s_start, %(n)s_end)", "jni_type" : "jdoubleArray",
+ "suffix" : "II"},
+ "string" : { "j_type" : "String", "jn_type" : "String",
+ "jni_type" : "jstring", "jni_name" : "n_%(n)s",
+ "jni_var" : 'const char* utf_%(n)s = env->GetStringUTFChars(%(n)s, 0); std::string n_%(n)s( utf_%(n)s ? utf_%(n)s : "" ); env->ReleaseStringUTFChars(%(n)s, utf_%(n)s)',
+ "suffix" : "Ljava_lang_String_2"},
+ "String" : { "j_type" : "String", "jn_type" : "String",
+ "jni_type" : "jstring", "jni_name" : "n_%(n)s",
+ "jni_var" : 'const char* utf_%(n)s = env->GetStringUTFChars(%(n)s, 0); String n_%(n)s( utf_%(n)s ? utf_%(n)s : "" ); env->ReleaseStringUTFChars(%(n)s, utf_%(n)s)',
+ "suffix" : "Ljava_lang_String_2"},
+ "c_string": { "j_type" : "String", "jn_type" : "String",
+ "jni_type" : "jstring", "jni_name" : "n_%(n)s.c_str()",
+ "jni_var" : 'const char* utf_%(n)s = env->GetStringUTFChars(%(n)s, 0); std::string n_%(n)s( utf_%(n)s ? utf_%(n)s : "" ); env->ReleaseStringUTFChars(%(n)s, utf_%(n)s)',
+ "suffix" : "Ljava_lang_String_2"},
+"TermCriteria": { "j_type" : "TermCriteria", "jn_args" : (("int", ".type"), ("int", ".maxCount"), ("double", ".epsilon")),
+ "jni_var" : "TermCriteria %(n)s(%(n)s_type, %(n)s_maxCount, %(n)s_epsilon)", "jni_type" : "jdoubleArray",
+ "suffix" : "IID"},
+"CvTermCriteria": { "j_type" : "TermCriteria", "jn_args" : (("int", ".type"), ("int", ".maxCount"), ("double", ".epsilon")),
+ "jni_var" : "TermCriteria %(n)s(%(n)s_type, %(n)s_maxCount, %(n)s_epsilon)", "jni_type" : "jdoubleArray",
+ "suffix" : "IID"},
+ "Vec2d" : { "j_type" : "double[]", "jn_args" : (("double", ".val[0]"), ("double", ".val[1]")),
+ "jn_type" : "double[]",
+ "jni_var" : "Vec2d %(n)s(%(n)s_val0, %(n)s_val1)", "jni_type" : "jdoubleArray",
+ "suffix" : "DD"},
+ "Vec3d" : { "j_type" : "double[]", "jn_args" : (("double", ".val[0]"), ("double", ".val[1]"), ("double", ".val[2]")),
+ "jn_type" : "double[]",
+ "jni_var" : "Vec3d %(n)s(%(n)s_val0, %(n)s_val1, %(n)s_val2)", "jni_type" : "jdoubleArray",
+ "suffix" : "DDD"},
+
+}
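
For a "complex" entry, jn_args lists the primitive fields the Java side passes instead of the object itself, and jni_var is the C++ declaration that rebuilds the value from those fields. A sketch with the Point entry above and a hypothetical argument named pt:

entry = type_dict["Point"]
# the Java native method receives one double per field: pt.x, pt.y
fields = ["%s pt%s" % (t, f) for (t, f) in entry["jn_args"]]
assert fields == ["double pt.x", "double pt.y"]
# the JNI body reconstructs the C++ object from pt_x / pt_y
assert entry["jni_var"] % {"n": "pt"} == "Point pt((int)pt_x, (int)pt_y)"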
+
+# { class : { func : {j_code, jn_code, cpp_code} } }
+ManualFuncs = {
+ 'Core' :
+ {
+ 'minMaxLoc' : {
+ 'j_code' : """
+ // manual port
+ public static class MinMaxLocResult {
+ public double minVal;
+ public double maxVal;
+ public Point minLoc;
+ public Point maxLoc;
+
+ public MinMaxLocResult() {
+ minVal=0; maxVal=0;
+ minLoc=new Point();
+ maxLoc=new Point();
+ }
+ }
+
+ // C++: minMaxLoc(Mat src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray())
+
+ //javadoc: minMaxLoc(src, mask)
+ public static MinMaxLocResult minMaxLoc(Mat src, Mat mask) {
+ MinMaxLocResult res = new MinMaxLocResult();
+ long maskNativeObj=0;
+ if (mask != null) {
+ maskNativeObj=mask.nativeObj;
+ }
+ double resarr[] = n_minMaxLocManual(src.nativeObj, maskNativeObj);
+ res.minVal=resarr[0];
+ res.maxVal=resarr[1];
+ res.minLoc.x=resarr[2];
+ res.minLoc.y=resarr[3];
+ res.maxLoc.x=resarr[4];
+ res.maxLoc.y=resarr[5];
+ return res;
+ }
+
+ //javadoc: minMaxLoc(src)
+ public static MinMaxLocResult minMaxLoc(Mat src) {
+ return minMaxLoc(src, null);
+ }
+
+""",
+ 'jn_code' :
+""" private static native double[] n_minMaxLocManual(long src_nativeObj, long mask_nativeObj);\n""",
+ 'cpp_code' :
+"""
+// C++: minMaxLoc(Mat src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0, InputArray mask=noArray())
+
+JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Core_n_1minMaxLocManual
+ (JNIEnv* env, jclass cls, jlong src_nativeObj, jlong mask_nativeObj)
+{
+ try {
+ LOGD("Core::n_1minMaxLoc()");
+ jdoubleArray result;
+ result = env->NewDoubleArray(6);
+ if (result == NULL) {
+ return NULL; /* out of memory error thrown */
+ }
+
+ Mat& src = *((Mat*)src_nativeObj);
+
+ double minVal, maxVal;
+ Point minLoc, maxLoc;
+ if (mask_nativeObj != 0) {
+ Mat& mask = *((Mat*)mask_nativeObj);
+ minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc, mask);
+ } else {
+ minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc);
+ }
+
+ jdouble fill[6];
+ fill[0]=minVal;
+ fill[1]=maxVal;
+ fill[2]=minLoc.x;
+ fill[3]=minLoc.y;
+ fill[4]=maxLoc.x;
+ fill[5]=maxLoc.y;
+
+ env->SetDoubleArrayRegion(result, 0, 6, fill);
+
+ return result;
+
+    } catch(const cv::Exception& e) {
+        LOGD("Core::n_1minMaxLoc() caught cv::Exception: %s", e.what());
+ jclass je = env->FindClass("org/opencv/core/CvException");
+ if(!je) je = env->FindClass("java/lang/Exception");
+ env->ThrowNew(je, e.what());
+ return NULL;
+ } catch (...) {
+ LOGD("Core::n_1minMaxLoc() catched unknown exception (...)");
+ jclass je = env->FindClass("java/lang/Exception");
+ env->ThrowNew(je, "Unknown exception in JNI code {core::minMaxLoc()}");
+ return NULL;
+ }
+}
+
+""",
+ }, # minMaxLoc
+
+ 'getTextSize' :
+ {
+ 'j_code' :
+"""
+ // C++: Size getTextSize(const string& text, int fontFace, double fontScale, int thickness, int* baseLine);
+ //javadoc:getTextSize(text, fontFace, fontScale, thickness, baseLine)
+ public static Size getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine) {
+ if(baseLine != null && baseLine.length != 1)
+ throw new java.lang.IllegalArgumentException("'baseLine' must be 'int[1]' or 'null'.");
+ Size retVal = new Size(n_getTextSize(text, fontFace, fontScale, thickness, baseLine));
+ return retVal;
+ }
+""",
+ 'jn_code' :
+""" private static native double[] n_getTextSize(String text, int fontFace, double fontScale, int thickness, int[] baseLine);\n""",
+ 'cpp_code' :
+"""
+// C++: Size getTextSize(const string& text, int fontFace, double fontScale, int thickness, int* baseLine);
+
+JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Core_n_1getTextSize
+ (JNIEnv* env, jclass cls, jstring text, jint fontFace, jdouble fontScale, jint thickness, jintArray baseLine)
+{
+ try {
+ LOGD("Core::n_1getTextSize()");
+ jdoubleArray result;
+ result = env->NewDoubleArray(2);
+ if (result == NULL) {
+ return NULL; /* out of memory error thrown */
+ }
+
+ const char* utf_text = env->GetStringUTFChars(text, 0);
+ std::string n_text( utf_text ? utf_text : "" );
+ env->ReleaseStringUTFChars(text, utf_text);
+
+ int _baseLine;
+ int* pbaseLine = 0;
+
+ if (baseLine != NULL)
+ pbaseLine = &_baseLine;
+
+ cv::Size rsize = cv::getTextSize(n_text, (int)fontFace, (double)fontScale, (int)thickness, pbaseLine);
+
+ jdouble fill[2];
+ fill[0]=rsize.width;
+ fill[1]=rsize.height;
+
+ env->SetDoubleArrayRegion(result, 0, 2, fill);
+
+ if (baseLine != NULL)
+ env->SetIntArrayRegion(baseLine, 0, 1, pbaseLine);
+
+ return result;
+
+    } catch(const cv::Exception& e) {
+        LOGD("Core::n_1getTextSize() caught cv::Exception: %s", e.what());
+ jclass je = env->FindClass("org/opencv/core/CvException");
+ if(!je) je = env->FindClass("java/lang/Exception");
+ env->ThrowNew(je, e.what());
+ return NULL;
+ } catch (...) {
+ LOGD("Core::n_1getTextSize() catched unknown exception (...)");
+ jclass je = env->FindClass("java/lang/Exception");
+ env->ThrowNew(je, "Unknown exception in JNI code {core::getTextSize()}");
+ return NULL;
+ }
+}
+
+""",
+ }, # getTextSize
+## "checkRange" : #TBD
+## {'j_code' : '/* TBD: checkRange() */', 'jn_code' : '', 'cpp_code' : '' },
+
+ "checkHardwareSupport" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "setUseOptimized" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "useOptimized" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+
+ }, # Core
+
+ 'Highgui' :
+ {
+ "namedWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "destroyWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "destroyAllWindows" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "startWindowThread" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "setWindowProperty" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "getWindowProperty" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "getTrackbarPos" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "setTrackbarPos" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "imshow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "waitKey" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "moveWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ "resizeWindow" : {'j_code' : '', 'jn_code' : '', 'cpp_code' : '' },
+ }, # Highgui
+
+}
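
An entry whose three code strings are empty (like the Highgui GUI functions above) makes add_func() skip the function entirely, so no wrapper is generated; non-empty entries are pasted verbatim by gen_class(). A sketch of suppressing one more function (the function name here is only an example):

# Hypothetical: silence auto-generation for another Highgui function.
ManualFuncs["Highgui"]["createTrackbar"] = \
    {'j_code': '', 'jn_code': '', 'cpp_code': ''}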
+
+# { class : { func : {arg_name : ctype} } }
+func_arg_fix = {
+ '' : {
+ 'randu' : { 'low' : 'double', 'high' : 'double', },
+ 'randn' : { 'mean' : 'double', 'stddev' : 'double', },
+ 'inRange' : { 'lowerb' : 'Scalar', 'upperb' : 'Scalar', },
+ 'goodFeaturesToTrack' : { 'corners' : 'vector_Point', },
+ 'findFundamentalMat' : { 'points1' : 'vector_Point2f', 'points2' : 'vector_Point2f', },
+ 'cornerSubPix' : { 'corners' : 'vector_Point2f', },
+ 'minEnclosingCircle' : { 'points' : 'vector_Point2f', },
+ 'findHomography' : { 'srcPoints' : 'vector_Point2f', 'dstPoints' : 'vector_Point2f', },
+ 'solvePnP' : { 'objectPoints' : 'vector_Point3f', 'imagePoints' : 'vector_Point2f',
+ 'distCoeffs' : 'vector_double' },
+ 'solvePnPRansac' : { 'objectPoints' : 'vector_Point3f', 'imagePoints' : 'vector_Point2f',
+ 'distCoeffs' : 'vector_double' },
+ 'calcOpticalFlowPyrLK' : { 'prevPts' : 'vector_Point2f', 'nextPts' : 'vector_Point2f',
+ 'status' : 'vector_uchar', 'err' : 'vector_float', },
+ 'fitEllipse' : { 'points' : 'vector_Point2f', },
+ 'fillPoly' : { 'pts' : 'vector_vector_Point', },
+ 'polylines' : { 'pts' : 'vector_vector_Point', },
+ 'fillConvexPoly' : { 'points' : 'vector_Point', },
+ 'boundingRect' : { 'points' : 'vector_Point', },
+ 'approxPolyDP' : { 'curve' : 'vector_Point2f', 'approxCurve' : 'vector_Point2f', },
+ 'arcLength' : { 'curve' : 'vector_Point2f', },
+ 'pointPolygonTest' : { 'contour' : 'vector_Point2f', },
+ 'minAreaRect' : { 'points' : 'vector_Point2f', },
+ 'getAffineTransform' : { 'src' : 'vector_Point2f', 'dst' : 'vector_Point2f', },
+ 'hconcat' : { 'src' : 'vector_Mat', },
+ 'vconcat' : { 'src' : 'vector_Mat', },
+ 'undistortPoints' : { 'src' : 'vector_Point2f', 'dst' : 'vector_Point2f' },
+ 'checkRange' : {'pos' : '*'},
+ 'meanStdDev' : {'mean' : 'vector_double', 'stddev' : 'vector_double'},
+ 'drawContours' : {'contours' : 'vector_vector_Point'},
+ 'findContours' : {'contours' : 'vector_vector_Point'},
+ 'convexityDefects' : {'contour' : 'vector_Point', 'convexhull' : 'vector_int', 'convexityDefects' : 'vector_Vec4i'},
+ 'isContourConvex' : { 'contour' : 'vector_Point', },
+ 'convexHull' : {'points' : 'vector_Point', 'hull' : 'vector_int', 'returnPoints' : ''},
+ 'projectPoints' : { 'objectPoints' : 'vector_Point3f', 'imagePoints' : 'vector_Point2f',
+ 'distCoeffs' : 'vector_double' },
+ 'initCameraMatrix2D' : {'objectPoints' : 'vector_vector_Point3f', 'imagePoints' : 'vector_vector_Point2f', },
+ 'findChessboardCorners' : { 'corners' : 'vector_Point2f' },
+ 'drawChessboardCorners' : { 'corners' : 'vector_Point2f' },
+ }, # '', i.e. no class
+} # func_arg_fix
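
FuncInfo.__init__ (below) consults this table by class and function name and swaps the declared argument type before ArgInfo sees it. A minimal sketch of that lookup:

arg_fix_map = func_arg_fix.get('', {}).get('fitEllipse', {})
decl_arg = ['vector_Point', 'points', '', []]  # [ctype, name, defval, modifiers]
decl_arg[0] = arg_fix_map.get(decl_arg[1], decl_arg[0])
assert decl_arg[0] == 'vector_Point2f'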
+
+class ConstInfo(object):
+ def __init__(self, cname, name, val, addedManually=False):
+ self.cname = cname
+ self.name = re.sub(r"^Cv", "", name)
+ self.value = val
+ self.addedManually = addedManually
+
+
+class ClassPropInfo(object):
+ def __init__(self, decl): # [f_ctype, f_name, '', '/RW']
+ self.ctype = decl[0]
+ self.name = decl[1]
+ self.rw = "/RW" in decl[3]
+
+class ClassInfo(object):
+ def __init__(self, decl): # [ 'class/struct cname', ': base', [modlist] ]
+ name = decl[0]
+ name = name[name.find(" ")+1:].strip()
+ self.cname = self.name = self.jname = re.sub(r"^cv\.", "", name)
+ self.cname = self.cname.replace(".", "::")
+ self.methods = {}
+ self.methods_suffixes = {}
+        self.consts = [] # using a list to save the occurrence order
+ self.private_consts = []
+ self.imports = set()
+ self.props= []
+ self.jname = self.name
+ for m in decl[2]:
+ if m.startswith("="):
+ self.jname = m[1:]
+ self.base = ''
+ if decl[1]:
+ self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip()
+
+
+
+class ArgInfo(object):
+ def __init__(self, arg_tuple): # [ ctype, name, def val, [mod], argno ]
+ self.pointer = False
+ ctype = arg_tuple[0]
+ if ctype.endswith("*"):
+ ctype = ctype[:-1]
+ self.pointer = True
+ if ctype == 'vector_Point2d':
+ ctype = 'vector_Point2f'
+ elif ctype == 'vector_Point3d':
+ ctype = 'vector_Point3f'
+ self.ctype = ctype
+ self.name = arg_tuple[1]
+ self.defval = arg_tuple[2]
+ self.out = ""
+ if "/O" in arg_tuple[3]:
+ self.out = "O"
+ if "/IO" in arg_tuple[3]:
+ self.out = "IO"
+
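
A sketch of how one declaration tuple is parsed (the tuple values are illustrative):

a = ArgInfo(["Mat*", "mask", "Mat()", ["/O"], ""])
assert a.ctype == "Mat" and a.pointer          # trailing '*' stripped and flagged
assert a.name == "mask" and a.defval == "Mat()"
assert a.out == "O"                            # "/O" marks an output argument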
+
+class FuncInfo(object):
+ def __init__(self, decl): # [ funcname, return_ctype, [modifiers], [args] ]
+ name = re.sub(r"^cv\.", "", decl[0])
+ self.cname = name.replace(".", "::")
+ classname = ""
+ dpos = name.rfind(".")
+ if dpos >= 0:
+ classname = name[:dpos]
+ name = name[dpos+1:]
+ self.classname = classname
+ self.jname = self.name = name
+ if "[" in name:
+ self.jname = "getelem"
+ for m in decl[2]:
+ if m.startswith("="):
+ self.jname = m[1:]
+ self.static = ["","static"][ "/S" in decl[2] ]
+ self.ctype = re.sub(r"^CvTermCriteria", "TermCriteria", decl[1] or "")
+ self.args = []
+ arg_fix_map = func_arg_fix.get(classname, {}).get(self.jname, {})
+ for a in decl[3]:
+ arg = a[:]
+ arg[0] = arg_fix_map.get(arg[1], arg[0])
+ ai = ArgInfo(arg)
+ self.args.append(ai)
+
+
+
+class FuncFamilyInfo(object):
+ def __init__(self, decl): # [ funcname, return_ctype, [modifiers], [args] ]
+ self.funcs = []
+ self.funcs.append( FuncInfo(decl) )
+ self.jname = self.funcs[0].jname
+ self.isconstructor = self.funcs[0].name == self.funcs[0].classname
+
+
+
+ def add_func(self, fi):
+ self.funcs.append( fi )
+
+
+class JavaWrapperGenerator(object):
+ def __init__(self):
+ self.clear()
+
+ def clear(self):
+ self.classes = { "Mat" : ClassInfo([ 'class Mat', '', [], [] ]) }
+ self.module = ""
+ self.Module = ""
+ self.java_code= {} # { class : {j_code, jn_code} }
+ self.cpp_code = None
+ self.ported_func_list = []
+ self.skipped_func_list = []
+ self.def_args_hist = {} # { def_args_cnt : funcs_cnt }
+ self.classes_map = []
+ self.classes_simple = []
+
+ def add_class_code_stream(self, class_name, cls_base = ''):
+ jname = self.classes[class_name].jname
+ self.java_code[class_name] = { "j_code" : StringIO(), "jn_code" : StringIO(), }
+ if class_name != self.Module:
+ if cls_base:
+ self.java_code[class_name]["j_code"].write("""
+//
+// This file is auto-generated. Please don't modify it!
+//
+package org.opencv.%(m)s;
+
+$imports
+
+// C++: class %(c)s
+//javadoc: %(c)s
+public class %(jc)s extends %(base)s {
+
+ protected %(jc)s(long addr) { super(addr); }
+
+""" % { 'm' : self.module, 'c' : class_name, 'jc' : jname, 'base' : cls_base })
+ else: # not cls_base
+ self.java_code[class_name]["j_code"].write("""
+//
+// This file is auto-generated. Please don't modify it!
+//
+package org.opencv.%(m)s;
+
+$imports
+
+// C++: class %(c)s
+//javadoc: %(c)s
+public class %(jc)s {
+
+ protected final long nativeObj;
+ protected %(jc)s(long addr) { nativeObj = addr; }
+
+""" % { 'm' : self.module, 'c' : class_name, 'jc' : jname })
+ else: # class_name == self.Module
+ self.java_code[class_name]["j_code"].write("""
+//
+// This file is auto-generated. Please don't modify it!
+//
+package org.opencv.%(m)s;
+
+$imports
+
+public class %(jc)s {
+""" % { 'm' : self.module, 'jc' : jname } )
+
+ self.java_code[class_name]["jn_code"].write("""
+ //
+ // native stuff
+ //
+ static { System.loadLibrary("opencv_java"); }
+""" )
+
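
The $imports placeholder in these headers is deliberately left unresolved: gen() runs the finished class stream through string.Template once all imports are known. A sketch of that final substitution:

from string import Template

header = "package org.opencv.core;\n\n$imports\n\npublic class Mat {"
filled = Template(header).substitute(imports="import java.util.List;")
assert "import java.util.List;" in filled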
+
+
+ def add_class(self, decl):
+ classinfo = ClassInfo(decl)
+ if classinfo.name in class_ignore_list:
+ return
+ name = classinfo.name
+ if name in self.classes:
+ print "Generator error: class %s (%s) is duplicated" % \
+ (name, classinfo.cname)
+ return
+ self.classes[name] = classinfo
+ if name in type_dict:
+ print "Duplicated class: " + name
+ return
+ if '/Simple' in decl[2]:
+ self.classes_simple.append(name)
+ if ('/Map' in decl[2]):
+ self.classes_map.append(name)
+ #adding default c-tor
+ ffi = FuncFamilyInfo(['cv.'+name+'.'+name, '', [], []])
+ classinfo.methods[ffi.jname] = ffi
+ type_dict[name] = \
+ { "j_type" : classinfo.jname,
+ "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),
+ "jni_name" : "(*("+name+"*)%(n)s_nativeObj)", "jni_type" : "jlong",
+ "suffix" : "J" }
+ type_dict[name+'*'] = \
+ { "j_type" : classinfo.jname,
+ "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),),
+ "jni_name" : "("+name+"*)%(n)s_nativeObj", "jni_type" : "jlong",
+ "suffix" : "J" }
+
+ # missing_consts { Module : { public : [[name, val],...], private : [[]...] } }
+ if name in missing_consts:
+ if 'private' in missing_consts[name]:
+ for (n, val) in missing_consts[name]['private']:
+ classinfo.private_consts.append( ConstInfo(n, n, val, True) )
+ if 'public' in missing_consts[name]:
+ for (n, val) in missing_consts[name]['public']:
+ classinfo.consts.append( ConstInfo(n, n, val, True) )
+
+ # class props
+ for p in decl[3]:
+ if True: #"vector" not in p[0]:
+ classinfo.props.append( ClassPropInfo(p) )
+ else:
+ print "Skipped property: [%s]" % name, p
+
+ self.add_class_code_stream(name, classinfo.base)
+ if classinfo.base:
+ self.get_imports(name, classinfo.base)
+
+
+ def add_const(self, decl): # [ "const cname", val, [], [] ]
+ name = decl[0].replace("const ", "").strip()
+ name = re.sub(r"^cv\.", "", name)
+ cname = name.replace(".", "::")
+ for c in const_ignore_list:
+ if re.match(c, name):
+ return
+ # class member?
+ dpos = name.rfind(".")
+ if dpos >= 0:
+ classname = name[:dpos]
+ name = name[dpos+1:]
+ else:
+ classname = self.Module
+ if classname not in self.classes:
+ # this class isn't wrapped
+ # skipping this const
+ return
+
+ consts = self.classes[classname].consts
+ for c in const_private_list:
+ if re.match(c, name):
+ consts = self.classes[classname].private_consts
+ break
+
+ constinfo = ConstInfo(cname, name, decl[1])
+ # checking duplication
+        for clist in (self.classes[classname].consts, self.classes[classname].private_consts):
+            for c in clist:
+ if c.name == constinfo.name:
+ if c.addedManually:
+ return
+ print "Generator error: constant %s (%s) is duplicated" \
+ % (constinfo.name, constinfo.cname)
+ sys.exit(-1)
+
+ consts.append(constinfo)
+
+ def add_func(self, decl):
+ ffi = FuncFamilyInfo(decl)
+ classname = ffi.funcs[0].classname or self.Module
+ if classname in class_ignore_list:
+ return
+ if classname in ManualFuncs and ffi.jname in ManualFuncs[classname]:
+ return
+ if classname not in self.classes:
+ print "Generator error: the class %s for method %s is missing" % \
+ (classname, ffi.jname)
+ sys.exit(-1)
+ func_map = self.classes[classname].methods
+ if ffi.jname in func_map:
+ func_map[ffi.jname].add_func(ffi.funcs[0])
+ else:
+ func_map[ffi.jname] = ffi
+ # calc args with def val
+ cnt = len([a for a in ffi.funcs[0].args if a.defval])
+ self.def_args_hist[cnt] = self.def_args_hist.get(cnt, 0) + 1
+
+ def save(self, path, buf):
+ f = open(path, "wt")
+ f.write(buf)
+ f.close()
+
+ def gen(self, srcfiles, module, output_path):
+ self.clear()
+ self.module = module
+ self.Module = module.capitalize()
+ parser = hdr_parser.CppHeaderParser()
+
+ self.add_class( ['class ' + self.Module, '', [], []] ) # [ 'class/struct cname', ':bases', [modlist] [props] ]
+
+ # scan the headers and build more descriptive maps of classes, consts, functions
+ for hdr in srcfiles:
+ decls = parser.parse(hdr)
+ for decl in decls:
+ name = decl[0]
+ if name.startswith("struct") or name.startswith("class"):
+ self.add_class(decl)
+ elif name.startswith("const"):
+ self.add_const(decl)
+ else: # function
+ self.add_func(decl)
+
+ self.cpp_code = StringIO()
+ self.cpp_code.write("""
+//
+// This file is auto-generated, please don't edit!
+//
+
+#include <jni.h>
+
+#include "converters.h"
+
+#ifdef DEBUG
+#include <android/log.h>
+#define MODULE_LOG_TAG "OpenCV.%(m)s"
+#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, MODULE_LOG_TAG, __VA_ARGS__))
+#else //DEBUG
+#define LOGD(...)
+#endif //DEBUG
+
+#include "opencv2/%(m)s/%(m)s.hpp"
+
+using namespace cv;
+
+extern "C" {
+
+""" % {'m' : module} )
+
+ # generate code for the classes
+ for name in self.classes.keys():
+ if name == "Mat":
+ continue
+ self.gen_class(name)
+ # saving code streams
+ imports = "\n".join([ "import %s;" % c for c in \
+ sorted(self.classes[name].imports) if not c.startswith('org.opencv.'+self.module) ])
+ self.java_code[name]["j_code"].write("\n\n%s\n}\n" % self.java_code[name]["jn_code"].getvalue())
+ java_code = self.java_code[name]["j_code"].getvalue()
+ java_code = Template(java_code).substitute(imports = imports)
+ self.save("%s/%s+%s.java" % (output_path, module, self.classes[name].jname), java_code)
+
+ self.cpp_code.write( '\n} // extern "C"\n' )
+ self.save(output_path+"/"+module+".cpp", self.cpp_code.getvalue())
+
+ # report
+ report = StringIO()
+ report.write("PORTED FUNCs LIST (%i of %i):\n\n" % \
+ (len(self.ported_func_list), len(self.ported_func_list)+ len(self.skipped_func_list))
+ )
+ report.write("\n".join(self.ported_func_list))
+ report.write("\n\nSKIPPED FUNCs LIST (%i of %i):\n\n" % \
+ (len(self.skipped_func_list), len(self.ported_func_list)+ len(self.skipped_func_list))
+ )
+ report.write("".join(self.skipped_func_list))
+
+ for i in self.def_args_hist.keys():
+ report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i]))
+
+ report.write("\n\nclass as MAP:\n\t" + "\n\t".join(self.classes_map))
+ report.write("\n\nclass SIMPLE:\n\t" + "\n\t".join(self.classes_simple))
+
+ self.save(output_path+"/"+module+".txt", report.getvalue())
+
+ print "Done %i of %i funcs." % (len(self.ported_func_list), len(self.ported_func_list)+ len(self.skipped_func_list))
+
+
+
+ def get_imports(self, scope_classname, ctype):
+ imports = self.classes[scope_classname or self.Module].imports
+ if ctype.startswith('vector_vector'):
+ imports.add("org.opencv.core.Mat")
+ imports.add("java.util.List")
+ imports.add("org.opencv.utils.Converters")
+ self.get_imports(scope_classname, ctype.replace('vector_vector', 'vector'))
+ return
+ if ctype.startswith('vector'):
+ imports.add("org.opencv.core.Mat")
+ if type_dict[ctype]['j_type'].startswith('MatOf'):
+ imports.add("org.opencv.core." + type_dict[ctype]['j_type'])
+ return
+ else:
+ imports.add("java.util.List")
+ imports.add("org.opencv.utils.Converters")
+ self.get_imports(scope_classname, ctype.replace('vector_', ''))
+ return
+ j_type = ''
+ if ctype in type_dict:
+ j_type = type_dict[ctype]['j_type']
+ elif ctype in ("Algorithm"):
+ j_type = ctype
+ if j_type in ( "CvType", "Mat", "Point", "Point3", "Range", "Rect", "RotatedRect", "Scalar", "Size", "TermCriteria", "Algorithm" ):
+ imports.add("org.opencv.core." + j_type)
+ if j_type == 'String':
+ imports.add("java.lang.String")
+ return
+
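
For nested vector types the method recurses, so a single argument can pull in several imports. A sketch (setting up just enough generator state by hand):

g = JavaWrapperGenerator()
g.Module = "Imgproc"
g.classes["Imgproc"] = ClassInfo(['class Imgproc', '', [], []])
g.get_imports('', 'vector_vector_Point2f')
imports = g.classes["Imgproc"].imports
assert "org.opencv.core.Mat" in imports
assert "java.util.List" in imports
assert "org.opencv.utils.Converters" in imports
assert "org.opencv.core.MatOfPoint2f" in imports   # added by the recursive call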
+
+
+ def gen_func(self, fi, prop_name=''):
+ j_code = self.java_code[fi.classname or self.Module]["j_code"]
+ jn_code = self.java_code[fi.classname or self.Module]["jn_code"]
+ cpp_code = self.cpp_code
+
+ # c_decl
+ # e.g: void add(Mat src1, Mat src2, Mat dst, Mat mask = Mat(), int dtype = -1)
+ if prop_name:
+ c_decl = "%s %s::%s" % (fi.ctype, fi.classname, prop_name)
+ else:
+ decl_args = []
+ for a in fi.args:
+ s = a.ctype or ' _hidden_ '
+ if a.pointer:
+ s += "*"
+ elif a.out:
+ s += "&"
+ s += " " + a.name
+ if a.defval:
+ s += " = "+a.defval
+ decl_args.append(s)
+ c_decl = "%s %s %s(%s)" % ( fi.static, fi.ctype, fi.cname, ", ".join(decl_args) )
+
+ # java comment
+ j_code.write( "\n //\n // C++: %s\n //\n\n" % c_decl )
+ # check if we 'know' all the types
+ if fi.ctype not in type_dict: # unsupported ret type
+ msg = "// Return type '%s' is not supported, skipping the function\n\n" % fi.ctype
+ self.skipped_func_list.append(c_decl + "\n" + msg)
+ j_code.write( " "*4 + msg )
+ print "SKIP:", c_decl, "\n\tdue to RET type", fi.ctype
+ return
+ for a in fi.args:
+ if a.ctype not in type_dict:
+ msg = "// Unknown type '%s' (%s), skipping the function\n\n" % (a.ctype, a.out or "I")
+ self.skipped_func_list.append(c_decl + "\n" + msg)
+ j_code.write( " "*4 + msg )
+ print "SKIP:", c_decl, "\n\tdue to ARG type", a.ctype, "/" + (a.out or "I")
+ return
+
+ self.ported_func_list.append(c_decl)
+
+ # jn & cpp comment
+ jn_code.write( "\n // C++: %s\n" % c_decl )
+ cpp_code.write( "\n//\n// %s\n//\n" % c_decl )
+
+ # java args
+ args = fi.args[:] # copy
+ suffix_counter = int( self.classes[fi.classname or self.Module].methods_suffixes.get(fi.jname, -1) )
+ while True:
+ suffix_counter += 1
+ self.classes[fi.classname or self.Module].methods_suffixes[fi.jname] = suffix_counter
+ # java native method args
+ jn_args = []
+ # jni (cpp) function args
+ jni_args = [ArgInfo([ "env", "env", "", [], "" ]), ArgInfo([ "cls", "cls", "", [], "" ])]
+ j_prologue = []
+ j_epilogue = []
+ c_prologue = []
+ c_epilogue = []
+ if type_dict[fi.ctype]["jni_type"] == "jdoubleArray":
+ fields = type_dict[fi.ctype]["jn_args"]
+ c_epilogue.append( \
+ ("jdoubleArray _da_retval_ = env->NewDoubleArray(%(cnt)i); " +
+ "jdouble _tmp_retval_[%(cnt)i] = {%(args)s}; " +
+ "env->SetDoubleArrayRegion(_da_retval_, 0, %(cnt)i, _tmp_retval_);") %
+ { "cnt" : len(fields), "args" : ", ".join(["_retval_" + f[1] for f in fields]) } )
+ if fi.classname and fi.ctype and not fi.static: # non-static class method except c-tor
+ # adding 'self'
+ jn_args.append ( ArgInfo([ "__int64", "nativeObj", "", [], "" ]) )
+ jni_args.append( ArgInfo([ "__int64", "self", "", [], "" ]) )
+ self.get_imports(fi.classname, fi.ctype)
+ for a in args:
+ if not a.ctype: # hidden
+ continue
+ self.get_imports(fi.classname, a.ctype)
+ if "vector" in a.ctype: # pass as Mat
+ jn_args.append ( ArgInfo([ "__int64", "%s_mat.nativeObj" % a.name, "", [], "" ]) )
+ jni_args.append ( ArgInfo([ "__int64", "%s_mat_nativeObj" % a.name, "", [], "" ]) )
+ c_prologue.append( type_dict[a.ctype]["jni_var"] % {"n" : a.name} + ";" )
+ c_prologue.append( "Mat& %(n)s_mat = *((Mat*)%(n)s_mat_nativeObj)" % {"n" : a.name} + ";" )
+ if "I" in a.out or not a.out:
+ if a.ctype.startswith("vector_vector_"):
+ self.classes[fi.classname or self.Module].imports.add("java.util.ArrayList")
+ j_prologue.append( "List<Mat> %(n)s_tmplm = new ArrayList<Mat>((%(n)s != null) ? %(n)s.size() : 0);" % {"n" : a.name } )
+ j_prologue.append( "Mat %(n)s_mat = Converters.%(t)s_to_Mat(%(n)s, %(n)s_tmplm);" % {"n" : a.name, "t" : a.ctype} )
+ else:
+ if not type_dict[a.ctype]["j_type"].startswith("MatOf"):
+ j_prologue.append( "Mat %(n)s_mat = Converters.%(t)s_to_Mat(%(n)s);" % {"n" : a.name, "t" : a.ctype} )
+ else:
+ j_prologue.append( "Mat %s_mat = %s;" % (a.name, a.name) )
+ c_prologue.append( "Mat_to_%(t)s( %(n)s_mat, %(n)s );" % {"n" : a.name, "t" : a.ctype} )
+ else:
+ if not type_dict[a.ctype]["j_type"].startswith("MatOf"):
+ j_prologue.append( "Mat %s_mat = new Mat();" % a.name )
+ else:
+ j_prologue.append( "Mat %s_mat = %s;" % (a.name, a.name) )
+ if "O" in a.out:
+ if not type_dict[a.ctype]["j_type"].startswith("MatOf"):
+ j_epilogue.append("Converters.Mat_to_%(t)s(%(n)s_mat, %(n)s);" % {"t" : a.ctype, "n" : a.name})
+ c_epilogue.append( "%(t)s_to_Mat( %(n)s, %(n)s_mat );" % {"n" : a.name, "t" : a.ctype} )
+ else:
+ fields = type_dict[a.ctype].get("jn_args", ((a.ctype, ""),))
+ if "I" in a.out or not a.out or a.ctype in self.classes: # input arg, pass by primitive fields
+ for f in fields:
+ jn_args.append ( ArgInfo([ f[0], a.name + f[1], "", [], "" ]) )
+ jni_args.append( ArgInfo([ f[0], a.name + f[1].replace(".","_").replace("[","").replace("]",""), "", [], "" ]) )
+ if a.out and a.ctype not in self.classes: # out arg, pass as double[]
+ jn_args.append ( ArgInfo([ "double[]", "%s_out" % a.name, "", [], "" ]) )
+ jni_args.append ( ArgInfo([ "double[]", "%s_out" % a.name, "", [], "" ]) )
+ j_prologue.append( "double[] %s_out = new double[%i];" % (a.name, len(fields)) )
+ c_epilogue.append( \
+ "jdouble tmp_%(n)s[%(cnt)i] = {%(args)s}; env->SetDoubleArrayRegion(%(n)s_out, 0, %(cnt)i, tmp_%(n)s);" %
+ { "n" : a.name, "cnt" : len(fields), "args" : ", ".join([a.name + f[1] for f in fields]) } )
+ if a.ctype in ('bool', 'int', 'long', 'float', 'double'):
+ j_epilogue.append('if(%(n)s!=null) %(n)s[0] = (%(t)s)%(n)s_out[0];' % {'n':a.name,'t':a.ctype})
+ else:
+ set_vals = []
+ i = 0
+ for f in fields:
+ set_vals.append( "%(n)s%(f)s = %(t)s%(n)s_out[%(i)i]" %
+ {"n" : a.name, "t": ("("+type_dict[f[0]]["j_type"]+")", "")[f[0]=="double"], "f" : f[1], "i" : i}
+ )
+ i += 1
+ j_epilogue.append( "if("+a.name+"!=null){ " + "; ".join(set_vals) + "; } ")
+
+
+ # java part:
+ # private java NATIVE method decl
+ # e.g.
+ # private static native void add_0(long src1, long src2, long dst, long mask, int dtype);
+ jn_code.write( Template(\
+ " private static native $type $name($args);\n").substitute(\
+ type = type_dict[fi.ctype].get("jn_type", "double[]"), \
+ name = fi.jname + '_' + str(suffix_counter), \
+ args = ", ".join(["%s %s" % (type_dict[a.ctype]["jn_type"], a.name.replace(".","_").replace("[","").replace("]","")) for a in jn_args])
+ ) );
+
+ # java part:
+
+ #java doc comment
+ f_name = fi.name
+ if fi.classname:
+ f_name = fi.classname + "::" + fi.name
+ java_doc = "//javadoc: " + f_name + "(%s)" % ", ".join([a.name for a in args if a.ctype])
+ j_code.write(" "*4 + java_doc + "\n")
+
+ # public java wrapper method impl (calling native one above)
+ # e.g.
+ # public static void add( Mat src1, Mat src2, Mat dst, Mat mask, int dtype )
+ # { add_0( src1.nativeObj, src2.nativeObj, dst.nativeObj, mask.nativeObj, dtype ); }
+ ret_type = fi.ctype
+ if fi.ctype.endswith('*'):
+ ret_type = ret_type[:-1]
+ ret_val = type_dict[ret_type]["j_type"] + " retVal = "
+ tail = ""
+ ret = "return retVal;"
+ if ret_type.startswith('vector'):
+ tail = ")"
+ j_type = type_dict[ret_type]["j_type"]
+ if j_type.startswith('MatOf'):
+ ret_val += j_type + ".fromNativeAddr("
+ else:
+ ret_val = "Mat retValMat = new Mat("
+ j_prologue.append( j_type + ' retVal = new Array' + j_type+'();')
+ self.classes[fi.classname or self.Module].imports.add('java.util.ArrayList')
+ j_epilogue.append('Converters.Mat_to_' + ret_type + '(retValMat, retVal);')
+ elif ret_type == "void":
+ ret_val = ""
+ ret = "return;"
+ elif ret_type == "": # c-tor
+ if fi.classname and self.classes[fi.classname].base:
+ ret_val = "super( "
+ tail = " )"
+ else:
+ ret_val = "nativeObj = "
+ ret = "return;"
+ elif ret_type in self.classes: # wrapped class
+ ret_val = type_dict[ret_type]["j_type"] + " retVal = new " + self.classes[ret_type].jname + "("
+ tail = ")"
+ elif "jn_type" not in type_dict[ret_type]:
+ ret_val = type_dict[fi.ctype]["j_type"] + " retVal = new " + type_dict[ret_type]["j_type"] + "("
+ tail = ")"
+
+ static = "static"
+ if fi.classname:
+ static = fi.static
+
+ j_args = []
+ for a in args:
+ if not a.ctype: #hidden
+ continue
+ jt = type_dict[a.ctype]["j_type"]
+ if a.out and a.ctype in ('bool', 'int', 'long', 'float', 'double'):
+ jt += '[]'
+ j_args.append( jt + ' ' + a.name )
+
+ j_code.write( Template(\
+""" public $static $j_type $j_name($j_args)
+ {
+ $prologue
+ $ret_val$jn_name($jn_args_call)$tail;
+ $epilogue
+ $ret
+ }
+
+"""
+ ).substitute(\
+ ret = ret, \
+ ret_val = ret_val, \
+ tail = tail, \
+ prologue = "\n ".join(j_prologue), \
+ epilogue = "\n ".join(j_epilogue), \
+ static=static, \
+ j_type=type_dict[fi.ctype]["j_type"], \
+ j_name=fi.jname, \
+ j_args=", ".join(j_args), \
+ jn_name=fi.jname + '_' + str(suffix_counter), \
+ jn_args_call=", ".join( [a.name for a in jn_args] ),\
+ )
+ )
+
+
+ # cpp part:
+ # jni_func(..) { _retval_ = cv_func(..); return _retval_; }
+ ret = "return _retval_;"
+ default = "return 0;"
+ if fi.ctype == "void":
+ ret = "return;"
+ default = "return;"
+ elif not fi.ctype: # c-tor
+ ret = "return (jlong) _retval_;"
+ elif fi.ctype.startswith('vector'): # c-tor
+ ret = "return (jlong) _retval_;"
+ elif fi.ctype == "string":
+ ret = "return env->NewStringUTF(_retval_.c_str());"
+ default = 'return env->NewStringUTF("");'
+ elif fi.ctype in self.classes: # wrapped class:
+ ret = "return (jlong) new %s(_retval_);" % fi.ctype
+ elif ret_type in self.classes: # pointer to wrapped class:
+ ret = "return (jlong) _retval_;"
+ elif type_dict[fi.ctype]["jni_type"] == "jdoubleArray":
+ ret = "return _da_retval_;"
+
+ # hack: replacing func call with property set/get
+ name = fi.name
+ if prop_name:
+ if args:
+ name = prop_name + " = "
+ else:
+ name = prop_name + ";//"
+
+ cvname = "cv::" + name
+ retval = fi.ctype + " _retval_ = "
+ if fi.ctype == "void":
+ retval = ""
+ elif fi.ctype.startswith('vector'):
+ retval = type_dict[fi.ctype]['jni_var'] % {"n" : '_ret_val_vector_'} + " = "
+ c_epilogue.append("Mat* _retval_ = new Mat();")
+ c_epilogue.append(fi.ctype+"_to_Mat(_ret_val_vector_, *_retval_);")
+ if fi.classname:
+ if not fi.ctype: # c-tor
+ retval = fi.classname + "* _retval_ = "
+ cvname = "new " + fi.classname
+ elif fi.static:
+ cvname = "%s::%s" % (fi.classname, name)
+ else:
+ cvname = "me->" + name
+ c_prologue.append(\
+ "%(cls)s* me = (%(cls)s*) self; //TODO: check for NULL" \
+ % { "cls" : fi.classname} \
+ )
+ cvargs = []
+ for a in args:
+ if a.pointer:
+ jni_name = "&%(n)s"
+ else:
+ jni_name = "%(n)s"
+ if not a.ctype: # hidden
+ jni_name = a.defval
+ cvargs.append( type_dict[a.ctype].get("jni_name", jni_name) % {"n" : a.name})
+ if "vector" not in a.ctype :
+ if ("I" in a.out or not a.out or a.ctype in self.classes) and "jni_var" in type_dict[a.ctype]: # complex type
+ c_prologue.append(type_dict[a.ctype]["jni_var"] % {"n" : a.name} + ";")
+ if a.out and "I" not in a.out and a.ctype not in self.classes and a.ctype:
+ c_prologue.append("%s %s;" % (a.ctype, a.name))
+
+ rtype = type_dict[fi.ctype].get("jni_type", "jdoubleArray")
+ clazz = self.Module
+ if fi.classname:
+ clazz = self.classes[fi.classname].jname
+ cpp_code.write ( Template( \
+"""
+
+JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname
+ ($args)
+{
+ try {
+ LOGD("$module::$fname()");
+ $prologue
+ $retval$cvname( $cvargs );
+ $epilogue
+ $ret
+    } catch(const cv::Exception& e) {
+        LOGD("$module::$fname() caught cv::Exception: %s", e.what());
+ jclass je = env->FindClass("org/opencv/core/CvException");
+ if(!je) je = env->FindClass("java/lang/Exception");
+ env->ThrowNew(je, e.what());
+ $default
+ } catch (...) {
+ LOGD("$module::$fname() catched unknown exception (...)");
+ jclass je = env->FindClass("java/lang/Exception");
+ env->ThrowNew(je, "Unknown exception in JNI code {$module::$fname()}");
+ $default
+ }
+}
+
+
+""" ).substitute( \
+ rtype = rtype, \
+ module = self.module, \
+ clazz = clazz.replace('_', '_1'), \
+ fname = (fi.jname + '_' + str(suffix_counter)).replace('_', '_1'), \
+ args = ", ".join(["%s %s" % (type_dict[a.ctype].get("jni_type"), a.name) for a in jni_args]), \
+ prologue = "\n ".join(c_prologue), \
+ epilogue = " ".join(c_epilogue), \
+ ret = ret, \
+ cvname = cvname, \
+ cvargs = ", ".join(cvargs), \
+ default = default, \
+ retval = retval, \
+ ) )
+
+ # processing args with default values
+ if not args or not args[-1].defval:
+ break
+ while args and args[-1].defval:
+ # 'smart' overloads filtering
+ a = args.pop()
+ if a.name in ('mask', 'dtype', 'ddepth', 'lineType', 'borderType', 'borderMode', 'criteria'):
+ break
+
+
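
The loop above emits one Java/JNI overload per pass, then pops trailing defaulted arguments; popping stops as soon as a "significant" argument (the tuple above) is removed, so consecutive overloads differ by a meaningful parameter. A trace for a typical signature:

# void add(Mat src1, Mat src2, Mat& dst, Mat mask = Mat(), int dtype = -1)
#
# pass 1: add_0(src1, src2, dst, mask, dtype)   # full argument list
#         pop 'dtype'  -> in the tuple, stop popping
# pass 2: add_1(src1, src2, dst, mask)
#         pop 'mask'   -> in the tuple, stop popping
# pass 3: add_2(src1, src2, dst)                # no trailing defaults left; loop ends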
+
+ def gen_class(self, name):
+ # generate code for the class
+ ci = self.classes[name]
+ # constants
+ if ci.private_consts:
+ self.java_code[name]['j_code'].write("""
+ private static final int
+ %s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in ci.private_consts])
+ )
+ if ci.consts:
+ self.java_code[name]['j_code'].write("""
+ public static final int
+ %s;\n\n""" % (",\n"+" "*12).join(["%s = %s" % (c.name, c.value) for c in ci.consts])
+ )
+ # c-tors
+ fflist = ci.methods.items()
+ fflist.sort()
+ for n, ffi in fflist:
+ if ffi.isconstructor:
+ for fi in ffi.funcs:
+ fi.jname = ci.jname
+ self.gen_func(fi)
+ # other methods
+ for n, ffi in fflist:
+ if not ffi.isconstructor:
+ for fi in ffi.funcs:
+ self.gen_func(fi)
+ # props
+ for pi in ci.props:
+ # getter
+ getter_name = name + ".get_" + pi.name
+ #print getter_name
+ fi = FuncInfo( [getter_name, pi.ctype, [], []] ) # [ funcname, return_ctype, [modifiers], [args] ]
+ self.gen_func(fi, pi.name)
+ if pi.rw:
+ #setter
+ setter_name = name + ".set_" + pi.name
+ #print setter_name
+ fi = FuncInfo( [ setter_name, "void", [], [ [pi.ctype, pi.name, "", [], ""] ] ] )
+ self.gen_func(fi, pi.name)
+
+ # manual ports
+ if name in ManualFuncs:
+ for func in ManualFuncs[name].keys():
+ self.java_code[name]["j_code"].write ( ManualFuncs[name][func]["j_code"] )
+ self.java_code[name]["jn_code"].write( ManualFuncs[name][func]["jn_code"] )
+ self.cpp_code.write( ManualFuncs[name][func]["cpp_code"] )
+
+ if name != self.Module:
+ # finalize()
+ self.java_code[name]["j_code"].write(
+"""
+ @Override
+ protected void finalize() throws Throwable {
+ delete(nativeObj);
+ }
+""" )
+
+ self.java_code[name]["jn_code"].write(
+"""
+ // native support for java finalize()
+ private static native void delete(long nativeObj);
+""" )
+
+ # native support for java finalize()
+ self.cpp_code.write( \
+"""
+//
+// native support for java finalize()
+// static void %(cls)s::delete( __int64 self )
+//
+
+JNIEXPORT void JNICALL Java_org_opencv_%(module)s_%(j_cls)s_delete
+ (JNIEnv* env, jclass cls, jlong self)
+{
+ delete (%(cls)s*) self;
+}
+
+""" % {"module" : module, "cls" : name, "j_cls" : ci.jname}
+ )
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 4:
+ print "Usage:\n", \
+ os.path.basename(sys.argv[0]), \
+ "<full path to hdr_parser.py> <module name> <C++ header> [<C++ header>...]"
+ print "Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv])
+ exit(0)
+
+ dstdir = "."
+ hdr_parser_path = os.path.abspath(sys.argv[1])
+ if hdr_parser_path.endswith(".py"):
+ hdr_parser_path = os.path.dirname(hdr_parser_path)
+ sys.path.append(hdr_parser_path)
+ import hdr_parser
+ module = sys.argv[2]
+ srcfiles = sys.argv[3:]
+ print "Generating module '" + module + "' from headers:\n\t" + "\n\t".join(srcfiles)
+ generator = JavaWrapperGenerator()
+ generator.gen(srcfiles, module, dstdir)
+
import os, sys, re, string, glob
-allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "gpu", "androidcamera", "haartraining", "java", "python", "stitching", "traincascade", "ts", "photo"]
+allmodules = ["core", "flann", "imgproc", "ml", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "gpu", "androidcamera", "haartraining", "java", "python", "stitching", "traincascade", "ts", "photo", "videostab"]
verbose = False
show_warnings = True
show_errors = True
inf.close()
outf.close()
+ def FinishParagraph(self, text):
+ return text[:-1] + "</p>\n"
+
def ReformatForJavadoc(self, s):
out = ""
+ in_paragraph = False
+ in_list = False
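+ # simple state machine over the lines: plain runs of text are wrapped in
+ # <p>...</p>, while '*' and '#.' items become <ul><li> entries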
for term in s.split("\n"):
- if term.startswith("*") or term.startswith("#."):
- term = " " + term
+ in_list_item = False
+ if term.startswith("*"):
+ in_list_item = True
+ if in_paragraph:
+ out = self.FinishParagraph(out)
+ in_paragraph = False
+ if not in_list:
+ out += " * <ul>\n"
+ in_list = True
+ term = " <li>" + term[1:]
+
+ if term.startswith("#."):
+ in_list_item = True
+ if in_paragraph:
+ out = self.FinishParagraph(out)
+ in_paragraph = False
+ if not in_list:
+ out += " * <ul>\n"
+ in_list = True
+ term = " <li>" + term[2:]
+
if not term:
+ if in_paragraph:
+ out = self.FinishParagraph(out)
+ in_paragraph = False
out += " *\n"
else:
+ if in_list and not in_list_item:
+ in_list = False
+ if out.endswith(" *\n"):
+ out = out[:-3] + " * </ul>\n *\n"
+ else:
+ out += " * </ul>\n"
pos_start = 0
pos_end = min(77, len(term)-1)
while pos_start < pos_end:
pos_end += 1
else:
break
- out += " * " + term[pos_start:pos_end+1].rstrip() + "\n"
+ if in_paragraph or term.startswith("@") or in_list_item:
+ out += " * "
+ else:
+ in_paragraph = True
+ out += " * <p>"
+ out += term[pos_start:pos_end+1].rstrip() + "\n"
pos_start = pos_end + 1
pos_end = min(pos_start + 77, len(term)-1)
+
+ if in_paragraph:
+ out = self.FinishParagraph(out)
+ if in_list:
+ out += " * </ul>\n"
return out
- def getJavaName(self, decl):
+ def getJavaName(self, decl, methodSeparator = "."):
name = "org.opencv."
name += decl["module"]
if "class" in decl:
else:
name += "." + decl["module"].capitalize()
if "method" in decl:
- name += "." + decl["method"]
+ name += methodSeparator + decl["method"]
return name
def getDocURL(self, decl):
- url = "http://opencv.itseez.com/modules/"
+ url = "http://docs.opencv.org/modules/"
url += decl["module"]
url += "/doc/"
url += os.path.basename(decl["file"]).replace(".rst",".html")
for see in decl["seealso"]:
seedecl = self.definitions.get(see,None)
if seedecl:
- doc += prefix + " * @see " + self.getJavaName(seedecl) + "\n"
+ doc += prefix + " * @see " + self.getJavaName(seedecl, "#") + "\n"
else:
doc += prefix + " * @see " + see.replace("::",".") + "\n"
prefix = " *\n"
def normalizeText(self, s):
if s is None:
return s
- s = re.sub(r"\.\. math::[ ]*\n+(.*?)(\n[ ]*\n|$)", mathReplace2, s)
+
+ s = re.sub(r"\.\. math::[ \r]*\n+((.|\n)*?)(\n[ \r]*\n|$)", mathReplace2, s)
s = re.sub(r":math:`([^`]+?)`", mathReplace, s)
s = re.sub(r" *:sup:", "^", s)
s = re.sub(r"[\n ]+\.", ".", s)
s = s.replace("**", "")
+ s = re.sub(r"``([^\n]+?)``", "<code>\\1</code>", s)
s = s.replace("``", "\"")
s = s.replace("`", "\"")
s = s.replace("\"\"", "\"")
m = m.replace("}", ")")
#print "%s ===> %s" % (match.group(0), m)
- return m
+ return "<em>" + m + "</em>"
if __name__ == "__main__":
if len(sys.argv) < 2:
\r
#include "converters.h"\r
\r
-#ifdef DEBUG\r
#include <android/log.h>\r
-#define MODULE_LOG_TAG "OpenCV.core.Mat"\r
-#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, MODULE_LOG_TAG, __VA_ARGS__))\r
-#else //DEBUG\r
+#define LOG_TAG "org.opencv.core.Mat"\r
+#define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))\r
+#ifdef DEBUG\r
+#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))\r
+#else //!DEBUG\r
#define LOGD(...)\r
#endif //DEBUG\r
\r
\r
\r
//\r
+// Mat Mat::setTo(Scalar value, Mat mask = Mat())\r
+//\r
+\r
+\r
+JNIEXPORT jlong JNICALL Java_org_opencv_core_Mat_n_1setTo__JDDDDJ\r
+ (JNIEnv* env, jclass cls, jlong self, jdouble s_val0, jdouble s_val1, jdouble s_val2, jdouble s_val3, jlong mask_nativeObj)\r
+{\r
+ try {\r
+ LOGD("Mat::n_1setTo__JDDDDJ()");\r
+ Mat* me = (Mat*) self; //TODO: check for NULL\r
+ Scalar s(s_val0, s_val1, s_val2, s_val3);\r
+ Mat& mask = *((Mat*)mask_nativeObj);\r
+ Mat _retval_ = me->setTo( s, mask );\r
+ \r
+ return (jlong) new Mat(_retval_);\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::n_1setTo__JDDDDJ() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::n_1setTo__JDDDDJ() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::n_1setTo__JDDDDJ()}");\r
+ return 0;\r
+ }\r
+}\r
+\r
+\r
+\r
+//\r
// Mat Mat::setTo(Mat value, Mat mask = Mat())\r
//\r
\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nPutD\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jdoubleArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(!me || !me->data) return 0; // no native object behind\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
-\r
- int rest = ((me->rows - row) * me->cols - col) * me->channels();\r
- if(count>rest) count = rest;\r
- int res = count;\r
- double* values = (double*)env->GetPrimitiveArrayCritical(vals, 0);\r
- double* src = values;\r
- int r, c;\r
- for(c=col; c<me->cols && count>0; c++)\r
- {\r
- switch(me->depth()) {\r
- case CV_8U: PUT_ITEM(uchar, row, c); break;\r
- case CV_8S: PUT_ITEM(schar, row, c); break;\r
- case CV_16U: PUT_ITEM(ushort, row, c); break;\r
- case CV_16S: PUT_ITEM(short, row, c); break;\r
- case CV_32S: PUT_ITEM(int, row, c); break;\r
- case CV_32F: PUT_ITEM(float, row, c); break;\r
- case CV_64F: PUT_ITEM(double, row, c); break;\r
- }\r
- }\r
-\r
- for(r=row+1; r<me->rows && count>0; r++)\r
- for(c=0; c<me->cols && count>0; c++)\r
+ try {\r
+ LOGD("Mat::nPutD()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(!me || !me->data) return 0; // no native object behind\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+\r
+ int rest = ((me->rows - row) * me->cols - col) * me->channels();\r
+ if(count>rest) count = rest;\r
+ int res = count;\r
+ double* values = (double*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ double* src = values;\r
+ int r, c;\r
+ for(c=col; c<me->cols && count>0; c++)\r
{\r
switch(me->depth()) {\r
- case CV_8U: PUT_ITEM(uchar, r, c); break;\r
- case CV_8S: PUT_ITEM(schar, r, c); break;\r
- case CV_16U: PUT_ITEM(ushort, r, c); break;\r
- case CV_16S: PUT_ITEM(short, r, c); break;\r
- case CV_32S: PUT_ITEM(int, r, c); break;\r
- case CV_32F: PUT_ITEM(float, r, c); break;\r
- case CV_64F: PUT_ITEM(double, r, c); break;\r
+ case CV_8U: PUT_ITEM(uchar, row, c); break;\r
+ case CV_8S: PUT_ITEM(schar, row, c); break;\r
+ case CV_16U: PUT_ITEM(ushort, row, c); break;\r
+ case CV_16S: PUT_ITEM(short, row, c); break;\r
+ case CV_32S: PUT_ITEM(int, row, c); break;\r
+ case CV_32F: PUT_ITEM(float, row, c); break;\r
+ case CV_64F: PUT_ITEM(double, row, c); break;\r
}\r
}\r
\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ for(r=row+1; r<me->rows && count>0; r++)\r
+ for(c=0; c<me->cols && count>0; c++)\r
+ {\r
+ switch(me->depth()) {\r
+ case CV_8U: PUT_ITEM(uchar, r, c); break;\r
+ case CV_8S: PUT_ITEM(schar, r, c); break;\r
+ case CV_16U: PUT_ITEM(ushort, r, c); break;\r
+ case CV_16S: PUT_ITEM(short, r, c); break;\r
+ case CV_32S: PUT_ITEM(int, r, c); break;\r
+ case CV_32F: PUT_ITEM(float, r, c); break;\r
+ case CV_64F: PUT_ITEM(double, r, c); break;\r
+ }\r
+ }\r
+\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nPutD() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nPutD() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nPutD()}");\r
+ return 0;\r
+ }\r
}\r
\r
\r
if(! buff) return 0;\r
\r
count *= sizeof(T);\r
- int rest = ((m->rows - row) * m->cols - col) * m->channels() * sizeof(T);\r
+ int rest = ((m->rows - row) * m->cols - col) * m->elemSize();\r
if(count>rest) count = rest;\r
int res = count;\r
\r
memcpy(m->ptr(row, col), buff, count);\r
} else {\r
// row by row\r
- int num = (m->cols - col - 1) * m->channels() * sizeof(T); // 1st partial row\r
+ int num = (m->cols - col) * m->elemSize(); // 1st partial row\r
if(count<num) num = count;\r
uchar* data = m->ptr(row++, col);\r
while(count>0){\r
memcpy(data, buff, num);\r
count -= num;\r
buff += num;\r
- num = m->cols * m->channels() * sizeof(T);\r
+ num = m->cols * m->elemSize();\r
if(count<num) num = count;\r
data = m->ptr(row++, 0);\r
}\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nPutB\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jbyteArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_8U && me->depth() != CV_8S) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_put<char>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nPutB()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_8U && me->depth() != CV_8S) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_put<char>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nPutB() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nPutB() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nPutB()}");\r
+ return 0;\r
+ }\r
}\r
\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nPutS\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jshortArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_16U && me->depth() != CV_16S) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_put<short>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nPutS()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_16U && me->depth() != CV_16S) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_put<short>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nPutS() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nPutS() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nPutS()}");\r
+ return 0;\r
+ }\r
}\r
\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nPutI\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jintArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_32S) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_put<int>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nPutI()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_32S) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_put<int>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nPutI() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nPutI() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nPutI()}");\r
+ return 0;\r
+ }\r
}\r
\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nPutF\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jfloatArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_32F) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_put<float>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nPutF()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_32F) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_put<float>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nPutF() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nPutF() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nPutF()}");\r
+ return 0;\r
+ }\r
}\r
\r
\r
if(! m) return 0;\r
if(! buff) return 0;\r
\r
- count *= sizeof(T);\r
- int rest = ((m->rows - row) * m->cols - col) * m->channels() * sizeof(T);\r
- if(count>rest) count = rest;\r
- int res = count;\r
+ int bytesToCopy = count * sizeof(T);\r
+ int bytesRestInMat = ((m->rows - row) * m->cols - col) * m->elemSize();\r
+ if(bytesToCopy > bytesRestInMat) bytesToCopy = bytesRestInMat;\r
+ int res = bytesToCopy;\r
\r
if( m->isContinuous() )\r
{\r
- memcpy(buff, m->ptr(row, col), count);\r
+ memcpy(buff, m->ptr(row, col), bytesToCopy);\r
} else {\r
// row by row\r
- int num = (m->cols - col - 1) * m->channels() * sizeof(T); // 1st partial row\r
- if(count<num) num = count;\r
- uchar* data = m->ptr(row++, col);\r
- while(count>0){//TODO: recheck this cycle for the case col!=0\r
- memcpy(buff, data, num);\r
- count -= num;\r
- buff += num;\r
- num = m->cols * m->channels() * sizeof(T);\r
- if(count<num) num = count;\r
- data = m->ptr(row++, 0);\r
+ int bytesInRow = (m->cols - col) * m->elemSize(); // 1st partial row\r
+ while(bytesToCopy > 0)\r
+ {\r
+ int len = std::min(bytesToCopy, bytesInRow);\r
+ memcpy(buff, m->ptr(row, col), len);\r
+ bytesToCopy -= len;\r
+ buff += len;\r
+ row++;\r
+ col = 0;\r
+ bytesInRow = m->cols * m->elemSize();\r
}\r
}\r
return res;\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nGetB\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jbyteArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_8U && me->depth() != CV_8S) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_get<char>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nGetB()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_8U && me->depth() != CV_8S) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_get<char>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nGetB() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nGetB() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nGetB()}");\r
+ return 0;\r
+ }\r
}\r
\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nGetS\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jshortArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_16U && me->depth() != CV_16S) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_get<short>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nGetS()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_16U && me->depth() != CV_16S) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_get<short>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nGetS() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nGetS() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nGetS()}");\r
+ return 0;\r
+ }\r
}\r
\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nGetI\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jintArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_32S) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_get<int>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nGetI()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_32S) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_get<int>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nGetI() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nGetI() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nGetI()}");\r
+ return 0;\r
+ }\r
}\r
\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nGetF\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jfloatArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_32F) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_get<float>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nGetF()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_32F) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_get<float>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nGetF() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nGetF() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nGetF()}");\r
+ return 0;\r
+ }\r
}\r
\r
JNIEXPORT jint JNICALL Java_org_opencv_core_Mat_nGetD\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count, jdoubleArray vals)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->depth() != CV_64F) return 0; // incompatible type\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
- \r
- char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
- int res = mat_get<double>(me, row, col, count, values);\r
- env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
- return res;\r
+ try {\r
+ LOGD("Mat::nGetD()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->depth() != CV_64F) return 0; // incompatible type\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+ \r
+ char* values = (char*)env->GetPrimitiveArrayCritical(vals, 0);\r
+ int res = mat_get<double>(me, row, col, count, values);\r
+ env->ReleasePrimitiveArrayCritical(vals, values, 0);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nGetD() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nGetD() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nGetD()}");\r
+ return 0;\r
+ }\r
}\r
\r
JNIEXPORT jdoubleArray JNICALL Java_org_opencv_core_Mat_nGet\r
(JNIEnv* env, jclass cls, jlong self, jint row, jint col, jint count)\r
{\r
- cv::Mat* me = (cv::Mat*) self;\r
- if(! self) return 0; // no native object behind\r
- if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
-\r
- jdoubleArray res = env->NewDoubleArray(me->channels());\r
- if(res){\r
- jdouble buff[me->channels()];\r
- int i;\r
- switch(me->depth()){\r
- case CV_8U: for(i=0; i<me->channels(); i++) buff[i] = *((unsigned char*) me->ptr(row, col) + i); break;\r
- case CV_8S: for(i=0; i<me->channels(); i++) buff[i] = *((signed char*) me->ptr(row, col) + i); break;\r
- case CV_16U: for(i=0; i<me->channels(); i++) buff[i] = *((unsigned short*)me->ptr(row, col) + i); break;\r
- case CV_16S: for(i=0; i<me->channels(); i++) buff[i] = *((signed short*) me->ptr(row, col) + i); break;\r
- case CV_32S: for(i=0; i<me->channels(); i++) buff[i] = *((int*) me->ptr(row, col) + i); break;\r
- case CV_32F: for(i=0; i<me->channels(); i++) buff[i] = *((float*) me->ptr(row, col) + i); break;\r
- case CV_64F: for(i=0; i<me->channels(); i++) buff[i] = *((double*) me->ptr(row, col) + i); break;\r
+ try {\r
+ LOGD("Mat::nGet()");\r
+ cv::Mat* me = (cv::Mat*) self;\r
+ if(! self) return 0; // no native object behind\r
+ if(me->rows<=row || me->cols<=col) return 0; // indexes out of range\r
+\r
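+ // marshal one element as double[channels()], widening from the native depth\r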
+ jdoubleArray res = env->NewDoubleArray(me->channels());\r
+ if(res){\r
+ jdouble buff[me->channels()];\r
+ int i;\r
+ switch(me->depth()){\r
+ case CV_8U: for(i=0; i<me->channels(); i++) buff[i] = *((unsigned char*) me->ptr(row, col) + i); break;\r
+ case CV_8S: for(i=0; i<me->channels(); i++) buff[i] = *((signed char*) me->ptr(row, col) + i); break;\r
+ case CV_16U: for(i=0; i<me->channels(); i++) buff[i] = *((unsigned short*)me->ptr(row, col) + i); break;\r
+ case CV_16S: for(i=0; i<me->channels(); i++) buff[i] = *((signed short*) me->ptr(row, col) + i); break;\r
+ case CV_32S: for(i=0; i<me->channels(); i++) buff[i] = *((int*) me->ptr(row, col) + i); break;\r
+ case CV_32F: for(i=0; i<me->channels(); i++) buff[i] = *((float*) me->ptr(row, col) + i); break;\r
+ case CV_64F: for(i=0; i<me->channels(); i++) buff[i] = *((double*) me->ptr(row, col) + i); break;\r
+ }\r
+ env->SetDoubleArrayRegion(res, 0, me->channels(), buff);\r
}\r
- env->SetDoubleArrayRegion(res, 0, me->channels(), buff);\r
+ return res;\r
+ } catch(cv::Exception e) {\r
+ LOGD("Mat::nGet() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return 0;\r
+ } catch (...) {\r
+ LOGD("Mat::nGet() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nGet()}");\r
+ return 0;\r
}\r
- return res;\r
}\r
\r
JNIEXPORT jstring JNICALL Java_org_opencv_core_Mat_nDump\r
{\r
cv::Mat* me = (cv::Mat*) self; //TODO: check for NULL\r
std::stringstream s;\r
- s << *me;\r
- return env->NewStringUTF(s.str().c_str());\r
+ try {\r
+ LOGD("Mat::nDump()");\r
+ \r
+ s << *me;\r
+ return env->NewStringUTF(s.str().c_str());\r
+ } catch(cv::Exception e) {\r
+ LOGE("Mat::nDump() catched cv::Exception: %s", e.what());\r
+ jclass je = env->FindClass("org/opencv/core/CvException");\r
+ if(!je) je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, e.what());\r
+ return env->NewStringUTF("ERROR");\r
+ } catch (...) {\r
+ LOGE("Mat::nDump() catched unknown exception (...)");\r
+ jclass je = env->FindClass("java/lang/Exception");\r
+ env->ThrowNew(je, "Unknown exception in JNI code {Mat::nDump()}");\r
+ return env->NewStringUTF("ERROR");\r
+ }\r
}\r
\r
\r
-#include "converters.h"\r
-\r
-#ifdef DEBUG\r
-#include <android/log.h>\r
-#define MODULE_LOG_TAG "OpenCV.converters"\r
-#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, MODULE_LOG_TAG, __VA_ARGS__))\r
-#else //DEBUG\r
-#define LOGD(...)\r
-#endif //DEBUG\r
-\r
-using namespace cv;\r
-\r
-#define CHECK_MAT(cond) if(!(cond)){ LOGD("FAILED: " #cond); return; }\r
-\r
-\r
-// vector_int\r
-\r
-void Mat_to_vector_int(Mat& mat, vector<int>& v_int)\r
-{\r
- v_int.clear();\r
- CHECK_MAT(mat.type()==CV_32SC1 && mat.cols==1);\r
- v_int = (vector<int>) mat;\r
-}\r
-\r
-void vector_int_to_Mat(vector<int>& v_int, Mat& mat)\r
-{\r
- mat = Mat(v_int, true);\r
-}\r
-\r
-\r
-//vector_double\r
-\r
-void Mat_to_vector_double(Mat& mat, vector<double>& v_double)\r
-{\r
- v_double.clear();\r
- CHECK_MAT(mat.type()==CV_64FC1 && mat.cols==1);\r
- v_double = (vector<double>) mat;\r
-}\r
-\r
-void vector_double_to_Mat(vector<double>& v_double, Mat& mat)\r
-{\r
- mat = Mat(v_double, true);\r
-}\r
-\r
-\r
-// vector_float\r
-\r
-void Mat_to_vector_float(Mat& mat, vector<float>& v_float)\r
-{\r
- v_float.clear();\r
- CHECK_MAT(mat.type()==CV_32FC1 && mat.cols==1);\r
- v_float = (vector<float>) mat;\r
-}\r
-\r
-void vector_float_to_Mat(vector<float>& v_float, Mat& mat)\r
-{\r
- mat = Mat(v_float, true);\r
-}\r
-\r
-\r
-//vector_uchar\r
-\r
-void Mat_to_vector_uchar(Mat& mat, vector<uchar>& v_uchar)\r
-{\r
- v_uchar.clear();\r
- CHECK_MAT(mat.type()==CV_8UC1 && mat.cols==1);\r
- v_uchar = (vector<uchar>) mat;\r
-}\r
-\r
-void vector_uchar_to_Mat(vector<uchar>& v_uchar, Mat& mat)\r
-{\r
- mat = Mat(v_uchar, true);\r
-}\r
-\r
-void Mat_to_vector_char(Mat& mat, vector<char>& v_char)\r
-{\r
- v_char.clear();\r
- CHECK_MAT(mat.type()==CV_8SC1 && mat.cols==1);\r
- v_char = (vector<char>) mat;\r
-}\r
-\r
-void vector_char_to_Mat(vector<char>& v_char, Mat& mat)\r
-{\r
- mat = Mat(v_char, true);\r
-}\r
-\r
-\r
-//vector_Rect\r
-\r
-void Mat_to_vector_Rect(Mat& mat, vector<Rect>& v_rect)\r
-{\r
- v_rect.clear();\r
- CHECK_MAT(mat.type()==CV_32SC4 && mat.cols==1);\r
- v_rect = (vector<Rect>) mat;\r
-}\r
-\r
-void vector_Rect_to_Mat(vector<Rect>& v_rect, Mat& mat)\r
-{\r
- mat = Mat(v_rect, true);\r
-}\r
-\r
-\r
-//vector_Point\r
-void Mat_to_vector_Point(Mat& mat, vector<Point>& v_point)\r
-{\r
- v_point.clear();\r
- CHECK_MAT(mat.type()==CV_32SC2 && mat.cols==1);\r
- v_point = (vector<Point>) mat;\r
-}\r
-\r
-//vector_Point2f\r
-void Mat_to_vector_Point2f(Mat& mat, vector<Point2f>& v_point)\r
-{\r
- v_point.clear();\r
- CHECK_MAT(mat.type()==CV_32FC2 && mat.cols==1);\r
- v_point = (vector<Point2f>) mat;\r
-}\r
-\r
-//vector_Point2d\r
-void Mat_to_vector_Point2d(Mat& mat, vector<Point2d>& v_point)\r
-{\r
- v_point.clear();\r
- CHECK_MAT(mat.type()==CV_64FC2 && mat.cols==1);\r
- v_point = (vector<Point2d>) mat;\r
-}\r
-\r
-\r
-//vector_Point3i\r
-void Mat_to_vector_Point3i(Mat& mat, vector<Point3i>& v_point)\r
-{\r
- v_point.clear();\r
- CHECK_MAT(mat.type()==CV_32SC3 && mat.cols==1);\r
- v_point = (vector<Point3i>) mat;\r
-}\r
-\r
-//vector_Point3f\r
-void Mat_to_vector_Point3f(Mat& mat, vector<Point3f>& v_point)\r
-{\r
- v_point.clear();\r
- CHECK_MAT(mat.type()==CV_32FC3 && mat.cols==1);\r
- v_point = (vector<Point3f>) mat;\r
-}\r
-\r
-//vector_Point3d\r
-void Mat_to_vector_Point3d(Mat& mat, vector<Point3d>& v_point)\r
-{\r
- v_point.clear();\r
- CHECK_MAT(mat.type()==CV_64FC3 && mat.cols==1);\r
- v_point = (vector<Point3d>) mat;\r
-}\r
-\r
-\r
-void vector_Point_to_Mat(vector<Point>& v_point, Mat& mat)\r
-{\r
- mat = Mat(v_point, true);\r
-}\r
-\r
-void vector_Point2f_to_Mat(vector<Point2f>& v_point, Mat& mat)\r
-{\r
- mat = Mat(v_point, true);\r
-}\r
-\r
-void vector_Point2d_to_Mat(vector<Point2d>& v_point, Mat& mat)\r
-{\r
- mat = Mat(v_point, true);\r
-}\r
-\r
-void vector_Point3i_to_Mat(vector<Point3i>& v_point, Mat& mat)\r
-{\r
- mat = Mat(v_point, true);\r
-}\r
-\r
-void vector_Point3f_to_Mat(vector<Point3f>& v_point, Mat& mat)\r
-{\r
- mat = Mat(v_point, true);\r
-}\r
-\r
-void vector_Point3d_to_Mat(vector<Point3d>& v_point, Mat& mat)\r
-{\r
- mat = Mat(v_point, true);\r
-}\r
-\r
-#ifdef HAVE_OPENCV_FEATURES2D\r
-//vector_KeyPoint\r
-void Mat_to_vector_KeyPoint(Mat& mat, vector<KeyPoint>& v_kp)\r
-{\r
- v_kp.clear();\r
- CHECK_MAT(mat.type()==CV_32FC(7) && mat.cols==1);\r
- for(int i=0; i<mat.rows; i++)\r
- {\r
- Vec<float, 7> v = mat.at< Vec<float, 7> >(i, 0);\r
- KeyPoint kp(v[0], v[1], v[2], v[3], v[4], (int)v[5], (int)v[6]);\r
- v_kp.push_back(kp);\r
- }\r
- return;\r
-}\r
-\r
-\r
-void vector_KeyPoint_to_Mat(vector<KeyPoint>& v_kp, Mat& mat)\r
-{\r
- int count = v_kp.size();\r
- mat.create(count, 1, CV_32FC(7));\r
- for(int i=0; i<count; i++)\r
- {\r
- KeyPoint kp = v_kp[i];\r
- mat.at< Vec<float, 7> >(i, 0) = Vec<float, 7>(kp.pt.x, kp.pt.y, kp.size, kp.angle, kp.response, kp.octave, kp.class_id);\r
- }\r
-}\r
-#endif\r
-\r
-\r
-//vector_Mat\r
-void Mat_to_vector_Mat(cv::Mat& mat, std::vector<cv::Mat>& v_mat)\r
-{\r
- v_mat.clear();\r
- if(mat.type() == CV_32SC2 && mat.cols == 1)\r
- {\r
- v_mat.reserve(mat.rows);\r
- for(int i=0; i<mat.rows; i++)\r
- {\r
- Vec<int, 2> a = mat.at< Vec<int, 2> >(i, 0);\r
- long long addr = (((long long)a[0])<<32) | a[1];\r
- Mat& m = *( (Mat*) addr );\r
- v_mat.push_back(m);\r
- }\r
- } else {\r
- LOGD("Mat_to_vector_Mat() FAILED: mat.type() == CV_32SC2 && mat.cols == 1");\r
- }\r
-}\r
-\r
-\r
-void vector_Mat_to_Mat(std::vector<cv::Mat>& v_mat, cv::Mat& mat)\r
-{\r
- int count = v_mat.size();\r
- mat.create(count, 1, CV_32SC2);\r
- for(int i=0; i<count; i++)\r
- {\r
- long long addr = (long long) new Mat(v_mat[i]);\r
- mat.at< Vec<int, 2> >(i, 0) = Vec<int, 2>(addr>>32, addr&0xffffffff);\r
- }\r
-}\r
-\r
-#ifdef HAVE_OPENCV_FEATURES2D\r
-//vector_DMatch\r
-void Mat_to_vector_DMatch(Mat& mat, vector<DMatch>& v_dm)\r
-{\r
- v_dm.clear();\r
- CHECK_MAT(mat.type()==CV_32FC4 && mat.cols==1);\r
- for(int i=0; i<mat.rows; i++)\r
- {\r
- Vec<float, 4> v = mat.at< Vec<float, 4> >(i, 0);\r
- DMatch dm((int)v[0], (int)v[1], (int)v[2], v[3]);\r
- v_dm.push_back(dm);\r
- }\r
- return;\r
-}\r
-\r
-\r
-void vector_DMatch_to_Mat(vector<DMatch>& v_dm, Mat& mat)\r
-{\r
- int count = v_dm.size();\r
- mat.create(count, 1, CV_32FC4);\r
- for(int i=0; i<count; i++)\r
- {\r
- DMatch dm = v_dm[i];\r
- mat.at< Vec<float, 4> >(i, 0) = Vec<float, 4>(dm.queryIdx, dm.trainIdx, dm.imgIdx, dm.distance);\r
- }\r
-}\r
-#endif\r
-\r
-void Mat_to_vector_vector_Point(Mat& mat, vector< vector< Point > >& vv_pt)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( mat.rows );\r
- Mat_to_vector_Mat(mat, vm);\r
- for(size_t i=0; i<vm.size(); i++)\r
- {\r
- vector<Point> vpt;\r
- Mat_to_vector_Point(vm[i], vpt);\r
- vv_pt.push_back(vpt);\r
- }\r
-}\r
-\r
-#ifdef HAVE_OPENCV_FEATURES2D\r
-void Mat_to_vector_vector_KeyPoint(Mat& mat, vector< vector< KeyPoint > >& vv_kp)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( mat.rows );\r
- Mat_to_vector_Mat(mat, vm);\r
- for(size_t i=0; i<vm.size(); i++)\r
- {\r
- vector<KeyPoint> vkp;\r
- Mat_to_vector_KeyPoint(vm[i], vkp);\r
- vv_kp.push_back(vkp);\r
- }\r
-}\r
-\r
-void vector_vector_KeyPoint_to_Mat(vector< vector< KeyPoint > >& vv_kp, Mat& mat)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( vv_kp.size() );\r
- for(size_t i=0; i<vv_kp.size(); i++)\r
- {\r
- Mat m;\r
- vector_KeyPoint_to_Mat(vv_kp[i], m);\r
- vm.push_back(m);\r
- }\r
- vector_Mat_to_Mat(vm, mat);\r
-}\r
-\r
-void Mat_to_vector_vector_DMatch(Mat& mat, vector< vector< DMatch > >& vv_dm)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( mat.rows );\r
- Mat_to_vector_Mat(mat, vm);\r
- for(size_t i=0; i<vm.size(); i++)\r
- {\r
- vector<DMatch> vdm;\r
- Mat_to_vector_DMatch(vm[i], vdm);\r
- vv_dm.push_back(vdm);\r
- }\r
-}\r
-\r
-void vector_vector_DMatch_to_Mat(vector< vector< DMatch > >& vv_dm, Mat& mat)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( vv_dm.size() );\r
- for(size_t i=0; i<vv_dm.size(); i++)\r
- {\r
- Mat m;\r
- vector_DMatch_to_Mat(vv_dm[i], m);\r
- vm.push_back(m);\r
- }\r
- vector_Mat_to_Mat(vm, mat);\r
-}\r
-#endif\r
-\r
-void Mat_to_vector_vector_char(Mat& mat, vector< vector< char > >& vv_ch)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( mat.rows );\r
- Mat_to_vector_Mat(mat, vm);\r
- for(size_t i=0; i<vm.size(); i++)\r
- {\r
- vector<char> vch;\r
- Mat_to_vector_char(vm[i], vch);\r
- vv_ch.push_back(vch);\r
- }\r
-}\r
-\r
-void vector_vector_char_to_Mat(vector< vector< char > >& vv_ch, Mat& mat)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( vv_ch.size() );\r
- for(size_t i=0; i<vv_ch.size(); i++)\r
- {\r
- Mat m;\r
- vector_char_to_Mat(vv_ch[i], m);\r
- vm.push_back(m);\r
- }\r
- vector_Mat_to_Mat(vm, mat);\r
-}\r
-\r
-void vector_vector_Point2f_to_Mat(vector< vector< Point2f > >& vv_pt, Mat& mat)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( vv_pt.size() );\r
- for(size_t i=0; i<vv_pt.size(); i++)\r
- {\r
- Mat m;\r
- vector_Point2f_to_Mat(vv_pt[i], m);\r
- vm.push_back(m);\r
- }\r
- vector_Mat_to_Mat(vm, mat);\r
-}\r
-\r
-void vector_vector_Point_to_Mat(vector< vector< Point > >& vv_pt, Mat& mat)\r
-{\r
- vector<Mat> vm;\r
- vm.reserve( vv_pt.size() );\r
- for(size_t i=0; i<vv_pt.size(); i++)\r
- {\r
- Mat m;\r
- vector_Point_to_Mat(vv_pt[i], m);\r
- vm.push_back(m);\r
- }\r
- vector_Mat_to_Mat(vm, mat);\r
-}\r
-\r
-void vector_Vec4f_to_Mat(vector<Vec4f>& v_vec, Mat& mat)\r
-{\r
- mat = Mat(v_vec, true);\r
-}\r
-\r
-void vector_Vec6f_to_Mat(vector<Vec6f>& v_vec, Mat& mat)\r
-{\r
- mat = Mat(v_vec, true);\r
-}\r
+#include "converters.h"
+
+#ifdef DEBUG
+#include <android/log.h>
+#define MODULE_LOG_TAG "OpenCV.converters"
+#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, MODULE_LOG_TAG, __VA_ARGS__))
+#else //DEBUG
+#define LOGD(...)
+#endif //DEBUG
+
+using namespace cv;
+
+#define CHECK_MAT(cond) if(!(cond)){ LOGD("FAILED: " #cond); return; }
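+// CHECK_MAT bails out of a converter, logging the failed condition in DEBUG
+// builds, when the incoming Mat does not have the expected type/layout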
+
+
+// vector_int
+
+void Mat_to_vector_int(Mat& mat, vector<int>& v_int)
+{
+ v_int.clear();
+ CHECK_MAT(mat.type()==CV_32SC1 && mat.cols==1);
+ v_int = (vector<int>) mat;
+}
+
+void vector_int_to_Mat(vector<int>& v_int, Mat& mat)
+{
+ mat = Mat(v_int, true);
+}
+
+
+//vector_double
+
+void Mat_to_vector_double(Mat& mat, vector<double>& v_double)
+{
+ v_double.clear();
+ CHECK_MAT(mat.type()==CV_64FC1 && mat.cols==1);
+ v_double = (vector<double>) mat;
+}
+
+void vector_double_to_Mat(vector<double>& v_double, Mat& mat)
+{
+ mat = Mat(v_double, true);
+}
+
+
+// vector_float
+
+void Mat_to_vector_float(Mat& mat, vector<float>& v_float)
+{
+ v_float.clear();
+ CHECK_MAT(mat.type()==CV_32FC1 && mat.cols==1);
+ v_float = (vector<float>) mat;
+}
+
+void vector_float_to_Mat(vector<float>& v_float, Mat& mat)
+{
+ mat = Mat(v_float, true);
+}
+
+
+//vector_uchar
+
+void Mat_to_vector_uchar(Mat& mat, vector<uchar>& v_uchar)
+{
+ v_uchar.clear();
+ CHECK_MAT(mat.type()==CV_8UC1 && mat.cols==1);
+ v_uchar = (vector<uchar>) mat;
+}
+
+void vector_uchar_to_Mat(vector<uchar>& v_uchar, Mat& mat)
+{
+ mat = Mat(v_uchar, true);
+}
+
+void Mat_to_vector_char(Mat& mat, vector<char>& v_char)
+{
+ v_char.clear();
+ CHECK_MAT(mat.type()==CV_8SC1 && mat.cols==1);
+ v_char = (vector<char>) mat;
+}
+
+void vector_char_to_Mat(vector<char>& v_char, Mat& mat)
+{
+ mat = Mat(v_char, true);
+}
+
+
+//vector_Rect
+
+void Mat_to_vector_Rect(Mat& mat, vector<Rect>& v_rect)
+{
+ v_rect.clear();
+ CHECK_MAT(mat.type()==CV_32SC4 && mat.cols==1);
+ v_rect = (vector<Rect>) mat;
+}
+
+void vector_Rect_to_Mat(vector<Rect>& v_rect, Mat& mat)
+{
+ mat = Mat(v_rect, true);
+}
+
+
+//vector_Point
+void Mat_to_vector_Point(Mat& mat, vector<Point>& v_point)
+{
+ v_point.clear();
+ CHECK_MAT(mat.type()==CV_32SC2 && mat.cols==1);
+ v_point = (vector<Point>) mat;
+}
+
+//vector_Point2f
+void Mat_to_vector_Point2f(Mat& mat, vector<Point2f>& v_point)
+{
+ v_point.clear();
+ CHECK_MAT(mat.type()==CV_32FC2 && mat.cols==1);
+ v_point = (vector<Point2f>) mat;
+}
+
+//vector_Point2d
+void Mat_to_vector_Point2d(Mat& mat, vector<Point2d>& v_point)
+{
+ v_point.clear();
+ CHECK_MAT(mat.type()==CV_64FC2 && mat.cols==1);
+ v_point = (vector<Point2d>) mat;
+}
+
+
+//vector_Point3i
+void Mat_to_vector_Point3i(Mat& mat, vector<Point3i>& v_point)
+{
+ v_point.clear();
+ CHECK_MAT(mat.type()==CV_32SC3 && mat.cols==1);
+ v_point = (vector<Point3i>) mat;
+}
+
+//vector_Point3f
+void Mat_to_vector_Point3f(Mat& mat, vector<Point3f>& v_point)
+{
+ v_point.clear();
+ CHECK_MAT(mat.type()==CV_32FC3 && mat.cols==1);
+ v_point = (vector<Point3f>) mat;
+}
+
+//vector_Point3d
+void Mat_to_vector_Point3d(Mat& mat, vector<Point3d>& v_point)
+{
+ v_point.clear();
+ CHECK_MAT(mat.type()==CV_64FC3 && mat.cols==1);
+ v_point = (vector<Point3d>) mat;
+}
+
+
+void vector_Point_to_Mat(vector<Point>& v_point, Mat& mat)
+{
+ mat = Mat(v_point, true);
+}
+
+void vector_Point2f_to_Mat(vector<Point2f>& v_point, Mat& mat)
+{
+ mat = Mat(v_point, true);
+}
+
+void vector_Point2d_to_Mat(vector<Point2d>& v_point, Mat& mat)
+{
+ mat = Mat(v_point, true);
+}
+
+void vector_Point3i_to_Mat(vector<Point3i>& v_point, Mat& mat)
+{
+ mat = Mat(v_point, true);
+}
+
+void vector_Point3f_to_Mat(vector<Point3f>& v_point, Mat& mat)
+{
+ mat = Mat(v_point, true);
+}
+
+void vector_Point3d_to_Mat(vector<Point3d>& v_point, Mat& mat)
+{
+ mat = Mat(v_point, true);
+}
+
+#ifdef HAVE_OPENCV_FEATURES2D
+//vector_KeyPoint
+void Mat_to_vector_KeyPoint(Mat& mat, vector<KeyPoint>& v_kp)
+{
+ v_kp.clear();
+ CHECK_MAT(mat.type()==CV_32FC(7) && mat.cols==1);
+ for(int i=0; i<mat.rows; i++)
+ {
+ Vec<float, 7> v = mat.at< Vec<float, 7> >(i, 0);
+ KeyPoint kp(v[0], v[1], v[2], v[3], v[4], (int)v[5], (int)v[6]);
+ v_kp.push_back(kp);
+ }
+ return;
+}
+
+
+void vector_KeyPoint_to_Mat(vector<KeyPoint>& v_kp, Mat& mat)
+{
+ int count = v_kp.size();
+ mat.create(count, 1, CV_32FC(7));
+ for(int i=0; i<count; i++)
+ {
+ KeyPoint kp = v_kp[i];
+ mat.at< Vec<float, 7> >(i, 0) = Vec<float, 7>(kp.pt.x, kp.pt.y, kp.size, kp.angle, kp.response, kp.octave, kp.class_id);
+ }
+}
+#endif
+
+
+//vector_Mat
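+// a vector<Mat> crosses JNI as a CV_32SC2 column Mat: each row packs the
+// upper and lower 32 bits of a native cv::Mat* address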
+void Mat_to_vector_Mat(cv::Mat& mat, std::vector<cv::Mat>& v_mat)
+{
+ v_mat.clear();
+ if(mat.type() == CV_32SC2 && mat.cols == 1)
+ {
+ v_mat.reserve(mat.rows);
+ for(int i=0; i<mat.rows; i++)
+ {
+ Vec<int, 2> a = mat.at< Vec<int, 2> >(i, 0);
+ long long addr = (((long long)a[0])<<32) | a[1];
+ Mat& m = *( (Mat*) addr );
+ v_mat.push_back(m);
+ }
+ } else {
+ LOGD("Mat_to_vector_Mat() FAILED: mat.type() == CV_32SC2 && mat.cols == 1");
+ }
+}
+
+
+void vector_Mat_to_Mat(std::vector<cv::Mat>& v_mat, cv::Mat& mat)
+{
+ int count = v_mat.size();
+ mat.create(count, 1, CV_32SC2);
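+ // each element gets a heap-allocated Mat header; the receiving side wraps
+ // the stored address and is responsible for deleting it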
+ for(int i=0; i<count; i++)
+ {
+ long long addr = (long long) new Mat(v_mat[i]);
+ mat.at< Vec<int, 2> >(i, 0) = Vec<int, 2>(addr>>32, addr&0xffffffff);
+ }
+}
+
+#ifdef HAVE_OPENCV_FEATURES2D
+//vector_DMatch
+void Mat_to_vector_DMatch(Mat& mat, vector<DMatch>& v_dm)
+{
+ v_dm.clear();
+ CHECK_MAT(mat.type()==CV_32FC4 && mat.cols==1);
+ for(int i=0; i<mat.rows; i++)
+ {
+ Vec<float, 4> v = mat.at< Vec<float, 4> >(i, 0);
+ DMatch dm((int)v[0], (int)v[1], (int)v[2], v[3]);
+ v_dm.push_back(dm);
+ }
+ return;
+}
+
+
+void vector_DMatch_to_Mat(vector<DMatch>& v_dm, Mat& mat)
+{
+ int count = v_dm.size();
+ mat.create(count, 1, CV_32FC4);
+ for(int i=0; i<count; i++)
+ {
+ DMatch dm = v_dm[i];
+ mat.at< Vec<float, 4> >(i, 0) = Vec<float, 4>(dm.queryIdx, dm.trainIdx, dm.imgIdx, dm.distance);
+ }
+}
+#endif
+
+void Mat_to_vector_vector_Point(Mat& mat, vector< vector< Point > >& vv_pt)
+{
+ vector<Mat> vm;
+ vm.reserve( mat.rows );
+ Mat_to_vector_Mat(mat, vm);
+ for(size_t i=0; i<vm.size(); i++)
+ {
+ vector<Point> vpt;
+ Mat_to_vector_Point(vm[i], vpt);
+ vv_pt.push_back(vpt);
+ }
+}
+
+void Mat_to_vector_vector_Point2f(Mat& mat, vector< vector< Point2f > >& vv_pt)
+{
+ vector<Mat> vm;
+ vm.reserve( mat.rows );
+ Mat_to_vector_Mat(mat, vm);
+ for(size_t i=0; i<vm.size(); i++)
+ {
+ vector<Point2f> vpt;
+ Mat_to_vector_Point2f(vm[i], vpt);
+ vv_pt.push_back(vpt);
+ }
+}
+
+void Mat_to_vector_vector_Point3f(Mat& mat, vector< vector< Point3f > >& vv_pt)
+{
+ vector<Mat> vm;
+ vm.reserve( mat.rows );
+ Mat_to_vector_Mat(mat, vm);
+ for(size_t i=0; i<vm.size(); i++)
+ {
+ vector<Point3f> vpt;
+ Mat_to_vector_Point3f(vm[i], vpt);
+ vv_pt.push_back(vpt);
+ }
+}
+
+#ifdef HAVE_OPENCV_FEATURES2D
+void Mat_to_vector_vector_KeyPoint(Mat& mat, vector< vector< KeyPoint > >& vv_kp)
+{
+ vector<Mat> vm;
+ vm.reserve( mat.rows );
+ Mat_to_vector_Mat(mat, vm);
+ for(size_t i=0; i<vm.size(); i++)
+ {
+ vector<KeyPoint> vkp;
+ Mat_to_vector_KeyPoint(vm[i], vkp);
+ vv_kp.push_back(vkp);
+ }
+}
+
+void vector_vector_KeyPoint_to_Mat(vector< vector< KeyPoint > >& vv_kp, Mat& mat)
+{
+ vector<Mat> vm;
+ vm.reserve( vv_kp.size() );
+ for(size_t i=0; i<vv_kp.size(); i++)
+ {
+ Mat m;
+ vector_KeyPoint_to_Mat(vv_kp[i], m);
+ vm.push_back(m);
+ }
+ vector_Mat_to_Mat(vm, mat);
+}
+
+void Mat_to_vector_vector_DMatch(Mat& mat, vector< vector< DMatch > >& vv_dm)
+{
+ vector<Mat> vm;
+ vm.reserve( mat.rows );
+ Mat_to_vector_Mat(mat, vm);
+ for(size_t i=0; i<vm.size(); i++)
+ {
+ vector<DMatch> vdm;
+ Mat_to_vector_DMatch(vm[i], vdm);
+ vv_dm.push_back(vdm);
+ }
+}
+
+void vector_vector_DMatch_to_Mat(vector< vector< DMatch > >& vv_dm, Mat& mat)
+{
+ vector<Mat> vm;
+ vm.reserve( vv_dm.size() );
+ for(size_t i=0; i<vv_dm.size(); i++)
+ {
+ Mat m;
+ vector_DMatch_to_Mat(vv_dm[i], m);
+ vm.push_back(m);
+ }
+ vector_Mat_to_Mat(vm, mat);
+}
+#endif
+
+void Mat_to_vector_vector_char(Mat& mat, vector< vector< char > >& vv_ch)
+{
+ vector<Mat> vm;
+ vm.reserve( mat.rows );
+ Mat_to_vector_Mat(mat, vm);
+ for(size_t i=0; i<vm.size(); i++)
+ {
+ vector<char> vch;
+ Mat_to_vector_char(vm[i], vch);
+ vv_ch.push_back(vch);
+ }
+}
+
+void vector_vector_char_to_Mat(vector< vector< char > >& vv_ch, Mat& mat)
+{
+ vector<Mat> vm;
+ vm.reserve( vv_ch.size() );
+ for(size_t i=0; i<vv_ch.size(); i++)
+ {
+ Mat m;
+ vector_char_to_Mat(vv_ch[i], m);
+ vm.push_back(m);
+ }
+ vector_Mat_to_Mat(vm, mat);
+}
+
+void vector_vector_Point_to_Mat(vector< vector< Point > >& vv_pt, Mat& mat)
+{
+ vector<Mat> vm;
+ vm.reserve( vv_pt.size() );
+ for(size_t i=0; i<vv_pt.size(); i++)
+ {
+ Mat m;
+ vector_Point_to_Mat(vv_pt[i], m);
+ vm.push_back(m);
+ }
+ vector_Mat_to_Mat(vm, mat);
+}
+
+void vector_vector_Point2f_to_Mat(vector< vector< Point2f > >& vv_pt, Mat& mat)
+{
+ vector<Mat> vm;
+ vm.reserve( vv_pt.size() );
+ for(size_t i=0; i<vv_pt.size(); i++)
+ {
+ Mat m;
+ vector_Point2f_to_Mat(vv_pt[i], m);
+ vm.push_back(m);
+ }
+ vector_Mat_to_Mat(vm, mat);
+}
+
+void vector_vector_Point3f_to_Mat(vector< vector< Point3f > >& vv_pt, Mat& mat)
+{
+ vector<Mat> vm;
+ vm.reserve( vv_pt.size() );
+ for(size_t i=0; i<vv_pt.size(); i++)
+ {
+ Mat m;
+ vector_Point3f_to_Mat(vv_pt[i], m);
+ vm.push_back(m);
+ }
+ vector_Mat_to_Mat(vm, mat);
+}
+
+void vector_Vec4i_to_Mat(vector<Vec4i>& v_vec, Mat& mat)
+{
+ mat = Mat(v_vec, true);
+}
+
+void vector_Vec4f_to_Mat(vector<Vec4f>& v_vec, Mat& mat)
+{
+ mat = Mat(v_vec, true);
+}
+
+void vector_Vec6f_to_Mat(vector<Vec6f>& v_vec, Mat& mat)
+{
+ mat = Mat(v_vec, true);
+}
void vector_Point3f_to_Mat(std::vector<cv::Point3f>& v_point, cv::Mat& mat);\r
void vector_Point3d_to_Mat(std::vector<cv::Point3d>& v_point, cv::Mat& mat);\r
\r
+void vector_Vec4i_to_Mat(std::vector<cv::Vec4i>& v_vec, cv::Mat& mat);\r
void vector_Vec4f_to_Mat(std::vector<cv::Vec4f>& v_vec, cv::Mat& mat);\r
void vector_Vec6f_to_Mat(std::vector<cv::Vec6f>& v_vec, cv::Mat& mat);\r
\r
void vector_vector_char_to_Mat(std::vector< std::vector< char > >& vv_ch, cv::Mat& mat);\r
\r
void Mat_to_vector_vector_Point(cv::Mat& mat, std::vector< std::vector< cv::Point > >& vv_pt);\r
+void vector_vector_Point_to_Mat(std::vector< std::vector< cv::Point > >& vv_pt, cv::Mat& mat);\r
+\r
+void Mat_to_vector_vector_Point2f(cv::Mat& mat, std::vector< std::vector< cv::Point2f > >& vv_pt);\r
void vector_vector_Point2f_to_Mat(std::vector< std::vector< cv::Point2f > >& vv_pt, cv::Mat& mat);\r
-void vector_vector_Point_to_Mat(std::vector< std::vector< cv::Point > >& vv_pt, cv::Mat& mat);\r
\r
+void Mat_to_vector_vector_Point3f(cv::Mat& mat, std::vector< std::vector< cv::Point3f > >& vv_pt);\r
+void vector_vector_Point3f_to_Mat(std::vector< std::vector< cv::Point3f > >& vv_pt, cv::Mat& mat);\r
--- /dev/null
+#pragma once\r
+\r
+#include "opencv2/core/core.hpp"\r
+\r
+#if 0\r
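+// NB: compiled out -- these Scalar-overload signatures are kept only as\r
+// reference input for the Java wrapper generator\r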
+\r
+namespace cv\r
+{\r
+CV_EXPORTS_W void add(InputArray src1, Scalar src2, OutputArray dst, InputArray mask=noArray(), int dtype=-1);\r
+\r
+CV_EXPORTS_W void subtract(InputArray src1, Scalar src2, OutputArray dst, InputArray mask=noArray(), int dtype=-1);\r
+\r
+CV_EXPORTS_W void multiply(InputArray src1, Scalar src2, OutputArray dst, double scale=1, int dtype=-1);\r
+ \r
+CV_EXPORTS_W void divide(InputArray src1, Scalar src2, OutputArray dst, double scale=1, int dtype=-1);\r
+\r
+CV_EXPORTS_W void absdiff(InputArray src1, Scalar src2, OutputArray dst);\r
+\r
+CV_EXPORTS_W void compare(InputArray src1, Scalar src2, OutputArray dst, int cmpop);\r
+\r
+CV_EXPORTS_W void min(InputArray src1, Scalar src2, OutputArray dst);\r
+\r
+CV_EXPORTS_W void max(InputArray src1, Scalar src2, OutputArray dst);\r
+\r
+}\r
+#endif //0\r
//do nothing
}
-} // extern "C"
\ No newline at end of file
+} // extern "C"
+
+#include "opencv2/opencv_modules.hpp"
+
+#ifdef HAVE_OPENCV_NONFREE
+#include "opencv2/nonfree/nonfree.hpp"
+static bool makeUseOfNonfree = cv::initModule_nonfree();
+#endif
+
+#ifdef HAVE_OPENCV_FEATURES2D
+#include "opencv2/features2d/features2d.hpp"
+static bool makeUseOfFeatures2d = cv::initModule_features2d();
+#endif
\ No newline at end of file
}\r
\r
//\r
+ // C++: Mat Mat::setTo(Scalar value, Mat mask = Mat())\r
+ //\r
+\r
+ // javadoc: Mat::setTo(value, mask)\r
+ public Mat setTo(Scalar value, Mat mask)\r
+ {\r
+\r
+ Mat retVal = new Mat(n_setTo(nativeObj, value.val[0], value.val[1], value.val[2], value.val[3], mask.nativeObj));\r
+\r
+ return retVal;\r
+ }\r
+\r
+ //\r
// C++: Mat Mat::setTo(Mat value, Mat mask = Mat())\r
//\r
\r
// C++: Mat Mat::operator =(Scalar s)\r
private static native long n_setTo(long nativeObj, double s_val0, double s_val1, double s_val2, double s_val3);\r
\r
+ // C++: Mat Mat::setTo(Scalar value, Mat mask = Mat())\r
+ private static native long n_setTo(long nativeObj, double s_val0, double s_val1, double s_val2, double s_val3, long mask_nativeObj);\r
+\r
// C++: Mat Mat::setTo(Mat value, Mat mask = Mat())\r
private static native long n_setTo(long nativeObj, long value_nativeObj, long mask_nativeObj);\r
\r
--- /dev/null
+package org.opencv.core;\r
+\r
+import java.util.Arrays;\r
+import java.util.List;\r
+\r
+public class MatOfFloat4 extends Mat {\r
+ // 32FC4\r
+ private static final int _depth = CvType.CV_32F;\r
+ private static final int _channels = 4;\r
+\r
+ public MatOfFloat4() {\r
+ super();\r
+ }\r
+\r
+ protected MatOfFloat4(long addr) {\r
+ super(addr);\r
+ if(checkVector(_channels, _depth) < 0 )\r
+ throw new IllegalArgumentException("Incomatible Mat");\r
+ //FIXME: do we need release() here?\r
+ }\r
+ \r
+ public static MatOfFloat4 fromNativeAddr(long addr) {\r
+ return new MatOfFloat4(addr);\r
+ }\r
+\r
+ public MatOfFloat4(Mat m) {\r
+ super(m, Range.all());\r
+ if(checkVector(_channels, _depth) < 0 )\r
+ throw new IllegalArgumentException("Incomatible Mat");\r
+ //FIXME: do we need release() here?\r
+ }\r
+\r
+ public MatOfFloat4(float...a) {\r
+ super();\r
+ fromArray(a);\r
+ }\r
+\r
+ public void alloc(int elemNumber) {\r
+ if(elemNumber>0)\r
+ super.create(elemNumber, 1, CvType.makeType(_depth, _channels));\r
+ }\r
+\r
+ public void fromArray(float...a) {\r
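+ // packs a flat float array into an Nx1 CV_32FC4 Mat; a.length is expected\r
+ // to be a multiple of _channels (4)\r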
+ if(a==null || a.length==0)\r
+ return;\r
+ int num = a.length / _channels;\r
+ alloc(num);\r
+ put(0, 0, a); //TODO: check ret val!\r
+ }\r
+\r
+ public float[] toArray() {\r
+ int num = checkVector(_channels, _depth);\r
+ if(num < 0)\r
+ throw new RuntimeException("Native Mat has unexpected type or size: " + toString());\r
+ float[] a = new float[num * _channels];\r
+ if(num == 0)\r
+ return a;\r
+ get(0, 0, a); //TODO: check ret val!\r
+ return a;\r
+ }\r
+\r
+ public void fromList(List<Float> lb) {\r
+ if(lb==null || lb.size()==0)\r
+ return;\r
+ Float ab[] = lb.toArray(new Float[0]);\r
+ float a[] = new float[ab.length];\r
+ for(int i=0; i<ab.length; i++)\r
+ a[i] = ab[i];\r
+ fromArray(a);\r
+ }\r
+\r
+ public List<Float> toList() {\r
+ float[] a = toArray();\r
+ Float ab[] = new Float[a.length];\r
+ for(int i=0; i<a.length; i++)\r
+ ab[i] = a[i];\r
+ return Arrays.asList(ab);\r
+ }\r
+}\r
--- /dev/null
+package org.opencv.core;\r
+\r
+import java.util.Arrays;\r
+import java.util.List;\r
+\r
+public class MatOfFloat6 extends Mat {\r
+ // 32FC6\r
+ private static final int _depth = CvType.CV_32F;\r
+ private static final int _channels = 6;\r
+\r
+ public MatOfFloat6() {\r
+ super();\r
+ }\r
+\r
+ protected MatOfFloat6(long addr) {\r
+ super(addr);\r
+ if(checkVector(_channels, _depth) < 0 )\r
+ throw new IllegalArgumentException("Incomatible Mat");\r
+ //FIXME: do we need release() here?\r
+ }\r
+ \r
+ public static MatOfFloat6 fromNativeAddr(long addr) {\r
+ return new MatOfFloat6(addr);\r
+ }\r
+\r
+ public MatOfFloat6(Mat m) {\r
+ super(m, Range.all());\r
+ if(checkVector(_channels, _depth) < 0 )\r
+ throw new IllegalArgumentException("Incomatible Mat");\r
+ //FIXME: do we need release() here?\r
+ }\r
+\r
+ public MatOfFloat6(float...a) {\r
+ super();\r
+ fromArray(a);\r
+ }\r
+\r
+ public void alloc(int elemNumber) {\r
+ if(elemNumber>0)\r
+ super.create(elemNumber, 1, CvType.makeType(_depth, _channels));\r
+ }\r
+\r
+ public void fromArray(float...a) {\r
+ if(a==null || a.length==0)\r
+ return;\r
+ int num = a.length / _channels;\r
+ alloc(num);\r
+ put(0, 0, a); //TODO: check ret val!\r
+ }\r
+\r
+ public float[] toArray() {\r
+ int num = checkVector(_channels, _depth);\r
+ if(num < 0)\r
+ throw new RuntimeException("Native Mat has unexpected type or size: " + toString());\r
+ float[] a = new float[num * _channels];\r
+ if(num == 0)\r
+ return a;\r
+ get(0, 0, a); //TODO: check ret val!\r
+ return a;\r
+ }\r
+\r
+ public void fromList(List<Float> lb) {\r
+ if(lb==null || lb.size()==0)\r
+ return;\r
+ Float ab[] = lb.toArray(new Float[0]);\r
+ float a[] = new float[ab.length];\r
+ for(int i=0; i<ab.length; i++)\r
+ a[i] = ab[i];\r
+ fromArray(a);\r
+ }\r
+\r
+ public List<Float> toList() {\r
+ float[] a = toArray();\r
+ Float ab[] = new Float[a.length];\r
+ for(int i=0; i<a.length; i++)\r
+ ab[i] = a[i];\r
+ return Arrays.asList(ab);\r
+ }\r
+}\r
--- /dev/null
+package org.opencv.core;\r
+\r
+import java.util.Arrays;\r
+import java.util.List;\r
+\r
+\r
+public class MatOfInt4 extends Mat {\r
+ // 32SC4\r
+ private static final int _depth = CvType.CV_32S;\r
+ private static final int _channels = 4;\r
+\r
+ public MatOfInt4() {\r
+ super();\r
+ }\r
+\r
+ protected MatOfInt4(long addr) {\r
+ super(addr);\r
+ if(checkVector(_channels, _depth) < 0 )\r
+ throw new IllegalArgumentException("Incompatible Mat");\r
+ //FIXME: do we need release() here?\r
+ }\r
+\r
+ public static MatOfInt4 fromNativeAddr(long addr) {\r
+ return new MatOfInt4(addr);\r
+ }\r
+\r
+ public MatOfInt4(Mat m) {\r
+ super(m, Range.all());\r
+ if(checkVector(_channels, _depth) < 0 )\r
+ throw new IllegalArgumentException("Incompatible Mat");\r
+ //FIXME: do we need release() here?\r
+ }\r
+\r
+ public MatOfInt4(int...a) {\r
+ super();\r
+ fromArray(a);\r
+ }\r
+\r
+ public void alloc(int elemNumber) {\r
+ if(elemNumber>0)\r
+ super.create(elemNumber, 1, CvType.makeType(_depth, _channels));\r
+ }\r
+\r
+ public void fromArray(int...a) {\r
+ if(a==null || a.length==0)\r
+ return;\r
+ int num = a.length / _channels;\r
+ alloc(num);\r
+ put(0, 0, a); //TODO: check ret val!\r
+ }\r
+\r
+ public int[] toArray() {\r
+ int num = checkVector(_channels, _depth);\r
+ if(num < 0)\r
+ throw new RuntimeException("Native Mat has unexpected type or size: " + toString());\r
+ int[] a = new int[num * _channels];\r
+ if(num == 0)\r
+ return a;\r
+ get(0, 0, a); //TODO: check ret val!\r
+ return a;\r
+ }\r
+\r
+ public void fromList(List<Integer> lb) {\r
+ if(lb==null || lb.size()==0)\r
+ return;\r
+ Integer ab[] = lb.toArray(new Integer[0]);\r
+ int a[] = new int[ab.length];\r
+ for(int i=0; i<ab.length; i++)\r
+ a[i] = ab[i];\r
+ fromArray(a);\r
+ }\r
+\r
+ public List<Integer> toList() {\r
+ int[] a = toArray();\r
+ Integer ab[] = new Integer[a.length];\r
+ for(int i=0; i<a.length; i++)\r
+ ab[i] = a[i];\r
+ return Arrays.asList(ab);\r
+ }\r
+}\r
import org.opencv.core.MatOfKeyPoint;\r
import org.opencv.core.MatOfPoint;\r
import org.opencv.core.MatOfPoint2f;\r
+import org.opencv.core.MatOfPoint3f;\r
import org.opencv.core.Point;\r
import org.opencv.core.Point3;\r
import org.opencv.core.Rect;\r
}\r
}\r
\r
+ // vector_vector_Point2f\r
+ public static Mat vector_vector_Point2f_to_Mat(List<MatOfPoint2f> pts, List<Mat> mats) {\r
+ Mat res;\r
+ int lCount = (pts != null) ? pts.size() : 0;\r
+ if (lCount > 0) {\r
+ for (MatOfPoint2f vpt : pts)\r
+ mats.add(vpt);\r
+ res = vector_Mat_to_Mat(mats);\r
+ } else {\r
+ res = new Mat();\r
+ }\r
+ return res;\r
+ }\r
+\r
+ // vector_vector_Point3f\r
+ public static void Mat_to_vector_vector_Point3f(Mat m, List<MatOfPoint3f> pts) {\r
+ if (pts == null)\r
+ throw new java.lang.IllegalArgumentException("Output List can't be null");\r
+\r
+ if (m == null)\r
+ throw new java.lang.IllegalArgumentException("Input Mat can't be null");\r
+\r
+ List<Mat> mats = new ArrayList<Mat>(m.rows());\r
+ Mat_to_vector_Mat(m, mats);\r
+ for (Mat mi : mats) {\r
+ MatOfPoint3f pt = new MatOfPoint3f(mi);\r
+ pts.add(pt);\r
+ }\r
+ }\r
+\r
+ // vector_vector_Point3f\r
+ public static Mat vector_vector_Point3f_to_Mat(List<MatOfPoint3f> pts, List<Mat> mats) {\r
+ Mat res;\r
+ int lCount = (pts != null) ? pts.size() : 0;\r
+ if (lCount > 0) {\r
+ for (MatOfPoint3f vpt : pts)\r
+ mats.add(vpt);\r
+ res = vector_Mat_to_Mat(mats);\r
+ } else {\r
+ res = new Mat();\r
+ }\r
+ return res;\r
+ }\r
+\r
// vector_vector_KeyPoint\r
public static Mat vector_vector_KeyPoint_to_Mat(List<MatOfKeyPoint> kps, List<Mat> mats) {\r
Mat res;\r
--- /dev/null
+Common Interfaces of Descriptor Extractors
+==========================================
+
+.. highlight:: cpp
+
+Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to easily switch
+between different algorithms solving the same problem. This section is devoted to computing descriptors
+represented as vectors in a multidimensional space. All objects that implement vector
+descriptor extractors inherit the
+:ocv:class:`DescriptorExtractor` interface.
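+
+A minimal usage sketch (the image path, the FAST threshold, and the extractor name are illustrative only;
+any extractor registered with ``DescriptorExtractor::create`` can be substituted): ::
+
+    // assumption: a grayscale test image is available on disk
+    Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");
+    Mat img = imread("scene.png", 0);
+    vector<KeyPoint> keypoints;
+    FAST(img, keypoints, 20);                        // any keypoint detector works here
+    Mat descriptors;
+    extractor->compute(img, keypoints, descriptors); // one row per keypoint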
+
+
+
+CalonderDescriptorExtractor
+---------------------------
+.. ocv:class:: CalonderDescriptorExtractor
+
+Wrapping class for computing descriptors by using the
+:ocv:class:`RTreeClassifier` class. ::
+
+ template<typename T>
+ class CalonderDescriptorExtractor : public DescriptorExtractor
+ {
+ public:
+ CalonderDescriptorExtractor( const string& classifierFile );
+
+ virtual void read( const FileNode &fn );
+ virtual void write( FileStorage &fs ) const;
+ virtual int descriptorSize() const;
+ virtual int descriptorType() const;
+ protected:
+ ...
+ };
\ No newline at end of file
--- /dev/null
+Common Interfaces of Generic Descriptor Matchers
+================================================
+
+.. highlight:: cpp
+
+OneWayDescriptorMatcher
+-----------------------
+.. ocv:class:: OneWayDescriptorMatcher
+
+Wrapping class for computing, matching, and classifying descriptors using the
+:ocv:class:`OneWayDescriptorBase` class. ::
+
+ class OneWayDescriptorMatcher : public GenericDescriptorMatcher
+ {
+ public:
+ class Params
+ {
+ public:
+ static const int POSE_COUNT = 500;
+ static const int PATCH_WIDTH = 24;
+ static const int PATCH_HEIGHT = 24;
+ static float GET_MIN_SCALE() { return 0.7f; }
+ static float GET_MAX_SCALE() { return 1.5f; }
+ static float GET_STEP_SCALE() { return 1.2f; }
+
+ Params( int poseCount = POSE_COUNT,
+ Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
+ string pcaFilename = string(),
+ string trainPath = string(), string trainImagesList = string(),
+ float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(),
+ float stepScale = GET_STEP_SCALE() );
+
+ int poseCount;
+ Size patchSize;
+ string pcaFilename;
+ string trainPath;
+ string trainImagesList;
+
+ float minScale, maxScale, stepScale;
+ };
+
+ OneWayDescriptorMatcher( const Params& params=Params() );
+ virtual ~OneWayDescriptorMatcher();
+
+ void initialize( const Params& params, const Ptr<OneWayDescriptorBase>& base=Ptr<OneWayDescriptorBase>() );
+
+ // Clears keypoints stored in collection and OneWayDescriptorBase
+ virtual void clear();
+
+ virtual void train();
+
+ virtual bool isMaskSupported();
+
+ virtual void read( const FileNode &fn );
+ virtual void write( FileStorage& fs ) const;
+
+ virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
+ protected:
+ ...
+ };
+
+
+
+
+FernDescriptorMatcher
+---------------------
+.. ocv:class:: FernDescriptorMatcher
+
+Wrapping class for computing, matching, and classifying descriptors using the
+:ocv:class:`FernClassifier` class. ::
+
+ class FernDescriptorMatcher : public GenericDescriptorMatcher
+ {
+ public:
+ class Params
+ {
+ public:
+ Params( int nclasses=0,
+ int patchSize=FernClassifier::PATCH_SIZE,
+ int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE,
+ int nstructs=FernClassifier::DEFAULT_STRUCTS,
+ int structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
+ int nviews=FernClassifier::DEFAULT_VIEWS,
+ int compressionMethod=FernClassifier::COMPRESSION_NONE,
+ const PatchGenerator& patchGenerator=PatchGenerator() );
+
+ Params( const string& filename );
+
+ int nclasses;
+ int patchSize;
+ int signatureSize;
+ int nstructs;
+ int structSize;
+ int nviews;
+ int compressionMethod;
+ PatchGenerator patchGenerator;
+
+ string filename;
+ };
+
+ FernDescriptorMatcher( const Params& params=Params() );
+ virtual ~FernDescriptorMatcher();
+
+ virtual void clear();
+
+ virtual void train();
+
+ virtual bool isMaskSupported();
+
+ virtual void read( const FileNode &fn );
+ virtual void write( FileStorage& fs ) const;
+
+ virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
+
+ protected:
+ ...
+ };
+
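+Both classes follow the :ocv:class:`GenericDescriptorMatcher` interface, so one can be
+substituted for the other. A minimal matching sketch (``queryImage``, ``queryKeypoints``,
+``trainImage``, and ``trainKeypoints`` are assumed to be prepared by a keypoint detector): ::
+
+    FernDescriptorMatcher matcher; // OneWayDescriptorMatcher can be used the same way
+    vector<DMatch> matches;
+    matcher.match( queryImage, queryKeypoints, trainImage, trainKeypoints, matches );
+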
--- /dev/null
+Expectation Maximization
+========================
+
+This section describes the obsolete ``C`` interface of the EM algorithm. Details of the algorithm and its ``C++`` interface can be found in the section :ref:`ML_Expectation Maximization`.
+
+.. highlight:: cpp
+
+
+CvEMParams
+----------
+.. ocv:class:: CvEMParams
+
+Parameters of the EM algorithm. All parameters are public. You can initialize them with a constructor and then override some of them directly if needed.
+
+CvEMParams::CvEMParams
+----------------------
+The constructors
+
+.. ocv:function:: CvEMParams::CvEMParams()
+
+.. ocv:function:: CvEMParams::CvEMParams( int nclusters, int cov_mat_type=CvEM::COV_MAT_DIAGONAL, int start_step=CvEM::START_AUTO_STEP, CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 )
+
+ :param nclusters: The number of mixture components in the Gaussian mixture model. Some EM implementations can determine the optimal number of mixtures within a specified value range, but ML does not support this yet.
+
+ :param cov_mat_type: Constraint on the covariance matrices, which defines the type of the matrices. Possible values are:
+
+ * **CvEM::COV_MAT_SPHERICAL** A scaled identity matrix :math:`\mu_k * I`. Only one parameter :math:`\mu_k` is estimated for each matrix. This option may be used in special cases, when the constraint is relevant, or as a first step in the optimization (for example, when the data is preprocessed with PCA). The results of such a preliminary estimation may be passed again to the optimization procedure, this time with ``cov_mat_type=CvEM::COV_MAT_DIAGONAL``.
+
+ * **CvEM::COV_MAT_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of free parameters is ``d`` for each matrix. This is the most commonly used option, yielding good estimation results.
+
+ * **CvEM::COV_MAT_GENERIC** A symmetric positive-definite matrix. The number of free parameters in each matrix is about :math:`d^2/2`. It is not recommended to use this option unless there is a fairly accurate initial estimate of the parameters and/or a huge number of training samples.
+
+ :param start_step: The start step of the EM algorithm:
+
+ * **CvEM::START_E_STEP** Start with Expectation step. You need to provide means :math:`a_k` of mixture components to use this option. Optionally you can pass weights :math:`\pi_k` and covariance matrices :math:`S_k` of mixture components.
+ * **CvEM::START_M_STEP** Start with Maximization step. You need to provide initial probabilities :math:`p_{i,k}` to use this option.
+ * **CvEM::START_AUTO_STEP** Start with Expectation step. You need not provide any parameters because they will be estimated by the k-means algorithm.
+
+ :param term_crit: The termination criteria of the EM algorithm. The EM algorithm can be terminated by the number of iterations ``term_crit.max_iter`` (number of M-steps) or when the relative change of the likelihood logarithm is less than ``term_crit.epsilon``.
+
+ :param probs: Initial probabilities :math:`p_{i,k}` of sample :math:`i` belonging to mixture component :math:`k`. It is a floating-point matrix of :math:`nsamples \times nclusters` size. It is used, and must not be NULL, only when ``start_step=CvEM::START_M_STEP``.
+
+ :param weights: Initial weights :math:`\pi_k` of mixture components. It is a floating-point vector with :math:`nclusters` elements. It is used (if not NULL) only when ``start_step=CvEM::START_E_STEP``.
+
+ :param means: Initial means :math:`a_k` of mixture components. It is a floating-point matrix of :math:`nclusters \times dims` size. It is used, and must not be NULL, only when ``start_step=CvEM::START_E_STEP``.
+
+ :param covs: Initial covariance matrices :math:`S_k` of mixture components. Each of covariance matrices is a valid square floating-point matrix of :math:`dims \times dims` size. It is used (if not NULL) only when ``start_step=CvEM::START_E_STEP``.
+
+The default constructor represents a rough rule of thumb:
+
+::
+
+ CvEMParams() : nclusters(10), cov_mat_type(1/*CvEM::COV_MAT_DIAGONAL*/),
+ start_step(0/*CvEM::START_AUTO_STEP*/), probs(0), weights(0), means(0), covs(0)
+ {
+ term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON );
+ }
+
+
+With the other constructor, it is possible to override a variety of parameters, from the number of mixtures (the only essential problem-dependent parameter) to the initial values of the mixture parameters.
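+
+For example (a sketch; the values are illustrative only): ::
+
+    CvEMParams params;                             // rule-of-thumb defaults
+    params.nclusters    = 4;                       // the only essential parameter
+    params.cov_mat_type = CvEM::COV_MAT_SPHERICAL; // e.g. for PCA-preprocessed data
+    params.term_crit    = cvTermCriteria( CV_TERMCRIT_ITER, 300, 0.01 );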
+
+
+CvEM
+----
+.. ocv:class:: CvEM
+
+ The class implements the EM algorithm as described in the beginning of the section :ref:`ML_Expectation Maximization`.
+
+
+CvEM::train
+-----------
+Estimates the Gaussian mixture parameters from a sample set.
+
+.. ocv:function:: bool CvEM::train( const Mat& samples, const Mat& sample_idx=Mat(), CvEMParams params=CvEMParams(), Mat* labels=0 )
+
+.. ocv:function:: bool CvEM::train( const CvMat* samples, const CvMat* sampleIdx=0, CvEMParams params=CvEMParams(), CvMat* labels=0 )
+
+.. ocv:pyfunction:: cv2.EM.train(samples[, sampleIdx[, params]]) -> retval, labels
+
+ :param samples: Samples from which the Gaussian mixture model will be estimated.
+
+ :param sample_idx: Mask of samples to use. All samples are used by default.
+
+ :param params: Parameters of the EM algorithm.
+
+ :param labels: The optional output "class label" for each sample: :math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample).
+
+Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take responses (class labels or function values) as input. Instead, it computes the
+*Maximum Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the parameters inside the structure:
+:math:`p_{i,k}` in ``probs``,
+:math:`a_k` in ``means`` ,
+:math:`S_k` in ``covs[k]``,
+:math:`\pi_k` in ``weights`` , and optionally computes the output "class label" for each sample:
+:math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample).
+
+The trained model can then be used for prediction, just like any other classifier. It is similar to the
+:ocv:class:`CvNormalBayesClassifier`.
+
+For an example of clustering random samples of a multi-Gaussian distribution using EM, see the ``em.cpp`` sample in the OpenCV distribution.
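+
+A minimal training/prediction sketch (not taken from ``em.cpp``; the uniform random samples
+are for illustration only): ::
+
+    Mat samples( 1000, 2, CV_32FC1 );
+    randu( samples, Scalar::all(0), Scalar::all(10) );  // toy data
+
+    CvEMParams params;
+    params.nclusters = 2;
+
+    CvEM em;
+    Mat labels;
+    em.train( samples, Mat(), params, &labels ); // labels: most probable component per sample
+
+    Mat probs;
+    float idx = em.predict( samples.row(0), &probs ); // component index for one sample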
+
+
+CvEM::predict
+-------------
+Returns a mixture component index of a sample.
+
+.. ocv:function:: float CvEM::predict( const Mat& sample, Mat* probs=0 ) const
+
+.. ocv:function:: float CvEM::predict( const CvMat* sample, CvMat* probs ) const
+
+.. ocv:pyfunction:: cv2.EM.predict(sample) -> retval, probs
+
+ :param sample: A sample for classification.
+
+ :param probs: If it is not NULL, the method writes the posterior probabilities of each component given the sample to this parameter.
+
+
+CvEM::getNClusters
+------------------
+Returns the number of mixture components :math:`M` in the Gaussian mixture model.
+
+.. ocv:function:: int CvEM::getNClusters() const
+
+.. ocv:function:: int CvEM::get_nclusters() const
+
+.. ocv:pyfunction:: cv2.EM.getNClusters() -> retval
+
+
+CvEM::getMeans
+------------------
+Returns mixture means :math:`a_k`.
+
+.. ocv:function:: Mat CvEM::getMeans() const
+
+.. ocv:function:: const CvMat* CvEM::get_means() const
+
+.. ocv:pyfunction:: cv2.EM.getMeans() -> means
+
+
+CvEM::getCovs
+-------------
+Returns mixture covariance matrices :math:`S_k`.
+
+.. ocv:function:: void CvEM::getCovs(std::vector<cv::Mat>& covs) const
+
+.. ocv:function:: const CvMat** CvEM::get_covs() const
+
+.. ocv:pyfunction:: cv2.EM.getCovs([covs]) -> covs
+
+
+CvEM::getWeights
+----------------
+Returns mixture weights :math:`\pi_k`.
+
+.. ocv:function:: Mat CvEM::getWeights() const
+
+.. ocv:function:: const CvMat* CvEM::get_weights() const
+
+.. ocv:pyfunction:: cv2.EM.getWeights() -> weights
+
+
+CvEM::getProbs
+--------------
+Returns vectors of probabilities for each training sample.
+
+.. ocv:function:: Mat CvEM::getProbs() const
+
+.. ocv:function:: const CvMat* CvEM::get_probs() const
+
+.. ocv:pyfunction:: cv2.EM.getProbs() -> probs
+
+For each training sample :math:`i` (that has been passed to the constructor or to :ocv:func:`CvEM::train`), the method returns the probabilities :math:`p_{i,k}` of belonging to mixture component :math:`k`.
+
+
+CvEM::getLikelihood
+-------------------
+Returns the logarithm of the likelihood.
+
+.. ocv:function:: double CvEM::getLikelihood() const
+
+.. ocv:function:: double CvEM::get_log_likelihood() const
+
+.. ocv:pyfunction:: cv2.EM.getLikelihood() -> likelihood
+
+
+CvEM::write
+-----------
+Writes the trained Gaussian mixture model to the file storage.
+
+.. ocv:function:: void CvEM::write( CvFileStorage* fs, const char* name ) const
+
+ :param fs: A file storage where the model will be written.
+ :param name: A name of the file node where the model data will be written.
+
+
+CvEM::read
+-----------------
+Reads the trained Gaussian mixture model from the file storage.
+
+.. ocv:function:: void CvEM::read( CvFileStorage* fs, CvFileNode* node )
+
+ :param fs: A file storage with the trained model.
+
+ :param node: The parent map. If it is NULL, the function searches a node with parameters in all the top-level nodes (streams), starting with the first one.
+
--- /dev/null
+Feature Detection and Description
+=================================
+
+.. highlight:: cpp
+
+RandomizedTree
+--------------
+.. ocv:class:: RandomizedTree
+
+Class containing a base structure for ``RTreeClassifier``. ::
+
+ class CV_EXPORTS RandomizedTree
+ {
+ public:
+ friend class RTreeClassifier;
+
+ RandomizedTree();
+ ~RandomizedTree();
+
+ void train(std::vector<BaseKeypoint> const& base_set,
+ RNG &rng, int depth, int views,
+ size_t reduced_num_dim, int num_quant_bits);
+ void train(std::vector<BaseKeypoint> const& base_set,
+ RNG &rng, PatchGenerator &make_patch, int depth,
+ int views, size_t reduced_num_dim, int num_quant_bits);
+
+ // next two functions are EXPERIMENTAL
+ //(do not use unless you know exactly what you do)
+ static void quantizeVector(float *vec, int dim, int N, float bnds[2],
+ int clamp_mode=0);
+ static void quantizeVector(float *src, int dim, int N, float bnds[2],
+ uchar *dst);
+
+ // patch_data must be a 32x32 array (no row padding)
+ float* getPosterior(uchar* patch_data);
+ const float* getPosterior(uchar* patch_data) const;
+ uchar* getPosterior2(uchar* patch_data);
+
+ void read(const char* file_name, int num_quant_bits);
+ void read(std::istream &is, int num_quant_bits);
+ void write(const char* file_name) const;
+ void write(std::ostream &os) const;
+
+ int classes() { return classes_; }
+ int depth() { return depth_; }
+
+ void discardFloatPosteriors() { freePosteriors(1); }
+
+ inline void applyQuantization(int num_quant_bits)
+ { makePosteriors2(num_quant_bits); }
+
+ private:
+ int classes_;
+ int depth_;
+ int num_leaves_;
+ std::vector<RTreeNode> nodes_;
+ float **posteriors_; // 16-byte aligned posteriors
+ uchar **posteriors2_; // 16-byte aligned posteriors
+ std::vector<int> leaf_counts_;
+
+ void createNodes(int num_nodes, RNG &rng);
+ void allocPosteriorsAligned(int num_leaves, int num_classes);
+ void freePosteriors(int which);
+ // which: 1=posteriors_, 2=posteriors2_, 3=both
+ void init(int classes, int depth, RNG &rng);
+ void addExample(int class_id, uchar* patch_data);
+ void finalize(size_t reduced_num_dim, int num_quant_bits);
+ int getIndex(uchar* patch_data) const;
+ inline float* getPosteriorByIndex(int index);
+ inline uchar* getPosteriorByIndex2(int index);
+ inline const float* getPosteriorByIndex(int index) const;
+ void convertPosteriorsToChar();
+ void makePosteriors2(int num_quant_bits);
+ void compressLeaves(size_t reduced_num_dim);
+ void estimateQuantPercForPosteriors(float perc[2]);
+ };
+
+
+
+RandomizedTree::train
+-------------------------
+Trains a randomized tree using an input set of keypoints.
+
+.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
+
+.. ocv:function:: void train(std::vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int depth, int views, size_t reduced_num_dim, int num_quant_bits)
+
+ :param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
+
+ :param rng: Random-number generator used for training.
+
+ :param make_patch: Patch generator used for training.
+
+ :param depth: Maximum tree depth.
+
+ :param views: Number of random views of each keypoint neighborhood to generate.
+
+ :param reduced_num_dim: Number of dimensions used in the compressed signature.
+
+ :param num_quant_bits: Number of bits used for quantization.
+
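+A minimal call sketch (``train_image`` is an assumed ``IplImage*``; the keypoint coordinates
+and numeric parameters are illustrative only): ::
+
+    std::vector<BaseKeypoint> base_set;
+    base_set.push_back( BaseKeypoint(30, 40, train_image) ); // one keypoint per class
+
+    RNG rng( cvGetTickCount() );
+    RandomizedTree tree;
+    tree.train( base_set, rng, /*depth*/ 9, /*views*/ 500,
+                /*reduced_num_dim*/ base_set.size(), /*num_quant_bits*/ 4 );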
+
+
+RandomizedTree::read
+------------------------
+Reads a pre-saved randomized tree from a file or stream.
+
+.. ocv:function:: void read(const char* file_name, int num_quant_bits)
+
+.. ocv:function:: void read(std::istream &is, int num_quant_bits)
+
+ :param file_name: Name of the file that contains randomized tree data.
+
+ :param is: Input stream associated with the file that contains randomized tree data.
+
+ :param num_quant_bits: Number of bits used for quantization.
+
+
+
+RandomizedTree::write
+-------------------------
+Writes the current randomized tree to a file or stream.
+
+.. ocv:function:: void write(const char* file_name) const
+
+.. ocv:function:: void write(std::ostream &os) const
+
+ :param file_name: Name of the file where randomized tree data is stored.
+
+ :param os: Output stream associated with the file where randomized tree data is stored.
+
+
+
+RandomizedTree::applyQuantization
+-------------------------------------
+.. ocv:function:: void applyQuantization(int num_quant_bits)
+
+ Applies quantization to the current randomized tree.
+
+ :param num_quant_bits: Number of bits used for quantization.
+
+
+RTreeNode
+---------
+.. ocv:class:: RTreeNode
+
+Class containing a base structure for ``RandomizedTree``. ::
+
+ struct RTreeNode
+ {
+ short offset1, offset2;
+
+ RTreeNode() {}
+
+ RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
+ : offset1(y1*PATCH_SIZE + x1),
+ offset2(y2*PATCH_SIZE + x2)
+ {}
+
+ //! Left child on 0, right child on 1
+ inline bool operator() (uchar* patch_data) const
+ {
+ return patch_data[offset1] > patch_data[offset2];
+ }
+ };
+
+
+
+RTreeClassifier
+---------------
+.. ocv:class:: RTreeClassifier
+
+Class implementing the randomized trees classifier. It represents the Calonder descriptor that was originally introduced by Michael Calonder. ::
+
+ class CV_EXPORTS RTreeClassifier
+ {
+ public:
+ static const int DEFAULT_TREES = 48;
+ static const size_t DEFAULT_NUM_QUANT_BITS = 4;
+
+ RTreeClassifier();
+
+ void train(std::vector<BaseKeypoint> const& base_set,
+ RNG &rng,
+ int num_trees = RTreeClassifier::DEFAULT_TREES,
+ int depth = DEFAULT_DEPTH,
+ int views = DEFAULT_VIEWS,
+ size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
+ int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
+ bool print_status = true);
+ void train(std::vector<BaseKeypoint> const& base_set,
+ RNG &rng,
+ PatchGenerator &make_patch,
+ int num_trees = RTreeClassifier::DEFAULT_TREES,
+ int depth = DEFAULT_DEPTH,
+ int views = DEFAULT_VIEWS,
+ size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM,
+ int num_quant_bits = DEFAULT_NUM_QUANT_BITS,
+ bool print_status = true);
+
+ // sig must point to a memory block of at least
+ //classes()*sizeof(float|uchar) bytes
+ void getSignature(IplImage *patch, uchar *sig);
+ void getSignature(IplImage *patch, float *sig);
+ void getSparseSignature(IplImage *patch, float *sig,
+ float thresh);
+
+ static int countNonZeroElements(float *vec, int n, double tol=1e-10);
+ static inline void safeSignatureAlloc(uchar **sig, int num_sig=1,
+ int sig_len=176);
+ static inline uchar* safeSignatureAlloc(int num_sig=1,
+ int sig_len=176);
+
+ inline int classes() { return classes_; }
+ inline int original_num_classes()
+ { return original_num_classes_; }
+
+ void setQuantization(int num_quant_bits);
+ void discardFloatPosteriors();
+
+ void read(const char* file_name);
+ void read(std::istream &is);
+ void write(const char* file_name) const;
+ void write(std::ostream &os) const;
+
+ std::vector<RandomizedTree> trees_;
+
+ private:
+ int classes_;
+ int num_quant_bits_;
+ uchar **posteriors_;
+ ushort *ptemp_;
+ int original_num_classes_;
+ bool keep_floats_;
+ };
+
+
+
+RTreeClassifier::train
+--------------------------
+Trains a randomized tree classifier using an input set of keypoints.
+
+.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
+
+.. ocv:function:: void train(vector<BaseKeypoint> const& base_set, RNG& rng, PatchGenerator& make_patch, int num_trees = RTreeClassifier::DEFAULT_TREES, int depth = DEFAULT_DEPTH, int views = DEFAULT_VIEWS, size_t reduced_num_dim = DEFAULT_REDUCED_NUM_DIM, int num_quant_bits = DEFAULT_NUM_QUANT_BITS, bool print_status = true)
+
+ :param base_set: Vector of the ``BaseKeypoint`` type. It contains image keypoints used for training.
+
+ :param rng: Random-number generator used for training.
+
+ :param make_patch: Patch generator used for training.
+
+ :param num_trees: Number of randomized trees used in the ``RTreeClassifier``.
+
+ :param depth: Maximum tree depth.
+
+ :param views: Number of random views of each keypoint neighborhood to generate.
+
+ :param reduced_num_dim: Number of dimensions used in the compressed signature.
+
+ :param num_quant_bits: Number of bits used for quantization.
+
+ :param print_status: If true, the current training status is printed to the console.
+
+
+
+RTreeClassifier::getSignature
+---------------------------------
+Returns a signature for an image patch.
+
+.. ocv:function:: void getSignature(IplImage *patch, uchar *sig)
+
+.. ocv:function:: void getSignature(IplImage *patch, float *sig)
+
+ :param patch: Image patch to calculate the signature for.
+ :param sig: Output signature (array dimension is ``reduced_num_dim``).
+
+
+
+RTreeClassifier::getSparseSignature
+---------------------------------------
+Returns a sparse signature for an image patch.
+
+.. ocv:function:: void getSparseSignature(IplImage *patch, float *sig, float thresh)
+
+ :param patch: Image patch to calculate the signature for.
+
+ :param sig: Output signature (array dimension is ``reduced_num_dim``).
+
+ :param thresh: Threshold used for compressing the signature.
+
+ The function is similar to ``getSignature`` but removes all signature elements below the threshold so that the signature is compressed.
+
+
+RTreeClassifier::countNonZeroElements
+-----------------------------------------
+Returns the number of non-zero elements in an input array.
+
+.. ocv:function:: static int countNonZeroElements(float *vec, int n, double tol=1e-10)
+
+ :param vec: Input vector containing float elements.
+
+ :param n: Input vector size.
+
+ :param tol: Threshold used for counting elements. All elements less than ``tol`` are considered zero.
+
+
+
+RTreeClassifier::read
+-------------------------
+Reads a pre-saved ``RTreeClassifier`` from a file or stream.
+
+.. ocv:function:: void read(const char* file_name)
+
+.. ocv:function:: void read(std::istream& is)
+
+ :param file_name: Name of the file that contains randomized tree data.
+
+ :param is: Input stream associated with the file that contains randomized tree data.
+
+
+
+RTreeClassifier::write
+--------------------------
+Writes the current ``RTreeClassifier`` to a file or stream.
+
+.. ocv:function:: void write(const char* file_name) const
+
+.. ocv:function:: void write(std::ostream &os) const
+
+ :param file_name: Name of the file where randomized tree data is stored.
+
+ :param os: Output stream associated with the file where randomized tree data is stored.
+
+
+
+RTreeClassifier::setQuantization
+------------------------------------
+Applies quantization to the current randomized tree.
+
+.. ocv:function:: void setQuantization(int num_quant_bits)
+
+ :param num_quant_bits: Number of bits used for quantization.
+
+The example below demonstrates the usage of ``RTreeClassifier`` for feature matching. The features are extracted from the test and train images with SURF. The output is
+the ``best_corr`` and
+``best_corr_idx`` arrays that keep the best probabilities and the corresponding feature indices for every feature of the test image. ::
+
+ CvMemStorage* storage = cvCreateMemStorage(0);
+ CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
+ CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
+ CvSURFParams params = cvSURFParams(500, 1);
+ cvExtractSURF( test_image, 0, &imageKeypoints, &imageDescriptors,
+ storage, params );
+ cvExtractSURF( train_image, 0, &objectKeypoints, &objectDescriptors,
+ storage, params );
+
+ RTreeClassifier detector;
+ int patch_width = PATCH_SIZE;
+ int patch_height = PATCH_SIZE;
+ vector<BaseKeypoint> base_set;
+ int i=0;
+ CvSURFPoint* point;
+ for (i=0;i<(n_points > 0 ? n_points : objectKeypoints->total);i++)
+ {
+ point=(CvSURFPoint*)cvGetSeqElem(objectKeypoints,i);
+ base_set.push_back(
+ BaseKeypoint(point->pt.x,point->pt.y,train_image));
+ }
+
+ //Detector training
+ RNG rng( cvGetTickCount() );
+ PatchGenerator gen(0,255,2,false,0.7,1.3,-CV_PI/3,CV_PI/3,
+ -CV_PI/3,CV_PI/3);
+
+ printf("RTree Classifier training...n");
+ detector.train(base_set,rng,gen,24,DEFAULT_DEPTH,2000,
+ (int)base_set.size(), detector.DEFAULT_NUM_QUANT_BITS);
+ printf("Donen");
+
+ float* signature = new float[detector.original_num_classes()];
+ float* best_corr;
+ int* best_corr_idx;
+ if (imageKeypoints->total > 0)
+ {
+ best_corr = new float[imageKeypoints->total];
+ best_corr_idx = new int[imageKeypoints->total];
+ }
+
+ for(i=0; i < imageKeypoints->total; i++)
+ {
+ point=(CvSURFPoint*)cvGetSeqElem(imageKeypoints,i);
+ int part_idx = -1;
+ float prob = 0.0f;
+
+ CvRect roi = cvRect((int)(point->pt.x) - patch_width/2,
+ (int)(point->pt.y) - patch_height/2,
+ patch_width, patch_height);
+ cvSetImageROI(test_image, roi);
+ roi = cvGetImageROI(test_image);
+ if(roi.width != patch_width || roi.height != patch_height)
+ {
+ best_corr_idx[i] = part_idx;
+ best_corr[i] = prob;
+ }
+ else
+ {
+ cvSetImageROI(test_image, roi);
+ IplImage* roi_image =
+ cvCreateImage(cvSize(roi.width, roi.height),
+ test_image->depth, test_image->nChannels);
+ cvCopy(test_image,roi_image);
+
+ detector.getSignature(roi_image, signature);
+ for (int j = 0; j< detector.original_num_classes();j++)
+ {
+ if (prob < signature[j])
+ {
+ part_idx = j;
+ prob = signature[j];
+ }
+ }
+
+ best_corr_idx[i] = part_idx;
+ best_corr[i] = prob;
+
+ if (roi_image)
+ cvReleaseImage(&roi_image);
+ }
+ cvResetImageROI(test_image);
+ }
+
+..
:maxdepth: 2
motion_analysis
+ expectation_maximization
+ planar_subdivisions
+ feature_detection_and_description
+ common_interfaces_of_descriptor_extractors
+ common_interfaces_of_generic_descriptor_matchers
--- /dev/null
+Planar Subdivisions (C API)
+============================
+
+.. highlight:: c
+
+CvSubdiv2D
+----------
+
+.. ocv:struct:: CvSubdiv2D
+
+Planar subdivision.
+
+::
+
+ #define CV_SUBDIV2D_FIELDS() \
+ CV_GRAPH_FIELDS() \
+ int quad_edges; \
+ int is_geometry_valid; \
+ CvSubdiv2DEdge recent_edge; \
+ CvPoint2D32f topleft; \
+ CvPoint2D32f bottomright;
+
+ typedef struct CvSubdiv2D
+ {
+ CV_SUBDIV2D_FIELDS()
+ }
+ CvSubdiv2D;
+
+..
+
+Planar subdivision is the subdivision of a plane into a set of
+non-overlapping regions (facets) that cover the whole plane. The above
+structure describes a subdivision built on a 2D point set, where the points
+are linked together and form a planar graph, which, together with a few
+edges connecting the exterior subdivision points (namely, convex hull points)
+with infinity, subdivides the plane into facets by its edges.
+
+For every subdivision, there is a dual subdivision in which facets and
+points (subdivision vertices) swap their roles. This means that a facet is
+treated as a vertex (called a virtual point below) of the dual subdivision and
+the original subdivision vertices become facets. In the figure below, the
+original subdivision is marked with solid lines and the dual subdivision
+with dotted lines.
+
+.. image:: pics/subdiv.png
+
+OpenCV subdivides a plane into triangles using the Delaunay
+algorithm. Subdivision is built iteratively, starting from a dummy
+triangle that is guaranteed to include all the subdivision points. In this
+case, the dual subdivision is a Voronoi diagram of the input 2D point set. The
+subdivisions can be used for the 3D piece-wise transformation of a plane,
+morphing, fast location of points on the plane, building special graphs
+(such as NNG, RNG), and so forth.
+
+CvQuadEdge2D
+------------
+
+.. ocv:struct:: CvQuadEdge2D
+
+Quad-edge of a planar subdivision.
+
+::
+
+ /* one of edges within quad-edge, lower 2 bits is index (0..3)
+ and upper bits are quad-edge pointer */
+ typedef long CvSubdiv2DEdge;
+
+ /* quad-edge structure fields */
+ #define CV_QUADEDGE2D_FIELDS() \
+ int flags; \
+ struct CvSubdiv2DPoint* pt[4]; \
+ CvSubdiv2DEdge next[4];
+
+ typedef struct CvQuadEdge2D
+ {
+ CV_QUADEDGE2D_FIELDS()
+ }
+ CvQuadEdge2D;
+
+..
+
+Quad-edge is a basic element of a subdivision containing four edges (e, eRot, reversed e, and reversed eRot):
+
+.. image:: pics/quadedge.png
+
+CvSubdiv2DPoint
+---------------
+
+.. ocv:struct:: CvSubdiv2DPoint
+
+Point of an original or dual subdivision.
+
+::
+
+ #define CV_SUBDIV2D_POINT_FIELDS()\
+ int flags; \
+ CvSubdiv2DEdge first; \
+ CvPoint2D32f pt; \
+ int id;
+
+ #define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
+
+ typedef struct CvSubdiv2DPoint
+ {
+ CV_SUBDIV2D_POINT_FIELDS()
+ }
+ CvSubdiv2DPoint;
+
+..
+
+* id
+ This integer can be used to index auxiliary data associated with each vertex of the planar subdivision.
+
+CalcSubdivVoronoi2D
+-------------------
+Calculates the coordinates of the Voronoi diagram cells.
+
+.. ocv:cfunction:: void cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv )
+.. ocv:pyoldfunction:: cv.CalcSubdivVoronoi2D(subdiv)-> None
+
+ :param subdiv: Delaunay subdivision, in which all the points are already added.
+
+The function calculates the coordinates
+of virtual points. All virtual points corresponding to a vertex of the
+original subdivision form (when connected together) a boundary of the Voronoi
+cell at that point.
+
+ClearSubdivVoronoi2D
+--------------------
+Removes all virtual points.
+
+.. ocv:cfunction:: void cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv )
+.. ocv:pyoldfunction:: cv.ClearSubdivVoronoi2D(subdiv)-> None
+
+ :param subdiv: Delaunay subdivision.
+
+The function removes all of the virtual points. It is called internally in
+:ocv:cfunc:`CalcSubdivVoronoi2D` if the subdivision was modified after the
+previous call to the function.
+
+CreateSubdivDelaunay2D
+----------------------
+Creates an empty Delaunay triangulation.
+
+.. ocv:cfunction:: CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage )
+.. ocv:pyoldfunction:: cv.CreateSubdivDelaunay2D(rect, storage)-> emptyDelaunayTriangulation
+
+ :param rect: Rectangle that includes all of the 2D points that are to be added to the subdivision.
+
+ :param storage: Container for the subdivision.
+
+The function creates an empty Delaunay
+subdivision where 2D points can be added using the function
+:ocv:cfunc:`SubdivDelaunay2DInsert`. All of the points to be added must be within
+the specified rectangle, otherwise a runtime error is raised.
+
+Note that initially the triangulation consists of a single large triangle that covers the given rectangle; hence the three vertices of this triangle are outside the rectangle ``rect``.
+
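+A minimal construction sketch (the rectangle size and point coordinates are illustrative
+only; every inserted point must lie inside ``rect``): ::
+
+    CvMemStorage* storage = cvCreateMemStorage(0);
+    CvRect rect = cvRect( 0, 0, 600, 600 );
+    CvSubdiv2D* subdiv = cvCreateSubdivDelaunay2D( rect, storage );
+
+    for( int i = 0; i < 10; i++ )
+    {
+        CvPoint2D32f pt = cvPoint2D32f( rand() % 600, rand() % 600 );
+        cvSubdivDelaunay2DInsert( subdiv, pt );
+    }
+    cvCalcSubdivVoronoi2D( subdiv );   /* compute the virtual (Voronoi) points */
+    cvReleaseMemStorage( &storage );   /* the subdivision lives in the storage */
+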
+FindNearestPoint2D
+------------------
+Finds the subdivision vertex closest to the given point.
+
+.. ocv:cfunction:: CvSubdiv2DPoint* cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt )
+.. ocv:pyoldfunction:: cv.FindNearestPoint2D(subdiv, pt)-> point
+
+ :param subdiv: Delaunay or another subdivision.
+
+ :param pt: Input point.
+
+The function locates the input point within the subdivision and finds the
+subdivision vertex that is closest to it. The found vertex is not necessarily one of the vertices
+of the facet containing the input point, though the facet (located using
+:ocv:cfunc:`Subdiv2DLocate`) is used as a starting
+point. The function returns a pointer to the found subdivision vertex.
+
+Subdiv2DEdgeDst
+---------------
+Returns the edge destination.
+
+.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge )
+.. ocv:pyoldfunction:: cv.Subdiv2DEdgeDst(edge)-> point
+
+ :param edge: Subdivision edge (not a quad-edge).
+
+The function returns the edge destination. The
+returned pointer may be NULL if the edge is from a dual subdivision and
+the virtual point coordinates are not calculated yet. The virtual points
+can be calculated using the function
+:ocv:cfunc:`CalcSubdivVoronoi2D`.
+
+Subdiv2DGetEdge
+---------------
+Returns one of the edges related to the given edge.
+
+.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type )
+.. ocv:pyoldfunction:: cv.Subdiv2DGetEdge(edge, type)-> CvSubdiv2DEdge
+
+ :param edge: Subdivision edge (not a quad-edge).
+
+ :param type: Parameter specifying which of the related edges to return. The following values are possible:
+
+ * **CV_NEXT_AROUND_ORG** next around the edge origin ( ``eOnext`` on the picture below if ``e`` is the input edge)
+
+ * **CV_NEXT_AROUND_DST** next around the edge vertex ( ``eDnext`` )
+
+ * **CV_PREV_AROUND_ORG** previous around the edge origin (reversed ``eRnext`` )
+
+ * **CV_PREV_AROUND_DST** previous around the edge destination (reversed ``eLnext`` )
+
+ * **CV_NEXT_AROUND_LEFT** next around the left facet ( ``eLnext`` )
+
+ * **CV_NEXT_AROUND_RIGHT** next around the right facet ( ``eRnext`` )
+
+ * **CV_PREV_AROUND_LEFT** previous around the left facet (reversed ``eOnext`` )
+
+ * **CV_PREV_AROUND_RIGHT** previous around the right facet (reversed ``eDnext`` )
+
+.. image:: pics/quadedge.png
+
+The function returns one of the edges related to the input edge.
+
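+For example, the edges bounding the facet to the left of an edge ``e`` can be visited with
+the following loop (a sketch; ``e`` is assumed to be a valid ``CvSubdiv2DEdge``): ::
+
+    CvSubdiv2DEdge t = e;
+    do
+    {
+        /* process the edge t here */
+        t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
+    }
+    while( t != e );
+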
+Subdiv2DNextEdge
+----------------
+Returns next edge around the edge origin.
+
+.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge )
+.. ocv:pyoldfunction:: cv.Subdiv2DNextEdge(edge)-> CvSubdiv2DEdge
+
+ :param edge: Subdivision edge (not a quad-edge).
+
+The function returns the next edge around the edge origin
+(``eOnext`` in the picture above if ``e`` is the input edge).
+
+Subdiv2DLocate
+--------------
+Returns the location of a point within a Delaunay triangulation.
+
+.. ocv:cfunction:: CvSubdiv2DPointLocation cvSubdiv2DLocate( CvSubdiv2D* subdiv, CvPoint2D32f pt, CvSubdiv2DEdge* edge, CvSubdiv2DPoint** vertex=NULL )
+.. ocv:pyoldfunction:: cv.Subdiv2DLocate(subdiv, pt) -> (loc, where)
+
+ :param subdiv: Delaunay or another subdivision.
+
+ :param pt: Point to locate.
+
+ :param edge: Output edge that the point belongs to or is located to the right of.
+
+ :param vertex: Optional output pointer to the vertex that the input point coincides with.
+
+The function locates the input point within the subdivision. There are five cases (a dispatching sketch follows the list):
+
+*
+  The point falls into some facet. The function returns
+  ``CV_PTLOC_INSIDE`` and ``*edge`` will contain one of the edges of the facet.
+
+*
+  The point falls onto the edge. The function returns
+  ``CV_PTLOC_ON_EDGE`` and ``*edge`` will contain this edge.
+
+*
+  The point coincides with one of the subdivision vertices. The function returns
+  ``CV_PTLOC_VERTEX`` and ``*vertex`` will contain a pointer to the vertex.
+
+*
+  The point is outside the subdivision reference rectangle. The function returns
+  ``CV_PTLOC_OUTSIDE_RECT`` and no pointers are filled.
+
+*
+  One of the input arguments is invalid. A runtime error is raised or, if the silent or "parent" error processing mode is selected,
+  ``CV_PTLOC_ERROR`` is returned.
+
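+A dispatching sketch (``subdiv`` and ``pt`` are assumed to be prepared as above): ::
+
+    CvSubdiv2DEdge edge = 0;
+    CvSubdiv2DPoint* vertex = 0;
+
+    switch( cvSubdiv2DLocate( subdiv, pt, &edge, &vertex ) )
+    {
+    case CV_PTLOC_INSIDE:  /* edge is one of the facet edges  */ break;
+    case CV_PTLOC_ON_EDGE: /* the point lies on edge          */ break;
+    case CV_PTLOC_VERTEX:  /* the point coincides with vertex */ break;
+    case CV_PTLOC_OUTSIDE_RECT:
+    default: break;
+    }
+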
+Subdiv2DRotateEdge
+------------------
+Returns another edge of the same quad-edge.
+
+.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate )
+.. ocv:pyoldfunction:: cv.Subdiv2DRotateEdge(edge, rotate)-> CvSubdiv2DEdge
+
+ :param edge: Subdivision edge (not a quad-edge).
+
+ :param rotate: Parameter specifying which of the edges of the same quad-edge as the input one to return. The following values are possible:
+
+ * **0** the input edge ( ``e`` on the picture above if ``e`` is the input edge)
+
+ * **1** the rotated edge ( ``eRot`` )
+
+ * **2** the reversed edge (reversed ``e`` (in green))
+
+ * **3** the reversed rotated edge (reversed ``eRot`` (in green))
+
+The function returns one of the edges of the same quad-edge as the input edge.
+
+SubdivDelaunay2DInsert
+----------------------
+Inserts a single point into a Delaunay triangulation.
+
+.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt)
+.. ocv:pyoldfunction:: cv.SubdivDelaunay2DInsert(subdiv, pt)-> point
+
+ :param subdiv: Delaunay subdivision created by the function :ocv:cfunc:`CreateSubdivDelaunay2D`.
+
+ :param pt: Inserted point.
+
+The function inserts a single point into a subdivision and modifies the subdivision topology appropriately. If a point with the same coordinates exists already, no new point is added. The function returns a pointer to the allocated point. No virtual point coordinates are calculated at this stage.
+
CV_WRAP cv::Mat getWeights() const;
CV_WRAP cv::Mat getProbs() const;
- CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? likelihood : DBL_MAX; }
+ CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? logLikelihood : DBL_MAX; }
#endif
CV_WRAP virtual void clear();
cv::EM emObj;
cv::Mat probs;
- double likelihood;
+ double logLikelihood;
CvMat meansHdr;
std::vector<CvMat> covsHdrs;
return classifier_.trees_.empty();
}
+
+////////////////////// Brute Force Matcher //////////////////////////
+
+template<class Distance>
+class CV_EXPORTS BruteForceMatcher : public BFMatcher
+{
+public:
+ BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {}
+ virtual ~BruteForceMatcher() {}
+};
+
+
/****************************************************************************************\
* Planar Object Detection *
\****************************************************************************************/
CvGaussBGStatModelParams params;
CvGaussBGPoint* g_point;
int countFrames;
+ void* mog;
} CvGaussBGModel;
if( *bg_model )
{
- delete (cv::Mat*)((*bg_model)->g_point);
+ delete (cv::BackgroundSubtractorMOG*)((*bg_model)->mog);
cvReleaseImage( &(*bg_model)->background );
cvReleaseImage( &(*bg_model)->foreground );
- cvReleaseMemStorage(&(*bg_model)->storage);
memset( *bg_model, 0, sizeof(**bg_model) );
delete *bg_model;
*bg_model = 0;
static int CV_CDECL
icvUpdateGaussianBGModel( IplImage* curr_frame, CvGaussBGModel* bg_model, double learningRate )
{
- int region_count = 0;
-
cv::Mat image = cv::cvarrToMat(curr_frame), mask = cv::cvarrToMat(bg_model->foreground);
- cv::BackgroundSubtractorMOG mog;
- mog.bgmodel = *(cv::Mat*)bg_model->g_point;
- mog.frameSize = mog.bgmodel.data ? cv::Size(cvGetSize(curr_frame)) : cv::Size();
- mog.frameType = image.type();
-
- mog.nframes = bg_model->countFrames;
- mog.history = bg_model->params.win_size;
- mog.nmixtures = bg_model->params.n_gauss;
- mog.varThreshold = bg_model->params.std_threshold*bg_model->params.std_threshold;
- mog.backgroundRatio = bg_model->params.bg_threshold;
-
- mog(image, mask, learningRate);
-
- bg_model->countFrames = mog.nframes;
- if( ((cv::Mat*)bg_model->g_point)->data != mog.bgmodel.data )
- *((cv::Mat*)bg_model->g_point) = mog.bgmodel;
-
- //foreground filtering
-
- //filter small regions
- cvClearMemStorage(bg_model->storage);
-
- //cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_OPEN, 1 );
- //cvMorphologyEx( bg_model->foreground, bg_model->foreground, 0, 0, CV_MOP_CLOSE, 1 );
-
-#if 0
- CvSeq *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
- cvFindContours( bg_model->foreground, bg_model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
- for( seq = first_seq; seq; seq = seq->h_next )
- {
- CvContour* cnt = (CvContour*)seq;
- if( cnt->rect.width * cnt->rect.height < bg_model->params.minArea )
- {
- //delete small contour
- prev_seq = seq->h_prev;
- if( prev_seq )
- {
- prev_seq->h_next = seq->h_next;
- if( seq->h_next ) seq->h_next->h_prev = prev_seq;
- }
- else
- {
- first_seq = seq->h_next;
- if( seq->h_next ) seq->h_next->h_prev = NULL;
- }
- }
- else
- {
- region_count++;
- }
- }
- bg_model->foreground_regions = first_seq;
- cvZero(bg_model->foreground);
- cvDrawContours(bg_model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);
-#endif
+ cv::BackgroundSubtractorMOG* mog = (cv::BackgroundSubtractorMOG*)(bg_model->mog);
+ CV_Assert(mog != 0);
- CvMat _mask = mask;
- cvCopy(&_mask, bg_model->foreground);
+ (*mog)(image, mask, learningRate);
+ bg_model->countFrames++;
- return region_count;
+ return 0;
}
CV_IMPL CvBGStatModel*
bg_model->params = params;
- //prepare storages
- bg_model->g_point = (CvGaussBGPoint*)new cv::Mat();
+ cv::BackgroundSubtractorMOG* mog =
+ new cv::BackgroundSubtractorMOG(params.win_size,
+ params.n_gauss,
+ params.bg_threshold,
+ params.variance_init);
- bg_model->background = cvCreateImage(cvSize(first_frame->width,
- first_frame->height), IPL_DEPTH_8U, first_frame->nChannels);
- bg_model->foreground = cvCreateImage(cvSize(first_frame->width,
- first_frame->height), IPL_DEPTH_8U, 1);
+ bg_model->mog = mog;
- bg_model->storage = cvCreateMemStorage();
+ CvSize sz = cvGetSize(first_frame);
+ bg_model->background = cvCreateImage(sz, IPL_DEPTH_8U, first_frame->nChannels);
+ bg_model->foreground = cvCreateImage(sz, IPL_DEPTH_8U, 1);
bg_model->countFrames = 0;
/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// Intel License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright( C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of Intel Corporation may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-//(including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort(including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
+ //
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ // By downloading, copying, installing or using the software you agree to this license.
+ // If you do not agree to this license, do not download, install,
+ // copy or use the software.
+ //
+ //
+ // Intel License Agreement
+ // For Open Source Computer Vision Library
+ //
+ // Copyright( C) 2000, Intel Corporation, all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ // * Redistribution's of source code must retain the above copyright notice,
+ // this list of conditions and the following disclaimer.
+ //
+ // * Redistribution's in binary form must reproduce the above copyright notice,
+ // this list of conditions and the following disclaimer in the documentation
+ // and/or other materials provided with the distribution.
+ //
+ // * The name of Intel Corporation may not be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ //(including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort(including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
#include "precomp.hpp"
using namespace cv;
CvEMParams::CvEMParams() : nclusters(10), cov_mat_type(CvEM::COV_MAT_DIAGONAL),
- start_step(CvEM::START_AUTO_STEP), probs(0), weights(0), means(0), covs(0)
+start_step(CvEM::START_AUTO_STEP), probs(0), weights(0), means(0), covs(0)
{
term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON );
}
CvEMParams::CvEMParams( int _nclusters, int _cov_mat_type, int _start_step,
- CvTermCriteria _term_crit, const CvMat* _probs,
- const CvMat* _weights, const CvMat* _means, const CvMat** _covs ) :
- nclusters(_nclusters), cov_mat_type(_cov_mat_type), start_step(_start_step),
- probs(_probs), weights(_weights), means(_means), covs(_covs), term_crit(_term_crit)
+ CvTermCriteria _term_crit, const CvMat* _probs,
+ const CvMat* _weights, const CvMat* _means, const CvMat** _covs ) :
+nclusters(_nclusters), cov_mat_type(_cov_mat_type), start_step(_start_step),
+probs(_probs), weights(_weights), means(_means), covs(_covs), term_crit(_term_crit)
{}
-CvEM::CvEM() : likelihood(DBL_MAX)
+CvEM::CvEM() : logLikelihood(DBL_MAX)
{
}
CvEM::CvEM( const CvMat* samples, const CvMat* sample_idx,
- CvEMParams params, CvMat* labels ) : likelihood(DBL_MAX)
+ CvEMParams params, CvMat* labels ) : logLikelihood(DBL_MAX)
{
train(samples, sample_idx, params, labels);
}
double CvEM::calcLikelihood( const Mat &input_sample ) const
{
- double likelihood;
- emObj.predict(input_sample, noArray(), &likelihood);
- return likelihood;
+ return emObj.predict(input_sample)[0];
}
float
CvEM::predict( const CvMat* _sample, CvMat* _probs ) const
{
Mat prbs0 = cvarrToMat(_probs), prbs = prbs0, sample = cvarrToMat(_sample);
- int cls = emObj.predict(sample, _probs ? _OutputArray(prbs) : cv::noArray());
+ int cls = static_cast<int>(emObj.predict(sample, _probs ? _OutputArray(prbs) : cv::noArray())[1]);
if(_probs)
{
if( prbs.data != prbs0.data )
prbs = src.probs;
weights = src.weights;
means = src.means;
-
+
if(src.covs)
{
covsHdrs.resize(src.nclusters);
}
bool CvEM::train( const CvMat* _samples, const CvMat* _sample_idx,
- CvEMParams _params, CvMat* _labels )
+ CvEMParams _params, CvMat* _labels )
{
CV_Assert(_sample_idx == 0);
Mat samples = cvarrToMat(_samples), labels0, labels;
bool isOk = train(samples, Mat(), _params, _labels ? &labels : 0);
CV_Assert( labels0.data == labels.data );
-
+
return isOk;
}
CvEMParams _params, Mat* _labels )
{
CV_Assert(_sample_idx.empty());
- Mat prbs, weights, means, likelihoods;
+ Mat prbs, weights, means, logLikelihoods;
std::vector<Mat> covsHdrs;
init_params(_params, prbs, weights, means, covsHdrs);
-
+
emObj = EM(_params.nclusters, _params.cov_mat_type, _params.term_crit);
bool isOk = false;
if( _params.start_step == EM::START_AUTO_STEP )
- isOk = emObj.train(_samples, _labels ? _OutputArray(*_labels) : cv::noArray(),
- probs, likelihoods);
+ isOk = emObj.train(_samples,
+ logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
else if( _params.start_step == EM::START_E_STEP )
isOk = emObj.trainE(_samples, means, covsHdrs, weights,
- _labels ? _OutputArray(*_labels) : cv::noArray(),
- probs, likelihoods);
+ logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
else if( _params.start_step == EM::START_M_STEP )
isOk = emObj.trainM(_samples, prbs,
- _labels ? _OutputArray(*_labels) : cv::noArray(),
- probs, likelihoods);
+ logLikelihoods, _labels ? _OutputArray(*_labels) : cv::noArray(), probs);
else
CV_Error(CV_StsBadArg, "Bad start type of EM algorithm");
if(isOk)
{
- likelihoods = sum(likelihoods).val[0];
+ logLikelihood = sum(logLikelihoods).val[0];
set_mat_hdrs();
}
-
+
return isOk;
}
float
CvEM::predict( const Mat& _sample, Mat* _probs ) const
{
- int cls = emObj.predict(_sample, _probs ? _OutputArray(*_probs) : cv::noArray());
- return (float)cls;
+ return static_cast<float>(emObj.predict(_sample, _probs ? _OutputArray(*_probs) : cv::noArray())[1]);
}
int CvEM::getNClusters() const
{
if( _keypoints )
{
- CvSURFPoint pt = cvSURFPoint(kpt[i].pt, kpt[i].class_id, cvRound(kpt[i].size));
+ CvSURFPoint pt = cvSURFPoint(kpt[i].pt, kpt[i].class_id, cvRound(kpt[i].size), kpt[i].angle, kpt[i].response);
cvSeqPush(*_keypoints, &pt);
}
if( _descriptors )
--- /dev/null
+#include "test_precomp.hpp"
+
+using namespace cv;
+
+struct CV_EXPORTS L2Fake : public L2<float>
+{
+ enum { normType = NORM_L2 };
+};
+
+class CV_BruteForceMatcherTest : public cvtest::BaseTest
+{
+public:
+ CV_BruteForceMatcherTest() {}
+protected:
+ void run( int )
+ {
+ const int dimensions = 64;
+ const int descriptorsNumber = 5000;
+
+ Mat train = Mat( descriptorsNumber, dimensions, CV_32FC1);
+ Mat query = Mat( descriptorsNumber, dimensions, CV_32FC1);
+
+ Mat permutation( 1, descriptorsNumber, CV_32SC1 );
+ for( int i=0;i<descriptorsNumber;i++ )
+ permutation.at<int>( 0, i ) = i;
+
+ //RNG rng = RNG( cvGetTickCount() );
+ RNG rng;
+ randShuffle( permutation, 1, &rng );
+
+ float boundary = 500.f;
+ for( int row=0;row<descriptorsNumber;row++ )
+ {
+ for( int col=0;col<dimensions;col++ )
+ {
+ int bit = rng( 2 );
+ train.at<float>( permutation.at<int>( 0, row ), col ) = bit*boundary + rng.uniform( 0.f, boundary );
+ query.at<float>( row, col ) = bit*boundary + rng.uniform( 0.f, boundary );
+ }
+ }
+
+ vector<DMatch> specMatches, genericMatches;
+ BruteForceMatcher<L2<float> > specMatcher;
+ BruteForceMatcher<L2Fake > genericMatcher;
+
+ int64 time0 = cvGetTickCount();
+ specMatcher.match( query, train, specMatches );
+ int64 time1 = cvGetTickCount();
+ genericMatcher.match( query, train, genericMatches );
+ int64 time2 = cvGetTickCount();
+
+ float specMatcherTime = float(time1 - time0)/(float)cvGetTickFrequency();
+ ts->printf( cvtest::TS::LOG, "Matching by matrix multiplication time s: %f, us per pair: %f\n",
+ specMatcherTime*1e-6, specMatcherTime/( descriptorsNumber*descriptorsNumber ) );
+
+ float genericMatcherTime = float(time2 - time1)/(float)cvGetTickFrequency();
+ ts->printf( cvtest::TS::LOG, "Matching without matrix multiplication time s: %f, us per pair: %f\n",
+ genericMatcherTime*1e-6, genericMatcherTime/( descriptorsNumber*descriptorsNumber ) );
+
+ if( (int)specMatches.size() != descriptorsNumber || (int)genericMatches.size() != descriptorsNumber )
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
+ for( int i=0;i<descriptorsNumber;i++ )
+ {
+ float epsilon = 0.01f;
+ bool isEquiv = fabs( specMatches[i].distance - genericMatches[i].distance ) < epsilon &&
+ specMatches[i].queryIdx == genericMatches[i].queryIdx &&
+ specMatches[i].trainIdx == genericMatches[i].trainIdx;
+ if( !isEquiv || specMatches[i].trainIdx != permutation.at<int>( 0, i ) )
+ {
+ ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
+ break;
+ }
+ }
+
+
+ //Test mask
+ Mat mask( query.rows, train.rows, CV_8UC1 );
+ rng.fill( mask, RNG::UNIFORM, 0, 2 );
+
+
+ time0 = cvGetTickCount();
+ specMatcher.match( query, train, specMatches, mask );
+ time1 = cvGetTickCount();
+ genericMatcher.match( query, train, genericMatches, mask );
+ time2 = cvGetTickCount();
+
+ specMatcherTime = float(time1 - time0)/(float)cvGetTickFrequency();
+ ts->printf( cvtest::TS::LOG, "Matching by matrix multiplication time with mask s: %f, us per pair: %f\n",
+ specMatcherTime*1e-6, specMatcherTime/( descriptorsNumber*descriptorsNumber ) );
+
+ genericMatcherTime = float(time2 - time1)/(float)cvGetTickFrequency();
+ ts->printf( cvtest::TS::LOG, "Matching without matrix multiplication time with mask s: %f, us per pair: %f\n",
+ genericMatcherTime*1e-6, genericMatcherTime/( descriptorsNumber*descriptorsNumber ) );
+
+ if( specMatches.size() != genericMatches.size() )
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
+
+ for( size_t i=0;i<specMatches.size();i++ )
+ {
+ //float epsilon = 1e-2;
+ float epsilon = 10000000;
+ bool isEquiv = fabs( specMatches[i].distance - genericMatches[i].distance ) < epsilon &&
+ specMatches[i].queryIdx == genericMatches[i].queryIdx &&
+ specMatches[i].trainIdx == genericMatches[i].trainIdx;
+ if( !isEquiv )
+ {
+ ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
+ break;
+ }
+ }
+ }
+};
+
+TEST(Legacy_BruteForceMatcher, accuracy) { CV_BruteForceMatcherTest test; test.safe_run(); }
+
}
-TEST(Features2d_LSH, regression) { CV_LSHTest test; test.safe_run(); }
-TEST(Features2d_SpillTree, regression) { CV_SpillTreeTest_C test; test.safe_run(); }
-TEST(Features2d_KDTree_C, regression) { CV_KDTreeTest_C test; test.safe_run(); }
+TEST(Legacy_LSH, regression) { CV_LSHTest test; test.safe_run(); }
+TEST(Legacy_SpillTree, regression) { CV_SpillTreeTest_C test; test.safe_run(); }
+TEST(Legacy_KDTree_C, regression) { CV_KDTreeTest_C test; test.safe_run(); }
void calcOpticalFlowBM( const Mat& prev, const Mat& curr, Size bSize, Size shiftSize, Size maxRange, int usePrevious, Mat& flow )
{
- Size sz((curr.cols - bSize.width)/shiftSize.width, (curr.rows - bSize.height)/shiftSize.height);
+ Size sz((curr.cols - bSize.width + shiftSize.width)/shiftSize.width, (curr.rows - bSize.height + shiftSize.height)/shiftSize.height);
Mat velx(sz, CV_32F), vely(sz, CV_32F);
CvMat cvvelx = velx; CvMat cvvely = vely;
}
-TEST(Video_OpticalFlow, accuracy) { CV_OptFlowTest test; test.safe_run(); }
+TEST(Legacy_OpticalFlow, accuracy) { CV_OptFlowTest test; test.safe_run(); }
ts->set_failed_test_info( code );
}
-TEST(Imgproc_PyrSegmentation, regression) { CV_PyrSegmentationTest test; test.safe_run(); }
+TEST(Legacy_PyrSegmentation, regression) { CV_PyrSegmentationTest test; test.safe_run(); }
/* End of file. */
};
-TEST(Calib3d_StereoGC, regression) { CV_StereoGCTest test; test.safe_run(); }
+TEST(Legacy_StereoGC, regression) { CV_StereoGCTest test; test.safe_run(); }
return code;
}
-TEST(Imgproc_Subdiv, correctness) { CV_SubdivTest test; test.safe_run(); }
+TEST(Legacy_Subdiv, correctness) { CV_SubdivTest test; test.safe_run(); }
/* End of file. */
Extremely randomized trees were introduced by Pierre Geurts, Damien Ernst and Louis Wehenkel in the article "Extremely randomized trees", 2006 [http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.65.7485&rep=rep1&type=pdf]. The algorithm for growing Extremely randomized trees is similar to :ref:`Random Trees` (Random Forest), but there are two differences:
-#. Extremely randomized trees don't apply the bagging procedure to constract the training samples for each tree. The same input training set is used to train all trees.
+#. Extremely randomized trees don't apply the bagging procedure to construct the training sample set for each tree. The same input training set is used to train all trees.
#. Extremely randomized trees choose a node split completely at random (both the variable index and the variable splitting value are chosen randomly), whereas Random Forest finds the best split (optimal by variable index and splitting value) among a random subset of variables. (A minimal usage sketch follows the list.)
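+
+A minimal usage sketch (an illustration, not part of the reference; it assumes the ``CvERTrees`` training interface inherited from :ref:`Random Trees`, and ``trainData``/``responses`` are hypothetical row-sample training matrices): ::
+
+ // Train extremely randomized trees; CvERTrees reuses the
+ // CvRTrees::train() interface and parameters.
+ CvERTrees ertrees;
+ CvRTParams params; // default forest parameters
+ ertrees.train(trainData, CV_ROW_SAMPLE, responses,
+ cv::Mat(), cv::Mat(), cv::Mat(), cv::Mat(), params);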
+
+.. _ML_Expectation Maximization:
+
+
Expectation Maximization
========================
*
Bilmes98 J. A. Bilmes. *A Gentle Tutorial of the EM Algorithm and its Application to Parameter Estimation for Gaussian Mixture and Hidden Markov Models*. Technical Report TR-97-021, International Computer Science Institute and Computer Science Division, University of California at Berkeley, April 1998.
+EM
+--
+.. ocv:class:: EM
-CvEMParams
-----------
-.. ocv:class:: CvEMParams
-
-Parameters of the EM algorithm. All parameters are public. You can initialize them by a constructor and then override some of them directly if you want.
-
+The class implements the EM algorithm as described at the beginning of this section. It is inherited from :ocv:class:`Algorithm`.
-CvEMParams::CvEMParams
-----------------------
-The constructors
+EM::EM
+------
+The constructor of the class
-.. ocv:function:: CvEMParams::CvEMParams()
+.. ocv:function:: EM::EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL, const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, FLT_EPSILON) )
-.. ocv:function:: CvEMParams::CvEMParams( int nclusters, int cov_mat_type=CvEM::COV_MAT_DIAGONAL, int start_step=CvEM::START_AUTO_STEP, CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 )
- :param nclusters: The number of mixture components in the gaussian mixture model. Some of EM implementation could determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet.
+ :param nclusters: The number of mixture components in the Gaussian mixture model. The default value of the parameter is ``EM::DEFAULT_NCLUSTERS=5``. Some EM implementations could determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet.
- :param cov_mat_type: Constraint on covariance matrices which defines type of matrices. Possible values are:
-
- * **CvEM::COV_MAT_SPHERICAL** A scaled identity matrix :math:`\mu_k * I`. There is the only parameter :math:`\mu_k` to be estimated for earch matrix. The option may be used in special cases, when the constraint is relevant, or as a first step in the optimization (for example in case when the data is preprocessed with PCA). The results of such preliminary estimation may be passed again to the optimization procedure, this time with ``cov_mat_type=CvEM::COV_MAT_DIAGONAL``.
-
- * **CvEM::COV_MAT_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of free parameters is ``d`` for each matrix. This is most commonly used option yielding good estimation results.
-
- * **CvEM::COV_MAT_GENERIC** A symmetric positively defined matrix. The number of free parameters in each matrix is about :math:`d^2/2`. It is not recommended to use this option, unless there is pretty accurate initial estimation of the parameters and/or a huge number of training samples.
-
- :param start_step: The start step of the EM algorithm:
-
- * **CvEM::START_E_STEP** Start with Expectation step. You need to provide means :math:`a_k` of mixture components to use this option. Optionally you can pass weights :math:`\pi_k` and covariance matrices :math:`S_k` of mixture components.
- * **CvEM::START_M_STEP** Start with Maximization step. You need to provide initial probabilities :math:`p_{i,k}` to use this option.
- * **CvEM::START_AUTO_STEP** Start with Expectation step. You need not provide any parameters because they will be estimated by the k-means algorithm.
+ :param covMatType: Constraint on covariance matrices which defines type of matrices. Possible values are:
- :param term_crit: The termination criteria of the EM algorithm. The EM algorithm can be terminated by the number of iterations ``term_crit.max_iter`` (number of M-steps) or when relative change of likelihood logarithm is less than ``term_crit.epsilon``.
+ * **EM::COV_MAT_SPHERICAL** A scaled identity matrix :math:`\mu_k * I`. There is only one parameter :math:`\mu_k` to be estimated for each matrix. The option may be used in special cases, when the constraint is relevant, or as a first step in the optimization (for example, when the data is preprocessed with PCA). The results of such preliminary estimation may be passed again to the optimization procedure, this time with ``covMatType=EM::COV_MAT_DIAGONAL``.
- :param probs: Initial probabilities :math:`p_{i,k}` of sample :math:`i` to belong to mixture component :math:`k`. It is a floating-point matrix of :math:`nsamples \times nclusters` size. It is used and must be not NULL only when ``start_step=CvEM::START_M_STEP``.
+ * **EM::COV_MAT_DIAGONAL** A diagonal matrix with positive diagonal elements. The number of free parameters is ``d`` for each matrix. This is the most commonly used option, yielding good estimation results.
- :param weights: Initial weights :math:`\pi_k` of mixture components. It is a floating-point vector with :math:`nclusters` elements. It is used (if not NULL) only when ``start_step=CvEM::START_E_STEP``.
-
- :param means: Initial means :math:`a_k` of mixture components. It is a floating-point matrix of :math:`nclusters \times dims` size. It is used used and must be not NULL only when ``start_step=CvEM::START_E_STEP``.
-
- :param covs: Initial covariance matrices :math:`S_k` of mixture components. Each of covariance matrices is a valid square floating-point matrix of :math:`dims \times dims` size. It is used (if not NULL) only when ``start_step=CvEM::START_E_STEP``.
-
-The default constructor represents a rough rule-of-the-thumb:
-
-::
-
- CvEMParams() : nclusters(10), cov_mat_type(1/*CvEM::COV_MAT_DIAGONAL*/),
- start_step(0/*CvEM::START_AUTO_STEP*/), probs(0), weights(0), means(0), covs(0)
- {
- term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON );
- }
-
-
-With another constructor it is possible to override a variety of parameters from a single number of mixtures (the only essential problem-dependent parameter) to initial values for the mixture parameters.
+ * **EM::COV_MAT_GENERIC** A symmetric positive-definite matrix. The number of free parameters in each matrix is about :math:`d^2/2`. It is not recommended to use this option unless there is a fairly accurate initial estimate of the parameters and/or a huge number of training samples.
+
+ :param termCrit: The termination criteria of the EM algorithm. The EM algorithm can be terminated by the number of iterations ``termCrit.maxCount`` (number of M-steps) or when the relative change of the likelihood logarithm is less than ``termCrit.epsilon``. The default maximum number of iterations is ``EM::DEFAULT_MAX_ITERS=100``.
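+
+For example, a model with a custom number of mixtures and termination criteria can be created as follows (an illustrative sketch; the parameter values are arbitrary): ::
+
+ cv::EM em(4, cv::EM::COV_MAT_GENERIC,
+ cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
+ 300, 1e-6));
+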
+EM::train
+---------
+Estimates the Gaussian mixture parameters from a sample set.
-CvEM
-----
-.. ocv:class:: CvEM
+.. ocv:function:: bool EM::train(InputArray samples, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
- The class implements the EM algorithm as described in the beginning of this section.
+.. ocv:function:: bool EM::trainE(InputArray samples, InputArray means0, InputArray covs0=noArray(), InputArray weights0=noArray(), OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
+
+.. ocv:function:: bool EM::trainM(InputArray samples, InputArray probs0, OutputArray logLikelihoods=noArray(), OutputArray labels=noArray(), OutputArray probs=noArray())
+ :param samples: Samples from which the Gaussian mixture model will be estimated. It should be a one-channel matrix, each row of which is a sample. If the matrix does not have ``CV_64F`` type, it will be converted to an inner matrix of that type for further computation.
+
+ :param means0: Initial means :math:`a_k` of mixture components. It is a one-channel matrix of :math:`nclusters \times dims` size. If the matrix does not have ``CV_64F`` type, it will be converted to an inner matrix of that type for further computation.
-CvEM::train
------------
-Estimates the Gaussian mixture parameters from a sample set.
+ :param covs0: The vector of initial covariance matrices :math:`S_k` of mixture components. Each covariance matrix is a one-channel matrix of :math:`dims \times dims` size. If the matrices do not have ``CV_64F`` type, they will be converted to inner matrices of that type for further computation.
+
+ :param weights0: Initial weights :math:`\pi_k` of mixture components. It should be a one-channel floating-point matrix with :math:`1 \times nclusters` or :math:`nclusters \times 1` size.
+
+ :param probs0: Initial probabilities :math:`p_{i,k}` that sample :math:`i` belongs to mixture component :math:`k`. It is a one-channel floating-point matrix of :math:`nsamples \times nclusters` size.
-.. ocv:function:: void CvEM::train( const Mat& samples, const Mat& sample_idx=Mat(), CvEMParams params=CvEMParams(), Mat* labels=0 )
+ :param logLikelihoods: The optional output matrix that contains a likelihood logarithm value for each sample. It has :math:`nsamples \times 1` size and ``CV_64FC1`` type.
-.. ocv:function:: bool CvEM::train( const CvMat* samples, const CvMat* sampleIdx=0, CvEMParams params=CvEMParams(), CvMat* labels=0 )
+ :param labels: The optional output "class label" for each sample: :math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample). It has :math:`nsamples \times 1` size and ``CV_32SC1`` type.
+
+ :param probs: The optional output matrix that contains posterior probabilities of each Gaussian mixture component given each sample. It has :math:`nsamples \times nclusters` size and ``CV_64FC1`` type.
-.. ocv:pyfunction:: cv2.EM.train(samples[, sampleIdx[, params]]) -> retval, labels
+The three versions of the training method differ in the initialization of the Gaussian mixture model parameters and the start step:
- :param samples: Samples from which the Gaussian mixture model will be estimated.
+* **train** - Starts with Expectation step. Initial values of the model parameters will be estimated by the k-means algorithm.
- :param sample_idx: Mask of samples to use. All samples are used by default.
+* **trainE** - Starts with Expectation step. You need to provide initial means :math:`a_k` of mixture components. Optionally you can pass initial weights :math:`\pi_k` and covariance matrices :math:`S_k` of mixture components.
- :param params: Parameters of the EM algorithm.
+* **trainM** - Starts with Maximization step. You need to provide initial probabilities :math:`p_{i,k}` to use this option.
- :param labels: The optional output "class label" for each sample: :math:`\texttt{labels}_i=\texttt{arg max}_k(p_{i,k}), i=1..N` (indices of the most probable mixture component for each sample).
+The methods return ``true`` if the Gaussian mixture model was trained successfully; otherwise they return ``false``.
Unlike many of the ML models, EM is an unsupervised learning algorithm and it does not take responses (class labels or function values) as input. Instead, it computes the
*Maximum Likelihood Estimate* of the Gaussian mixture parameters from an input sample set, stores all the parameters inside the structure:
The trained model can be used further for prediction, just like any other classifier. The trained model is similar to the
:ocv:class:`CvNormalBayesClassifier`.
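+
+For example, a minimal clustering sketch (illustrative only; ``samples`` is a hypothetical one-channel matrix with one sample per row): ::
+
+ cv::EM em(3); // 3 mixture components
+ cv::Mat logLikelihoods, labels, probs;
+ if( em.train(samples, logLikelihoods, labels, probs) )
+ {
+ // labels (CV_32SC1): the most probable component index per sample
+ // probs (CV_64FC1): posterior probabilities per sample
+ }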
-For an example of clustering random samples of the multi-Gaussian distribution using EM, see ``em.cpp`` sample in the OpenCV distribution.
-
-
-CvEM::predict
--------------
-Returns a mixture component index of a sample.
-
-.. ocv:function:: float CvEM::predict( const Mat& sample, Mat* probs=0 ) const
-
-.. ocv:function:: float CvEM::predict( const CvMat* sample, CvMat* probs ) const
-
-.. ocv:pyfunction:: cv2.EM.predict(sample) -> retval, probs
-
- :param sample: A sample for classification.
-
- :param probs: If it is not null then the method will write posterior probabilities of each component given the sample data to this parameter.
-
-
-CvEM::getNClusters
-------------------
-Returns the number of mixture components :math:`M` in the gaussian mixture model.
-
-.. ocv:function:: int CvEM::getNClusters() const
-
-.. ocv:function:: int CvEM::get_nclusters() const
-
-.. ocv:pyfunction:: cv2.EM.getNClusters() -> retval
-
-
-CvEM::getMeans
-------------------
-Returns mixture means :math:`a_k`.
-
-.. ocv:function:: Mat CvEM::getMeans() const
-
-.. ocv:function:: const CvMat* CvEM::get_means() const
-
-.. ocv:pyfunction:: cv2.EM.getMeans() -> means
-
-
-CvEM::getCovs
--------------
-Returns mixture covariance matrices :math:`S_k`.
-
-.. ocv:function:: void CvEM::getCovs(std::vector<cv::Mat>& covs) const
-
-.. ocv:function:: const CvMat** CvEM::get_covs() const
-
-.. ocv:pyfunction:: cv2.EM.getCovs([covs]) -> covs
-
-
-CvEM::getWeights
-----------------
-Returns mixture weights :math:`\pi_k`.
-
-.. ocv:function:: Mat CvEM::getWeights() const
-
-.. ocv:function:: const CvMat* CvEM::get_weights() const
-
-.. ocv:pyfunction:: cv2.EM.getWeights() -> weights
-
-
-CvEM::getProbs
---------------
-Returns vectors of probabilities for each training sample.
+EM::predict
+-----------
+Returns a likelihood logarithm value and an index of the most probable mixture component for the given sample.
-.. ocv:function:: Mat CvEM::getProbs() const
+.. ocv:function:: Vec2d EM::predict(InputArray sample, OutputArray probs=noArray()) const
+
+ :param sample: A sample for classification. It should be a one-channel matrix of :math:`1 \times dims` or :math:`dims \times 1` size.
-.. ocv:function:: const CvMat* CvEM::get_probs() const
+ :param probs: Optional output matrix that contains posterior probabilities of each component given the sample. It has :math:`1 \times nclusters` size and ``CV_64FC1`` type.
-.. ocv:pyfunction:: cv2.EM.getProbs() -> probs
+The method returns a two-element ``double`` vector. The zero element is the likelihood logarithm value for the sample, and the first element is the index of the most probable mixture component for the given sample.
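+
+For example (a sketch; ``em`` is assumed to be a trained model and ``sample`` a hypothetical :math:`1 \times dims` row): ::
+
+ cv::Mat posteriors;
+ cv::Vec2d ret = em.predict(sample, posteriors);
+ double logLikelihood = ret[0];
+ int mixtureIndex = cvRound(ret[1]);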
-For each training sample :math:`i` (that have been passed to the constructor or to :ocv:func:`CvEM::train`) returns probabilities :math:`p_{i,k}` to belong to a mixture component :math:`k`.
+EM::isTrained
+-------------
+Returns ``true`` if the Gaussian mixture model was trained.
+.. ocv:function:: bool EM::isTrained() const
-CvEM::getLikelihood
+EM::read, EM::write
-------------------
-Returns logarithm of likelihood.
-
-.. ocv:function:: double CvEM::getLikelihood() const
-
-.. ocv:function:: double CvEM::get_log_likelihood() const
-
-.. ocv:pyfunction:: cv2.EM.getLikelihood() -> likelihood
-
-
-CvEM::getLikelihoodDelta
-------------------------
-Returns difference between logarithm of likelihood on the last iteration and logarithm of likelihood on the previous iteration.
-
-.. ocv:function:: double CvEM::getLikelihoodDelta() const
-
-.. ocv:function:: double CvEM::get_log_likelihood_delta() const
-
-.. ocv:pyfunction:: cv2.EM.getLikelihoodDelta() -> likelihood delta
-
-CvEM::write_params
-------------------
-Writes used parameters of the EM algorithm to a file storage.
-
-.. ocv:function:: void CvEM::write_params( CvFileStorage* fs ) const
-
- :param fs: A file storage where parameters will be written.
-
-
-CvEM::read_params
------------------
-Reads parameters of the EM algorithm.
-
-.. ocv:function:: void CvEM::read_params( CvFileStorage* fs, CvFileNode* node )
-
- :param fs: A file storage with parameters of the EM algorithm.
-
- :param node: The parent map. If it is NULL, the function searches a node with parameters in all the top-level nodes (streams), starting with the first one.
-
-The function reads EM parameters from the specified file storage node. For example of clustering random samples of multi-Gaussian distribution using EM see em.cpp sample in OpenCV distribution.
-
+See :ocv:func:`Algorithm::read` and :ocv:func:`Algorithm::write`.
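+
+For example, one possible save/load pattern (a sketch; the ``"em"`` node name is an arbitrary choice, not a requirement): ::
+
+ cv::FileStorage fs("em.xml", cv::FileStorage::WRITE);
+ fs << "em" << "{";
+ em.write(fs); // inherited Algorithm::write
+ fs << "}";
+ fs.release();
+
+ cv::EM em2;
+ cv::FileStorage fs2("em.xml", cv::FileStorage::READ);
+ em2.read(fs2["em"]); // inherited Algorithm::read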
+EM::get, EM::set
+----------------
+See :ocv:func:`Algorithm::get` and :ocv:func:`Algorithm::set`. The following parameters are available:
+
+* ``"nclusters"``
+* ``"covMatType"``
+* ``"maxIters"``
+* ``"epsilon"``
+* ``"weights"`` *(read-only)*
+* ``"means"`` *(read-only)*
+* ``"covs"`` *(read-only)*
+..
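+
+For example (a sketch using the generic :ocv:class:`Algorithm` accessors): ::
+
+ em.set("nclusters", 4); // must be set before training
+ int maxIters = em.getInt("maxIters");
+ cv::Mat means = em.getMat("means"); // read-only; valid after training
+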
void mix_train_and_test_idx();
const CvMat* get_var_idx();
- void chahge_var_idx( int vi, bool state );
+ void change_var_idx( int vi, bool state );
const CvMat* get_var_types();
void set_var_types( const char* str );
It returns ``0`` if the used subset is not set. It throws an exception if the data has not been loaded from the file yet. Returned matrix is a single-row matrix of the type ``CV_32SC1``. Its column count is equal to the size of the used variable subset.
-CvMLData::chahge_var_idx
+CvMLData::change_var_idx
------------------------
Enables or disables a particular variable in the loaded data.
-.. ocv:function:: void CvMLData::chahge_var_idx( int vi, bool state )
+.. ocv:function:: void CvMLData::change_var_idx( int vi, bool state )
By default, after the data set has been read, all variables in the ``values`` matrix (see :ocv:func:`CvMLData::get_values`) are used. But you may want to use only a subset of variables and include or exclude (depending on the ``state`` value) the variable with index ``vi`` from the used subset. If the data has not been loaded from the file yet, an exception is thrown.
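+
+For example (a sketch; the file name and variable index are illustrative): ::
+
+ CvMLData data;
+ if( data.read_csv("data.csv") == 0 ) // 0 means success
+ {
+ data.change_var_idx( 0, false ); // exclude the first variable
+ const CvMat* usedVars = data.get_var_idx();
+ }
+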
enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL};
// Default parameters
- enum {DEFAULT_NCLUSTERS=10, DEFAULT_MAX_ITERS=100};
+ enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
// The initial step
enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
- const TermCriteria& termcrit=TermCriteria(TermCriteria::COUNT+
+ const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+
TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
CV_WRAP virtual void clear();
CV_WRAP virtual bool train(InputArray samples,
+ OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
- OutputArray probs=noArray(),
- OutputArray logLikelihoods=noArray());
+ OutputArray probs=noArray());
CV_WRAP virtual bool trainE(InputArray samples,
InputArray means0,
InputArray covs0=noArray(),
InputArray weights0=noArray(),
+ OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
- OutputArray probs=noArray(),
- OutputArray logLikelihoods=noArray());
+ OutputArray probs=noArray());
CV_WRAP virtual bool trainM(InputArray samples,
InputArray probs0,
+ OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
- OutputArray probs=noArray(),
- OutputArray logLikelihoods=noArray());
+ OutputArray probs=noArray());
- CV_WRAP int predict(InputArray sample,
- OutputArray probs=noArray(),
- CV_OUT double* logLikelihood=0) const;
+ CV_WRAP Vec2d predict(InputArray sample,
+ OutputArray probs=noArray()) const;
CV_WRAP bool isTrained() const;
const Mat* weights0);
bool doTrain(int startStep,
+ OutputArray logLikelihoods,
OutputArray labels,
- OutputArray probs,
- OutputArray logLikelihoods);
+ OutputArray probs);
virtual void eStep();
virtual void mStep();
void decomposeCovs();
void computeLogWeightDivDet();
- void computeProbabilities(const Mat& sample, int& label, Mat* probs, double* logLikelihood) const;
+ Vec2d computeProbabilities(const Mat& sample, Mat* probs) const;
// all inner matrices have type CV_64FC1
CV_PROP_RW int nclusters;
Mat trainProbs;
Mat trainLogLikelihoods;
Mat trainLabels;
- Mat trainCounts;
CV_PROP Mat weights;
CV_PROP Mat means;
// returns:
// 0 - OK
- // 1 - file can not be opened or is not correct
+ // -1 - file cannot be opened or is not correct
int read_csv( const char* filename );
const CvMat* get_values() const;
void mix_train_and_test_idx();
const CvMat* get_var_idx();
- void chahge_var_idx( int vi, bool state ); // state == true to set vi-variable as predictor
+ void chahge_var_idx( int vi, bool state ); // misspelled (kept for backward compatibility),
+ // use change_var_idx
+ void change_var_idx( int vi, bool state ); // state == true to set vi-variable as predictor
const CvMat* get_var_types();
int get_var_type( int var_idx ) const;
int type;
token = strtok(buf, str_delimiter);
if (!token)
- {
- fclose(file);
- return -1;
- }
+ break;
for (int i = 0; i < cols_count-1; i++)
{
str_to_flt_elem( token, el_ptr[i], type);
str_to_flt_elem( token, el_ptr[cols_count-1], type);
var_types_ptr[cols_count-1] |= type;
cvSeqPush( seq, el_ptr );
- if( !fgets_chomp( buf, M, file ) || !strchr( buf, delimiter ) )
+ if( !fgets_chomp( buf, M, file ) )
break;
}
fclose(file);
void CvMLData::chahge_var_idx( int vi, bool state )
{
- CV_FUNCNAME( "CvMLData::get_responses_ptr" );
+ change_var_idx( vi, state );
+}
+
+void CvMLData::change_var_idx( int vi, bool state )
+{
+ CV_FUNCNAME( "CvMLData::change_var_idx" );
__BEGIN__;
int var_count = 0;
namespace cv
{
-const double minEigenValue = DBL_MIN;
+const double minEigenValue = DBL_EPSILON;
///////////////////////////////////////////////////////////////////////////////////////////////////////
-EM::EM(int _nclusters, int _covMatType, const TermCriteria& _criteria)
+EM::EM(int _nclusters, int _covMatType, const TermCriteria& _termCrit)
{
nclusters = _nclusters;
covMatType = _covMatType;
- maxIters = (_criteria.type & TermCriteria::MAX_ITER) ? _criteria.maxCount : DEFAULT_MAX_ITERS;
- epsilon = (_criteria.type & TermCriteria::EPS) ? _criteria.epsilon : 0;
+ maxIters = (_termCrit.type & TermCriteria::MAX_ITER) ? _termCrit.maxCount : DEFAULT_MAX_ITERS;
+ epsilon = (_termCrit.type & TermCriteria::EPS) ? _termCrit.epsilon : 0;
}
EM::~EM()
trainProbs.release();
trainLogLikelihoods.release();
trainLabels.release();
- trainCounts.release();
weights.release();
means.release();
bool EM::train(InputArray samples,
+ OutputArray logLikelihoods,
OutputArray labels,
- OutputArray probs,
- OutputArray logLikelihoods)
+ OutputArray probs)
{
Mat samplesMat = samples.getMat();
setTrainData(START_AUTO_STEP, samplesMat, 0, 0, 0, 0);
- return doTrain(START_AUTO_STEP, labels, probs, logLikelihoods);
+ return doTrain(START_AUTO_STEP, logLikelihoods, labels, probs);
}
bool EM::trainE(InputArray samples,
InputArray _means0,
InputArray _covs0,
InputArray _weights0,
+ OutputArray logLikelihoods,
OutputArray labels,
- OutputArray probs,
- OutputArray logLikelihoods)
+ OutputArray probs)
{
Mat samplesMat = samples.getMat();
vector<Mat> covs0;
setTrainData(START_E_STEP, samplesMat, 0, !_means0.empty() ? &means0 : 0,
!_covs0.empty() ? &covs0 : 0, _weights0.empty() ? &weights0 : 0);
- return doTrain(START_E_STEP, labels, probs, logLikelihoods);
+ return doTrain(START_E_STEP, logLikelihoods, labels, probs);
}
bool EM::trainM(InputArray samples,
InputArray _probs0,
+ OutputArray logLikelihoods,
OutputArray labels,
- OutputArray probs,
- OutputArray logLikelihoods)
+ OutputArray probs)
{
Mat samplesMat = samples.getMat();
Mat probs0 = _probs0.getMat();
setTrainData(START_M_STEP, samplesMat, !_probs0.empty() ? &probs0 : 0, 0, 0, 0);
- return doTrain(START_M_STEP, labels, probs, logLikelihoods);
+ return doTrain(START_M_STEP, logLikelihoods, labels, probs);
}
-int EM::predict(InputArray _sample, OutputArray _probs, double* logLikelihood) const
+Vec2d EM::predict(InputArray _sample, OutputArray _probs) const
{
Mat sample = _sample.getMat();
CV_Assert(isTrained());
sample.convertTo(tmp, CV_64FC1);
sample = tmp;
}
+ sample = sample.reshape(1, 1); // reshape() returns a new header; the result must be assigned
- int label;
Mat probs;
if( _probs.needed() )
{
_probs.create(1, nclusters, CV_64FC1);
probs = _probs.getMat();
}
- computeProbabilities(sample, label, !probs.empty() ? &probs : 0, logLikelihood);
- return label;
+ return computeProbabilities(sample, !probs.empty() ? &probs : 0);
}
bool EM::isTrained() const
}
}
-bool EM::doTrain(int startStep, OutputArray labels, OutputArray probs, OutputArray logLikelihoods)
+bool EM::doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs)
{
int dim = trainSamples.cols;
// Precompute the empty initial train data in the cases of EM::START_E_STEP and START_AUTO_STEP
trainProbs.release();
trainLabels.release();
trainLogLikelihoods.release();
- trainCounts.release();
return true;
}
-void EM::computeProbabilities(const Mat& sample, int& label, Mat* probs, double* logLikelihood) const
+Vec2d EM::computeProbabilities(const Mat& sample, Mat* probs) const
{
// L_ik = log(weight_k) - 0.5 * log(|det(cov_k)|) - 0.5 * (x_i - mean_k)' cov_k^(-1) (x_i - mean_k)
// q = arg(max_k(L_ik))
int dim = sample.cols;
Mat L(1, nclusters, CV_64FC1);
- label = 0;
+ int label = 0;
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
const Mat centeredSample = sample - means.row(clusterIndex);
Lval += w * val * val;
}
CV_DbgAssert(!logWeightDivDet.empty());
- Lval = logWeightDivDet.at<double>(clusterIndex) - 0.5 * Lval;
- L.at<double>(clusterIndex) = Lval;
+ L.at<double>(clusterIndex) = logWeightDivDet.at<double>(clusterIndex) - 0.5 * Lval;
- if(Lval > L.at<double>(label))
+ if(L.at<double>(clusterIndex) > L.at<double>(label))
label = clusterIndex;
}
- if(!probs && !logLikelihood)
- return;
-
double maxLVal = L.at<double>(label);
Mat expL_Lmax = L; // exp(L_ij - L_iq)
for(int i = 0; i < L.cols; i++)
expL_Lmax.copyTo(*probs);
}
- if(logLikelihood)
- *logLikelihood = std::log(expDiffSum) + maxLVal - 0.5 * dim * CV_LOG2PI;
+ Vec2d res;
+ res[0] = std::log(expDiffSum) + maxLVal - 0.5 * dim * CV_LOG2PI;
+ res[1] = label;
+
+ return res;
}
void EM::eStep()
for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
{
Mat sampleProbs = trainProbs.row(sampleIndex);
- computeProbabilities(trainSamples.row(sampleIndex), trainLabels.at<int>(sampleIndex),
- &sampleProbs, &trainLogLikelihoods.at<double>(sampleIndex));
+ Vec2d res = computeProbabilities(trainSamples.row(sampleIndex), &sampleProbs);
+ trainLogLikelihoods.at<double>(sampleIndex) = res[0];
+ trainLabels.at<int>(sampleIndex) = static_cast<int>(res[1]);
}
}
void EM::mStep()
{
- trainCounts.create(1, nclusters, CV_32SC1);
- trainCounts = Scalar(0);
+ // Update means_k, covs_k and weights_k from probs_ik
+ int dim = trainSamples.cols;
- for(int sampleIndex = 0; sampleIndex < trainLabels.rows; sampleIndex++)
- trainCounts.at<int>(trainLabels.at<int>(sampleIndex))++;
+ // Update weights
+ // (not yet normalized; weights are normalized at the end of mStep)
+ reduce(trainProbs, weights, 0, CV_REDUCE_SUM);
- if(countNonZero(trainCounts) != (int)trainCounts.total())
- {
- clusterTrainSamples();
- }
- else
- {
- // Update means_k, covs_k and weights_k from probs_ik
- int dim = trainSamples.cols;
+ // Update means
+ means.create(nclusters, dim, CV_64FC1);
+ means = Scalar(0);
- // Update weights
- // not normalized first
- reduce(trainProbs, weights, 0, CV_REDUCE_SUM);
+ const double minPosWeight = trainSamples.rows * DBL_EPSILON;
+ double minWeight = DBL_MAX;
+ int minWeightClusterIndex = -1;
+ for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
+ {
+ if(weights.at<double>(clusterIndex) <= minPosWeight)
+ continue;
- // Update means
- means.create(nclusters, dim, CV_64FC1);
- means = Scalar(0);
- for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
+ if(weights.at<double>(clusterIndex) < minWeight)
{
- Mat clusterMean = means.row(clusterIndex);
- for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
- clusterMean += trainProbs.at<double>(sampleIndex, clusterIndex) * trainSamples.row(sampleIndex);
- clusterMean /= weights.at<double>(clusterIndex);
+ minWeight = weights.at<double>(clusterIndex);
+ minWeightClusterIndex = clusterIndex;
}
- // Update covsEigenValues and invCovsEigenValues
- covs.resize(nclusters);
- covsEigenValues.resize(nclusters);
+ Mat clusterMean = means.row(clusterIndex);
+ for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
+ clusterMean += trainProbs.at<double>(sampleIndex, clusterIndex) * trainSamples.row(sampleIndex);
+ clusterMean /= weights.at<double>(clusterIndex);
+ }
+
+ // Update covsEigenValues and invCovsEigenValues
+ covs.resize(nclusters);
+ covsEigenValues.resize(nclusters);
+ if(covMatType == EM::COV_MAT_GENERIC)
+ covsRotateMats.resize(nclusters);
+ invCovsEigenValues.resize(nclusters);
+ for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
+ {
+ if(weights.at<double>(clusterIndex) <= minPosWeight)
+ continue;
+
+ if(covMatType != EM::COV_MAT_SPHERICAL)
+ covsEigenValues[clusterIndex].create(1, dim, CV_64FC1);
+ else
+ covsEigenValues[clusterIndex].create(1, 1, CV_64FC1);
+
if(covMatType == EM::COV_MAT_GENERIC)
- covsRotateMats.resize(nclusters);
- invCovsEigenValues.resize(nclusters);
- for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
- {
- if(covMatType != EM::COV_MAT_SPHERICAL)
- covsEigenValues[clusterIndex].create(1, dim, CV_64FC1);
- else
- covsEigenValues[clusterIndex].create(1, 1, CV_64FC1);
+ covs[clusterIndex].create(dim, dim, CV_64FC1);
- if(covMatType == EM::COV_MAT_GENERIC)
- covs[clusterIndex].create(dim, dim, CV_64FC1);
+ Mat clusterCov = covMatType != EM::COV_MAT_GENERIC ?
+ covsEigenValues[clusterIndex] : covs[clusterIndex];
- Mat clusterCov = covMatType != EM::COV_MAT_GENERIC ?
- covsEigenValues[clusterIndex] : covs[clusterIndex];
+ clusterCov = Scalar(0);
- clusterCov = Scalar(0);
+ Mat centeredSample;
+ for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
+ {
+ centeredSample = trainSamples.row(sampleIndex) - means.row(clusterIndex);
- Mat centeredSample;
- for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
+ if(covMatType == EM::COV_MAT_GENERIC)
+ clusterCov += trainProbs.at<double>(sampleIndex, clusterIndex) * centeredSample.t() * centeredSample;
+ else
{
- centeredSample = trainSamples.row(sampleIndex) - means.row(clusterIndex);
-
- if(covMatType == EM::COV_MAT_GENERIC)
- clusterCov += trainProbs.at<double>(sampleIndex, clusterIndex) * centeredSample.t() * centeredSample;
- else
+ double p = trainProbs.at<double>(sampleIndex, clusterIndex);
+ for(int di = 0; di < dim; di++ )
{
- double p = trainProbs.at<double>(sampleIndex, clusterIndex);
- for(int di = 0; di < dim; di++ )
- {
- double val = centeredSample.at<double>(di);
- clusterCov.at<double>(covMatType != EM::COV_MAT_SPHERICAL ? di : 0) += p*val*val;
- }
+ double val = centeredSample.at<double>(di);
+ clusterCov.at<double>(covMatType != EM::COV_MAT_SPHERICAL ? di : 0) += p*val*val;
}
}
+ }
- if(covMatType == EM::COV_MAT_SPHERICAL)
- clusterCov /= dim;
+ if(covMatType == EM::COV_MAT_SPHERICAL)
+ clusterCov /= dim;
- clusterCov /= weights.at<double>(clusterIndex);
+ clusterCov /= weights.at<double>(clusterIndex);
- // Update covsRotateMats for EM::COV_MAT_GENERIC only
- if(covMatType == EM::COV_MAT_GENERIC)
- {
- SVD svd(covs[clusterIndex], SVD::MODIFY_A + SVD::FULL_UV);
- covsEigenValues[clusterIndex] = svd.w;
- covsRotateMats[clusterIndex] = svd.u;
- }
+ // Update covsRotateMats for EM::COV_MAT_GENERIC only
+ if(covMatType == EM::COV_MAT_GENERIC)
+ {
+ SVD svd(covs[clusterIndex], SVD::MODIFY_A + SVD::FULL_UV);
+ covsEigenValues[clusterIndex] = svd.w;
+ covsRotateMats[clusterIndex] = svd.u;
+ }
- max(covsEigenValues[clusterIndex], minEigenValue, covsEigenValues[clusterIndex]);
+ max(covsEigenValues[clusterIndex], minEigenValue, covsEigenValues[clusterIndex]);
- // update invCovsEigenValues
- invCovsEigenValues[clusterIndex] = 1./covsEigenValues[clusterIndex];
- }
+ // update invCovsEigenValues
+ invCovsEigenValues[clusterIndex] = 1./covsEigenValues[clusterIndex];
+ }
- // Normalize weights
- weights /= trainSamples.rows;
+ for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
+ {
+ if(weights.at<double>(clusterIndex) <= minPosWeight)
+ {
+ Mat clusterMean = means.row(clusterIndex);
+ means.row(minWeightClusterIndex).copyTo(clusterMean);
+ covs[minWeightClusterIndex].copyTo(covs[clusterIndex]);
+ covsEigenValues[minWeightClusterIndex].copyTo(covsEigenValues[clusterIndex]);
+ if(covMatType == EM::COV_MAT_GENERIC)
+ covsRotateMats[minWeightClusterIndex].copyTo(covsRotateMats[clusterIndex]);
+ invCovsEigenValues[minWeightClusterIndex].copyTo(invCovsEigenValues[clusterIndex]);
+ }
}
+
+ // Normalize weights
+ weights /= trainSamples.rows;
}
void EM::read(const FileNode& fn)
computeLogWeightDivDet();
}
-static Algorithm* createEM()
-{
- return new EM;
-}
-static AlgorithmInfo em_info("StatModel.EM", createEM);
-
-AlgorithmInfo* EM::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- EM obj;
- em_info.addParam(obj, "nclusters", obj.nclusters);
- em_info.addParam(obj, "covMatType", obj.covMatType);
-
- em_info.addParam(obj, "weights", obj.weights);
- em_info.addParam(obj, "means", obj.means);
- em_info.addParam(obj, "covs", obj.covs);
-
- initialized = true;
- }
- return &em_info;
-}
} // namespace cv
/* End of file. */
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+namespace cv
+{
+
+static Algorithm* createEM()
+{
+ return new EM;
+}
+static AlgorithmInfo em_info("StatModel.EM", createEM);
+
+AlgorithmInfo* EM::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ EM obj;
+ em_info.addParam(obj, "nclusters", obj.nclusters);
+ em_info.addParam(obj, "covMatType", obj.covMatType);
+ em_info.addParam(obj, "maxIters", obj.maxIters);
+ em_info.addParam(obj, "epsilon", obj.epsilon);
+
+ em_info.addParam(obj, "weights", obj.weights, true);
+ em_info.addParam(obj, "means", obj.means, true);
+ em_info.addParam(obj, "covs", obj.covs, true);
+
+ initialized = true;
+ }
+ return &em_info;
+}
+
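+// Calling initModule_ml() from client code forces this translation unit to be
+// linked in, so that the EM registration above is executed and
+// Algorithm::create("StatModel.EM") succeeds.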
+bool initModule_ml(void)
+{
+ Ptr<Algorithm> em = createEM();
+ return em->info() != 0;
+}
+
+}
}
static
-bool getLabelsMap( const Mat& labels, const vector<int>& sizes, vector<int>& labelsMap )
+bool getLabelsMap( const Mat& labels, const vector<int>& sizes, vector<int>& labelsMap, bool checkClusterUniq=true )
{
size_t total = 0, nclusters = sizes.size();
for(size_t i = 0; i < sizes.size(); i++)
startIndex += sizes[clusterIndex];
int cls = maxIdx( count );
- CV_Assert( !buzy[cls] );
+ CV_Assert( !checkClusterUniq || !buzy[cls] );
labelsMap[clusterIndex] = cls;
buzy[cls] = true;
}
- for(size_t i = 0; i < buzy.size(); i++)
- if(!buzy[i])
- return false;
+
+ if(checkClusterUniq)
+ {
+ for(size_t i = 0; i < buzy.size(); i++)
+ if(!buzy[i])
+ return false;
+ }
return true;
}
static
-bool calcErr( const Mat& labels, const Mat& origLabels, const vector<int>& sizes, float& err, bool labelsEquivalent = true )
+bool calcErr( const Mat& labels, const Mat& origLabels, const vector<int>& sizes, float& err, bool labelsEquivalent = true, bool checkClusterUniq=true )
{
err = 0;
CV_Assert( !labels.empty() && !origLabels.empty() );
bool isFlt = labels.type() == CV_32FC1;
if( !labelsEquivalent )
{
- if( !getLabelsMap( labels, sizes, labelsMap ) )
+ if( !getLabelsMap( labels, sizes, labelsMap, checkClusterUniq ) )
return false;
for( int i = 0; i < labels.rows; i++ )
cv::EM em(params.nclusters, params.covMatType, params.termCrit);
if( params.startStep == EM::START_AUTO_STEP )
- em.train( trainData, labels );
+ em.train( trainData, noArray(), labels );
else if( params.startStep == EM::START_E_STEP )
- em.trainE( trainData, *params.means, *params.covs, *params.weights, labels );
+ em.trainE( trainData, *params.means, *params.covs, *params.weights, noArray(), labels );
else if( params.startStep == EM::START_M_STEP )
- em.trainM( trainData, *params.probs, labels );
+ em.trainM( trainData, *params.probs, noArray(), labels );
// check train error
- if( !calcErr( labels, trainLabels, sizes, err , false ) )
+ if( !calcErr( labels, trainLabels, sizes, err , false, false ) )
{
ts->printf( cvtest::TS::LOG, "Case index %i : Bad output labels.\n", caseIndex );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
for( int i = 0; i < testData.rows; i++ )
{
Mat sample = testData.row(i);
- double likelihood = 0;
Mat probs;
- labels.at<int>(i,0) = (int)em.predict( sample, probs, &likelihood );
+ labels.at<int>(i) = static_cast<int>(em.predict( sample, probs )[1]);
}
- if( !calcErr( labels, testLabels, sizes, err, false ) )
+ if( !calcErr( labels, testLabels, sizes, err, false, false ) )
{
ts->printf( cvtest::TS::LOG, "Case index %i : Bad output labels.\n", caseIndex );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
Mat firstResult(samples.rows, 1, CV_32SC1);
for( int i = 0; i < samples.rows; i++)
- firstResult.at<int>(i) = em.predict(samples.row(i));
+ firstResult.at<int>(i) = static_cast<int>(em.predict(samples.row(i))[1]);
// Write out
string filename = tempfile() + ".xml";
int errCaseCount = 0;
for( int i = 0; i < samples.rows; i++)
- errCaseCount = std::abs(em.predict(samples.row(i)) - firstResult.at<int>(i)) < FLT_EPSILON ? 0 : 1;
+ errCaseCount = std::abs(em.predict(samples.row(i))[1] - firstResult.at<int>(i)) < FLT_EPSILON ? 0 : 1;
if( errCaseCount > 0 )
{
}
};
+class CV_EMTest_Classification : public cvtest::BaseTest
+{
+public:
+ CV_EMTest_Classification() {}
+protected:
+ virtual void run(int)
+ {
+ // This test classifies spam in the following way:
+ // 1. estimates distributions of "spam" / "not spam"
+ // 2. predict classID using Bayes classifier for estimated distributions.
+
+ CvMLData data;
+ string dataFilename = string(ts->get_data_path()) + "spambase.data";
+
+ if(data.read_csv(dataFilename.c_str()) != 0)
+ {
+ ts->printf(cvtest::TS::LOG, "File with spambase dataset cann't be read.\n");
+ ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
+ }
+
+ Mat values = data.get_values();
+ CV_Assert(values.cols == 58);
+ int responseIndex = 57;
+
+ Mat samples = values.colRange(0, responseIndex);
+ Mat responses = values.col(responseIndex);
+
+ vector<int> trainSamplesMask(samples.rows, 0);
+ int trainSamplesCount = (int)(0.5f * samples.rows);
+ for(int i = 0; i < trainSamplesCount; i++)
+ trainSamplesMask[i] = 1;
+ RNG rng(0);
+ for(size_t i = 0; i < trainSamplesMask.size(); i++)
+ {
+ int i1 = rng(static_cast<unsigned>(trainSamplesMask.size()));
+ int i2 = rng(static_cast<unsigned>(trainSamplesMask.size()));
+ std::swap(trainSamplesMask[i1], trainSamplesMask[i2]);
+ }
+
+ EM model0(3), model1(3);
+ Mat samples0, samples1;
+ for(int i = 0; i < samples.rows; i++)
+ {
+ if(trainSamplesMask[i])
+ {
+ Mat sample = samples.row(i);
+ int resp = (int)responses.at<float>(i);
+ if(resp == 0)
+ samples0.push_back(sample);
+ else
+ samples1.push_back(sample);
+ }
+ }
+ model0.train(samples0);
+ model1.train(samples1);
+
+ Mat trainConfusionMat(2, 2, CV_32SC1, Scalar(0)),
+ testConfusionMat(2, 2, CV_32SC1, Scalar(0));
+ const double lambda = 1.;
+ for(int i = 0; i < samples.rows; i++)
+ {
+ Mat sample = samples.row(i);
+ double sampleLogLikelihoods0 = model0.predict(sample)[0];
+ double sampleLogLikelihoods1 = model1.predict(sample)[0];
+
+ int classID = sampleLogLikelihoods0 >= lambda * sampleLogLikelihoods1 ? 0 : 1;
+
+ if(trainSamplesMask[i])
+ trainConfusionMat.at<int>((int)responses.at<float>(i), classID)++;
+ else
+ testConfusionMat.at<int>((int)responses.at<float>(i), classID)++;
+ }
+// std::cout << trainConfusionMat << std::endl;
+// std::cout << testConfusionMat << std::endl;
+
+ double trainError = (double)(trainConfusionMat.at<int>(1,0) + trainConfusionMat.at<int>(0,1)) / trainSamplesCount;
+ double testError = (double)(testConfusionMat.at<int>(1,0) + testConfusionMat.at<int>(0,1)) / (samples.rows - trainSamplesCount);
+ const double maxTrainError = 0.16;
+ const double maxTestError = 0.19;
+
+ int code = cvtest::TS::OK;
+ if(trainError > maxTrainError)
+ {
+ ts->printf(cvtest::TS::LOG, "Too large train classification error (calc = %f, valid=%f).\n", trainError, maxTrainError);
+ code = cvtest::TS::FAIL_INVALID_TEST_DATA;
+ }
+ if(testError > maxTestError)
+ {
+ ts->printf(cvtest::TS::LOG, "Too large test classification error (calc = %f, valid=%f).\n", trainError, maxTrainError);
+ code = cvtest::TS::FAIL_INVALID_TEST_DATA;
+ }
+
+ ts->set_failed_test_info(code);
+ }
+};
+
TEST(ML_KMeans, accuracy) { CV_KMeansTest test; test.safe_run(); }
TEST(ML_KNearest, accuracy) { CV_KNearestTest test; test.safe_run(); }
TEST(ML_EM, accuracy) { CV_EMTest test; test.safe_run(); }
TEST(ML_EM, save_load) { CV_EMTest_SaveLoad test; test.safe_run(); }
+TEST(ML_EM, classification) { CV_EMTest_Classification test; test.safe_run(); }
+
----
.. ocv:class:: SIFT
-Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) approach. ::
-
- class CV_EXPORTS SIFT
- {
- public:
- struct CommonParams
- {
- static const int DEFAULT_NOCTAVES = 4;
- static const int DEFAULT_NOCTAVE_LAYERS = 3;
- static const int DEFAULT_FIRST_OCTAVE = -1;
- enum{ FIRST_ANGLE = 0, AVERAGE_ANGLE = 1 };
-
- CommonParams();
- CommonParams( int _nOctaves, int _nOctaveLayers, int _firstOctave,
- int _angleMode );
- int nOctaves, nOctaveLayers, firstOctave;
- int angleMode;
- };
-
- struct DetectorParams
- {
- static double GET_DEFAULT_THRESHOLD()
- { return 0.04 / SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS / 2.0; }
- static double GET_DEFAULT_EDGE_THRESHOLD() { return 10.0; }
-
- DetectorParams();
- DetectorParams( double _threshold, double _edgeThreshold );
- double threshold, edgeThreshold;
- };
-
- struct DescriptorParams
- {
- static double GET_DEFAULT_MAGNIFICATION() { return 3.0; }
- static const bool DEFAULT_IS_NORMALIZE = true;
- static const int DESCRIPTOR_SIZE = 128;
-
- DescriptorParams();
- DescriptorParams( double _magnification, bool _isNormalize,
- bool _recalculateAngles );
- double magnification;
- bool isNormalize;
- bool recalculateAngles;
- };
-
- SIFT();
- //! sift-detector constructor
- SIFT( double _threshold, double _edgeThreshold,
- int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
- int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
- int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
- int _angleMode=CommonParams::FIRST_ANGLE );
- //! sift-descriptor constructor
- SIFT( double _magnification, bool _isNormalize=true,
- bool _recalculateAngles = true,
- int _nOctaves=CommonParams::DEFAULT_NOCTAVES,
- int _nOctaveLayers=CommonParams::DEFAULT_NOCTAVE_LAYERS,
- int _firstOctave=CommonParams::DEFAULT_FIRST_OCTAVE,
- int _angleMode=CommonParams::FIRST_ANGLE );
- SIFT( const CommonParams& _commParams,
- const DetectorParams& _detectorParams = DetectorParams(),
- const DescriptorParams& _descriptorParams = DescriptorParams() );
-
- //! returns the descriptor size in floats (128)
- int descriptorSize() const { return DescriptorParams::DESCRIPTOR_SIZE; }
- //! finds the keypoints using the SIFT algorithm
- void operator()(const Mat& img, const Mat& mask,
- vector<KeyPoint>& keypoints) const;
- //! finds the keypoints and computes descriptors for them using SIFT algorithm.
- //! Optionally it can compute descriptors for the user-provided keypoints
- void operator()(const Mat& img, const Mat& mask,
- vector<KeyPoint>& keypoints,
- Mat& descriptors,
- bool useProvidedKeypoints=false) const;
-
- CommonParams getCommonParams () const { return commParams; }
- DetectorParams getDetectorParams () const { return detectorParams; }
- DescriptorParams getDescriptorParams () const { return descriptorParams; }
- protected:
- ...
- };
+Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) algorithm by D. Lowe [Lowe04]_.
+.. [Lowe04] Lowe, D. G., “Distinctive Image Features from Scale-Invariant Keypoints”, International Journal of Computer Vision, 60, 2, pp. 91-110, 2004.
+SIFT::SIFT
+----------
+The SIFT constructors.
+
+.. ocv:function:: SIFT::SIFT( int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)
+
+ :param nfeatures: The number of best features to retain. The features are ranked by their scores (measured in the SIFT algorithm as the local contrast).
+
+ :param nOctaveLayers: The number of layers in each octave. 3 is the value used in D. Lowe's paper. The number of octaves is computed automatically from the image resolution.
+
+ :param contrastThreshold: The contrast threshold used to filter out weak features in semi-uniform (low-contrast) regions. The larger the threshold, the fewer features are produced by the detector.
+
+ :param edgeThreshold: The threshold used to filter out edge-like features. Note that its meaning is different from that of ``contrastThreshold``: the larger the ``edgeThreshold``, the fewer features are filtered out (more features are retained).
+
+ :param sigma: The sigma of the Gaussian applied to the input image at octave #0. If your image was captured with a weak camera with soft lenses, you might want to reduce this value.
+
+
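+For example, to keep only the 500 strongest keypoints and leave the remaining parameters at their defaults (an illustrative sketch): ::
+
+ cv::SIFT sift(500);
+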
+SIFT::operator ()
+-----------------
+Extracts features and computes their descriptors using the SIFT algorithm.
+
+.. ocv:function:: void SIFT::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
+
+ :param image: Input 8-bit grayscale image.
+
+ :param mask: Optional input mask that marks the regions where we should detect features.
+
+ :param keypoints: The input/output vector of keypoints
+
+ :param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
+
+ :param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
+
+
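+For example, a complete detect-and-describe pass (a sketch; ``img`` is a hypothetical 8-bit grayscale image): ::
+
+ cv::SIFT sift;
+ std::vector<cv::KeyPoint> keypoints;
+ cv::Mat descriptors;
+ sift(img, cv::noArray(), keypoints, descriptors);
+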
SURF
----
.. ocv:class:: SURF
----------------
Detects keypoints and computes SURF descriptors for them.
-.. ocv:function:: void SURF::operator()(const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints)
-.. ocv:function:: void SURF::operator()(const Mat& image, const Mat& mask, vector<KeyPoint>& keypoints, vector<float>& descriptors, bool useProvidedKeypoints=false)
+.. ocv:function:: void SURF::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints) const
+.. ocv:function:: void SURF::operator()(InputArray image, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
.. ocv:pyfunction:: cv2.SURF.detect(img, mask) -> keypoints
.. ocv:pyfunction:: cv2.SURF.detect(img, mask[, useProvidedKeypoints]) -> keypoints, descriptors
:param keypoints: The input/output vector of keypoints
- :param descriptors: The output concatenated vectors of descriptors. Each descriptor is 64- or 128-element vector, as returned by ``SURF::descriptorSize()``. So the total size of ``descriptors`` will be ``keypoints.size()*descriptorSize()``.
+ :param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
:param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
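+
+For example (a sketch; ``img`` is a hypothetical 8-bit grayscale image and the Hessian threshold is illustrative): ::
+
+ cv::SURF surf(400.); // hessianThreshold = 400
+ std::vector<cv::KeyPoint> keypoints;
+ cv::Mat descriptors;
+ surf(img, cv::noArray(), keypoints, descriptors);
+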
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+namespace cv
+{
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static Algorithm* createSURF()
+{
+ return new SURF;
+}
+
+static AlgorithmInfo& surf_info()
+{
+ static AlgorithmInfo surf_info_var("Feature2D.SURF", createSURF);
+ return surf_info_var;
+}
+
+static AlgorithmInfo& surf_info_auto = surf_info();
+
+AlgorithmInfo* SURF::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ SURF obj;
+ surf_info().addParam(obj, "hessianThreshold", obj.hessianThreshold);
+ surf_info().addParam(obj, "nOctaves", obj.nOctaves);
+ surf_info().addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
+ surf_info().addParam(obj, "extended", obj.extended);
+ surf_info().addParam(obj, "upright", obj.upright);
+
+ initialized = true;
+ }
+ return &surf_info();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static Algorithm* createSIFT() { return new SIFT; }
+
+static AlgorithmInfo& sift_info()
+{
+ static AlgorithmInfo sift_info_var("Feature2D.SIFT", createSIFT);
+ return sift_info_var;
+}
+
+static AlgorithmInfo& sift_info_auto = sift_info();
+
+AlgorithmInfo* SIFT::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ SIFT obj;
+ sift_info().addParam(obj, "nFeatures", obj.nfeatures);
+ sift_info().addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
+ sift_info().addParam(obj, "contrastThreshold", obj.contrastThreshold);
+ sift_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
+ sift_info().addParam(obj, "sigma", obj.sigma);
+
+ initialized = true;
+ }
+ return &sift_info();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
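+// Instantiating the algorithms once and querying info() registers their
+// AlgorithmInfo (and parameters) with the global factory, so
+// Algorithm::create("Feature2D.SIFT") / ("Feature2D.SURF") can find them.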
+bool initModule_nonfree(void)
+{
+ Ptr<Algorithm> sift = createSIFT(), surf = createSURF();
+ return sift->info() != 0 && surf->info() != 0;
+}
+
+}
+
\ No newline at end of file
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// copy or use the software.
//
//
-// Intel License Agreement
+// License Agreement
// For Open Source Computer Vision Library
//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
-// * The name of Intel Corporation may not be used to endorse or promote products
+// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
float hessianThreshold;
};
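+// Strict weak ordering that ranks "stronger" keypoints first: by response,
+// then size, then octave, with position as the final tie-breaker.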
+struct KeypointGreater
+{
+ inline bool operator()(const KeyPoint& kp1, const KeyPoint& kp2) const
+ {
+ if(kp1.response > kp2.response) return true;
+ if(kp1.response < kp2.response) return false;
+ if(kp1.size > kp2.size) return true;
+ if(kp1.size < kp2.size) return false;
+ if(kp1.octave > kp2.octave) return true;
+ if(kp1.octave < kp2.octave) return false;
+ if(kp1.pt.y < kp2.pt.y) return false;
+ if(kp1.pt.y > kp2.pt.y) return true;
+ return kp1.pt.x < kp2.pt.x;
+ }
+};
+
static void fastHessianDetector( const Mat& sum, const Mat& mask_sum, vector<KeyPoint>& keypoints,
int nOctaves, int nOctaveLayers, float hessianThreshold )
SURFFindInvoker(sum, mask_sum, dets, traces, sizes,
sampleSteps, middleIndices, keypoints,
nOctaveLayers, hessianThreshold) );
+
+ std::sort(keypoints.begin(), keypoints.end(), KeypointGreater());
}
void SURF::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
{
(*this)(image, Mat(), keypoints, descriptors, true);
-}
-
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-static Algorithm* createSURF()
-{
- return new SURF;
-}
-
-static AlgorithmInfo& surf_info()
-{
- static AlgorithmInfo surf_info_var("Feature2D.SURF", createSURF);
- return surf_info_var;
-}
-
-static AlgorithmInfo& surf_info_auto = surf_info();
-
-AlgorithmInfo* SURF::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- SURF obj;
- surf_info().addParam(obj, "hessianThreshold", obj.hessianThreshold);
- surf_info().addParam(obj, "nOctaves", obj.nOctaves);
- surf_info().addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
- surf_info().addParam(obj, "extended", obj.extended);
- surf_info().addParam(obj, "upright", obj.upright);
-
- initialized = true;
- }
- return &surf_info();
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-static Algorithm* createSIFT() { return new SIFT; }
-
-static AlgorithmInfo& sift_info()
-{
- static AlgorithmInfo sift_info_var("Feature2D.SIFT", createSIFT);
- return sift_info_var;
-}
-
-static AlgorithmInfo& sift_info_auto = sift_info();
-
-AlgorithmInfo* SIFT::info() const
-{
- static volatile bool initialized = false;
- if( !initialized )
- {
- SIFT obj;
- sift_info().addParam(obj, "nFeatures", obj.nfeatures);
- sift_info().addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
- sift_info().addParam(obj, "contrastThreshold", obj.contrastThreshold);
- sift_info().addParam(obj, "edgeThreshold", obj.edgeThreshold);
- sift_info().addParam(obj, "sigma", obj.sigma);
-
- initialized = true;
- }
- return &sift_info();
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-bool initModule_nonfree(void)
-{
- Ptr<Algorithm> sift = createSIFT(), surf = createSURF();
- return sift->info() != 0 && surf->info() != 0;
}
}
if( validDescriptors.size != calcDescriptors.size || validDescriptors.type() != calcDescriptors.type() )
{
ts->printf(cvtest::TS::LOG, "Valid and computed descriptors matrices must have the same size and type.\n");
+ ts->printf(cvtest::TS::LOG, "Valid size is (%d x %d) actual size is (%d x %d).\n", validDescriptors.rows, validDescriptors.cols, calcDescriptors.rows, calcDescriptors.cols);
+ ts->printf(cvtest::TS::LOG, "Valid type is %d actual type is %d.\n", validDescriptors.type(), calcDescriptors.type());
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
}
TEST( Features2d_DescriptorExtractor_SURF, regression )
{
- CV_DescriptorExtractorTest<L2<float> > test( "descriptor-surf", 0.035f,
+ CV_DescriptorExtractorTest<L2<float> > test( "descriptor-surf", 0.05f,
DescriptorExtractor::create("SURF"), 0.147372f );
test.safe_run();
}
-/*TEST( Features2d_DescriptorExtractor_OpponentSIFT, regression )
+TEST( Features2d_DescriptorExtractor_OpponentSIFT, regression )
{
CV_DescriptorExtractorTest<L2<float> > test( "descriptor-opponent-sift", 0.18f,
DescriptorExtractor::create("OpponentSIFT"), 8.06652f );
test.safe_run();
-}*/
+}
TEST( Features2d_DescriptorExtractor_OpponentSURF, regression )
{
- CV_DescriptorExtractorTest<L2<float> > test( "descriptor-opponent-surf", 0.18f,
+ CV_DescriptorExtractorTest<L2<float> > test( "descriptor-opponent-surf", 0.3f,
DescriptorExtractor::create("OpponentSURF"), 0.147372f );
test.safe_run();
}
matcher->knnMatch(descQ, descT, matches, k);
//cout << "\nBest " << k << " matches to " << descT.rows << " train desc-s." << endl;
- ASSERT_EQ(descQ.rows, matches.size());
+ ASSERT_EQ(descQ.rows, static_cast<int>(matches.size()));
for(size_t i = 0; i<matches.size(); i++)
{
//cout << "\nmatches[" << i << "].size()==" << matches[i].size() << endl;
- ASSERT_GT(min(k, descT.rows), static_cast<int>(matches[i].size()));
+ ASSERT_GE(min(k, descT.rows), static_cast<int>(matches[i].size()));
for(size_t j = 0; j<matches[i].size(); j++)
{
//cout << "\t" << matches[i][j].queryIdx << " -> " << matches[i][j].trainIdx << endl;
};
-
-
-struct CV_EXPORTS DataMatrixCode {
- char msg[4]; //TODO std::string
- Mat original;
- Point corners[4]; //TODO vector
-};
-
-CV_EXPORTS void findDataMatrix(const Mat& image, std::vector<DataMatrixCode>& codes);
-CV_EXPORTS void drawDataMatrixCodes(const std::vector<DataMatrixCode>& codes, Mat& drawImage);
+CV_EXPORTS_W void findDataMatrix(InputArray image,
+ CV_OUT vector<string>& codes,
+ OutputArray corners=noArray(),
+ OutputArrayOfArrays dmtx=noArray());
+CV_EXPORTS_W void drawDataMatrixCodes(InputOutputArray image,
+ const vector<string>& codes,
+ InputArray corners);
}
/****************************************************************************************\
namespace cv
{
-namespace
+
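+// Decodes DataMatrix codes found in the image. Optionally returns the four
+// corner points of each code (one n x 4 CV_32SC2 row per code) and the
+// extracted code images via _dmtx.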
+void findDataMatrix(InputArray _image,
+ vector<string>& codes,
+ OutputArray _corners,
+ OutputArrayOfArrays _dmtx)
{
- struct CvDM2DM_transform
- {
- DataMatrixCode operator()(CvDataMatrixCode& cvdm)
+ Mat image = _image.getMat();
+ CvMat m(image);
+ deque <CvDataMatrixCode> rc = cvFindDataMatrix(&m);
+ int i, n = (int)rc.size();
+ Mat corners;
+
+ if( _corners.needed() )
{
- DataMatrixCode dm;
- memcpy(dm.msg,cvdm.msg,sizeof(cvdm.msg));
- dm.original = cv::Mat(cvdm.original,true);
- cvReleaseMat(&cvdm.original);
- cv::Mat c(cvdm.corners,true);
- dm.corners[0] = c.at<Point>(0,0);
- dm.corners[1] = c.at<Point>(1,0);
- dm.corners[2] = c.at<Point>(2,0);
- dm.corners[3] = c.at<Point>(3,0);
- cvReleaseMat(&cvdm.corners);
- return dm;
+ _corners.create(n, 4, CV_32SC2);
+ corners = _corners.getMat();
}
- };
-
- struct DrawDataMatrixCode
- {
- DrawDataMatrixCode(cv::Mat& image):image(image){}
- void operator()(const DataMatrixCode& code)
+
+ if( _dmtx.needed() )
+ _dmtx.create(n, 1, CV_8U);
+
+ codes.resize(n);
+
+ for( i = 0; i < n; i++ )
{
- Scalar c(0, 255, 0);
- Scalar c2(255, 0,0);
- line(image, code.corners[0], code.corners[1], c);
- line(image, code.corners[1], code.corners[2], c);
- line(image, code.corners[2], code.corners[3], c);
- line(image, code.corners[3], code.corners[0], c);
- string code_text(code.msg,4);
- //int baseline = 0;
- //Size sz = getTextSize(code_text, CV_FONT_HERSHEY_SIMPLEX, 1, 1, &baseline);
- putText(image, code_text, code.corners[0], CV_FONT_HERSHEY_SIMPLEX, 0.8, c2, 1, CV_AA, false);
+ CvDataMatrixCode& rc_i = rc[i];
+ codes[i] = string(rc_i.msg);
+
+ if( corners.data )
+ {
+ const Point* srcpt = (Point*)rc_i.corners->data.ptr;
+ Point* dstpt = (Point*)corners.ptr(i);
+ for( int k = 0; k < 4; k++ )
+ dstpt[k] = srcpt[k];
+ }
+ cvReleaseMat(&rc_i.corners);
+
+ if( _dmtx.needed() )
+ {
+ _dmtx.create(rc_i.original->rows, rc_i.original->cols, rc_i.original->type, i);
+ Mat dst = _dmtx.getMat(i);
+ Mat(rc_i.original).copyTo(dst);
+ }
+ cvReleaseMat(&rc_i.original);
}
- cv::Mat& image;
-
- DrawDataMatrixCode& operator=(const DrawDataMatrixCode&);
- };
}
-void findDataMatrix(const cv::Mat& image, std::vector<DataMatrixCode>& codes)
+void drawDataMatrixCodes(InputOutputArray _image,
+ const vector<string>& codes,
+ InputArray _corners)
{
- CvMat m(image);
- deque <CvDataMatrixCode> rc = cvFindDataMatrix(&m);
- codes.clear();
- codes.resize(rc.size());
- std::transform(rc.begin(),rc.end(),codes.begin(),CvDM2DM_transform());
-}
-
-void drawDataMatrixCodes(const std::vector<DataMatrixCode>& codes, Mat& drawImage)
-{
- std::for_each(codes.begin(),codes.end(),DrawDataMatrixCode(drawImage));
+ Mat image = _image.getMat();
+ Mat corners = _corners.getMat();
+ int i, n = corners.rows;
+
+ if( n > 0 )
+ {
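+ // Each row holds the 4 corner points of one code (8 ints, CV_32SC2),
+ // in the layout produced by findDataMatrix.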
+ CV_Assert( corners.depth() == CV_32S &&
+ corners.cols*corners.channels() == 8 &&
+ n == (int)codes.size() );
+ }
+
+ for( i = 0; i < n; i++ )
+ {
+ Scalar c(0, 255, 0);
+ Scalar c2(255, 0,0);
+ const Point* pt = (const Point*)corners.ptr(i);
+
+ for( int k = 0; k < 4; k++ )
+ line(image, pt[k], pt[(k+1)%4], c);
+ //int baseline = 0;
+ //Size sz = getTextSize(code_text, CV_FONT_HERSHEY_SIMPLEX, 1, 1, &baseline);
+ putText(image, codes[i], pt[0], CV_FONT_HERSHEY_SIMPLEX, 0.8, c2, 1, CV_AA, false);
+ }
}
-
+
}
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
class CV_LatentSVMDetectorTest : public cvtest::BaseTest
{
-public:
- CV_LatentSVMDetectorTest();
- ~CV_LatentSVMDetectorTest();
protected:
void run(int);
- bool isEqual(CvRect r1, CvRect r2);
+ bool isEqual(CvRect r1, CvRect r2, int eps);
};
-CV_LatentSVMDetectorTest::CV_LatentSVMDetectorTest()
+bool CV_LatentSVMDetectorTest::isEqual(CvRect r1, CvRect r2, int eps)
{
-}
-
-CV_LatentSVMDetectorTest::~CV_LatentSVMDetectorTest() {}
-
-bool CV_LatentSVMDetectorTest::isEqual(CvRect r1, CvRect r2)
-{
- return ((r1.x == r2.x) && (r1.y == r2.y) && (r1.width == r2.width) && (r1.height == r2.height));
+ return (std::abs(r1.x - r2.x) <= eps
+ && std::abs(r1.y - r2.y) <= eps
+ && std::abs(r1.width - r2.width) <= eps
+ && std::abs(r1.height - r2.height) <= eps);
}
void CV_LatentSVMDetectorTest::run( int /* start_from */)
string img_path = string(ts->get_data_path()) + "latentsvmdetector/cat.jpg";
string model_path = string(ts->get_data_path()) + "latentsvmdetector/models_VOC2007/cat.xml";
int numThreads = -1;
+
#ifdef HAVE_TBB
numThreads = 2;
tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);
- init.initialize(numThreads);
+ init.initialize(numThreads);
#endif
- IplImage* image = cvLoadImage(img_path.c_str());
- if (!image)
+
+ IplImage* image = cvLoadImage(img_path.c_str());
+ if (!image)
{
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
}
- CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_path.c_str());
- if (!detector)
- {
- ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
- cvReleaseImage(&image);
- return;
- }
+ CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_path.c_str());
+ if (!detector)
+ {
+ ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
+ cvReleaseImage(&image);
+ return;
+ }
- CvMemStorage* storage = cvCreateMemStorage(0);
+ CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* detections = 0;
detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);
- if (detections->total != num_detections)
- {
- ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
- }
- else
- {
- ts->set_failed_test_info(cvtest::TS::OK);
- for (int i = 0; i < detections->total; i++)
- {
- CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i );
- CvRect bounding_box = detection.rect;
- float score = detection.score;
- if ((!isEqual(bounding_box, true_bounding_boxes[i])) || (fabs(score - true_scores[i]) > score_thr))
- {
- ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
- break;
- }
- }
- }
+ if (detections->total != num_detections)
+ {
+ ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
+ }
+ else
+ {
+ ts->set_failed_test_info(cvtest::TS::OK);
+ for (int i = 0; i < detections->total; i++)
+ {
+ CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i );
+ CvRect bounding_box = detection.rect;
+ float score = detection.score;
+ if ((!isEqual(bounding_box, true_bounding_boxes[i], 1)) || (fabs(score - true_scores[i]) > score_thr))
+ {
+ ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
+ break;
+ }
+ }
+ }
#ifdef HAVE_TBB
init.terminate();
#endif
- cvReleaseMemStorage( &storage );
- cvReleaseLatentSvmDetector( &detector );
+ cvReleaseMemStorage( &storage );
+ cvReleaseLatentSvmDetector( &detector );
cvReleaseImage( &image );
}
class LatentSVMDetectorTest : public cvtest::BaseTest
{
-public:
- LatentSVMDetectorTest();
protected:
void run(int);
};
-LatentSVMDetectorTest::LatentSVMDetectorTest()
-{
-}
-
static void writeDetections( FileStorage& fs, const string& nodeName, const vector<LatentSvmDetector::ObjectDetection>& detections )
{
fs << nodeName << "[";
}
}
-static inline bool isEqual( const LatentSvmDetector::ObjectDetection& d1, const LatentSvmDetector::ObjectDetection& d2)
+static inline bool isEqual( const LatentSvmDetector::ObjectDetection& d1, const LatentSvmDetector::ObjectDetection& d2, int eps, float threshold)
{
- return ((d1.rect.x == d2.rect.x) && (d1.rect.y == d2.rect.y) && (d1.rect.width == d2.rect.width) && (d1.rect.height == d2.rect.height) &&
- (d1.classID == d2.classID) &&
- std::abs(d1.score-d2.score) < score_thr );
+ return (
+ std::abs(d1.rect.x - d2.rect.x) <= eps
+ && std::abs(d1.rect.y - d2.rect.y) <= eps
+ && std::abs(d1.rect.width - d2.rect.width) <= eps
+ && std::abs(d1.rect.height - d2.rect.height) <= eps
+ && (d1.classID == d2.classID)
+ && std::abs(d1.score - d2.score) <= threshold
+ );
}
-bool compareResults( const vector<LatentSvmDetector::ObjectDetection>& calc, const vector<LatentSvmDetector::ObjectDetection>& valid )
+std::ostream& operator << (std::ostream& os, const CvRect& r)
+{
+ return (os << "[x=" << r.x << ", y=" << r.y << ", w=" << r.width << ", h=" << r.height << "]");
+}
+
+bool compareResults( const vector<LatentSvmDetector::ObjectDetection>& calc, const vector<LatentSvmDetector::ObjectDetection>& valid, int eps, float threshold)
{
if( calc.size() != valid.size() )
return false;
{
const LatentSvmDetector::ObjectDetection& c = calc[i];
const LatentSvmDetector::ObjectDetection& v = valid[i];
- if( !isEqual(c,v) )
+ if( !isEqual(c, v, eps, threshold) )
+ {
+ std::cerr << "Expected: " << v.rect << " class=" << v.classID << " score=" << v.score << std::endl;
+ std::cerr << "Actual: " << c.rect << " class=" << c.classID << " score=" << c.score << std::endl;
return false;
+ }
}
return true;
}
string true_res_path = string(ts->get_data_path()) + "latentsvmdetector/results.xml";
int numThreads = 1;
+
#ifdef HAVE_TBB
numThreads = 2;
#endif
+
Mat image_cat = imread( img_path_cat );
Mat image_cars = imread( img_path_cars );
if( image_cat.empty() || image_cars.empty() )
readDetections( fs, "detections12_cars", true_detections12_cars );
- if( !compareResults(detections1_cat, true_detections1_cat) )
+ if( !compareResults(detections1_cat, true_detections1_cat, 1, score_thr) )
{
std::cerr << "Results of detector1 are invalid on image cat.jpg" << std::endl;
ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
}
- if( !compareResults(detections12_cat, true_detections12_cat) )
+ if( !compareResults(detections12_cat, true_detections12_cat, 1, score_thr) )
{
std::cerr << "Results of detector12 are invalid on image cat.jpg" << std::endl;
ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
}
- if( !compareResults(detections12_cars, true_detections12_cars) )
+ if( !compareResults(detections12_cars, true_detections12_cars, 1, score_thr) )
{
std::cerr << "Results of detector12 are invalid on image cars.jpg" << std::endl;
ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
DEPENDS ${opencv_hdrs})
add_library(${the_module} SHARED src2/cv2.cpp ${CMAKE_CURRENT_BINARY_DIR}/generated0.i ${cv2_generated_hdrs} src2/cv2.cv.hpp)
-target_link_libraries(${the_module} ${PYTHON_LIBRARIES} ${OPENCV_MODULE_${the_module}_DEPS})
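+# Link the debug Python library in Debug configurations when one is available;
+# otherwise use the release library for all configurations.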
+if(PYTHON_DEBUG_LIBRARIES)
+ target_link_libraries(${the_module} debug ${PYTHON_DEBUG_LIBRARIES} optimized ${PYTHON_LIBRARIES})
+else()
+ target_link_libraries(${the_module} ${PYTHON_LIBRARIES})
+endif()
+target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS})
execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import distutils.sysconfig; print distutils.sysconfig.get_config_var('SO')"
RESULT_VARIABLE PYTHON_CVPY_PROCESS
set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG")
endif()
-if(MSVC)
+if(MSVC AND NOT PYTHON_DEBUG_LIBRARIES)
set(PYTHON_INSTALL_CONFIGURATIONS CONFIGURATIONS Release)
else()
set(PYTHON_INSTALL_CONFIGURATIONS "")
typedef vector<KeyPoint> vector_KeyPoint;
typedef vector<Mat> vector_Mat;
typedef vector<DMatch> vector_DMatch;
+typedef vector<string> vector_string;
typedef vector<vector<Point> > vector_vector_Point;
typedef vector<vector<Point2f> > vector_vector_Point2f;
typedef vector<vector<Point3f> > vector_vector_Point3f;
typedef vector<vector<DMatch> > vector_vector_DMatch;
+typedef Ptr<Algorithm> Ptr_Algorithm;
typedef Ptr<FeatureDetector> Ptr_FeatureDetector;
typedef Ptr<DescriptorExtractor> Ptr_DescriptorExtractor;
typedef Ptr<DescriptorMatcher> Ptr_DescriptorMatcher;
}
};
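+// Lets vector<string> arguments and results round-trip as Python lists of str
+// (needed by wrappers such as the new findDataMatrix).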
+template<> struct pyopencvVecConverter<string>
+{
+ static bool to(PyObject* obj, vector<string>& value, const char* name="<unknown>")
+ {
+ return pyopencv_to_generic_vec(obj, value, name);
+ }
+
+ static PyObject* from(const vector<string>& value)
+ {
+ return pyopencv_from_generic_vec(value);
+ }
+};
+
static inline bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const char *name="<unknown>")
{
PyObject* m = Py_InitModule(MODULESTR, methods);
PyObject* d = PyModule_GetDict(m);
- PyDict_SetItemString(d, "__version__", PyString_FromString("$Rev: 4557 $"));
+ PyDict_SetItemString(d, "__version__", PyString_FromString(CV_VERSION));
opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL);
PyDict_SetItemString(d, "error", opencv_error);
PyObject *pyobj_cost_matrix = NULL;
CvArr* flow=NULL;
PyObject *pyobj_flow = NULL;
- float lower_bound = 0.0;
+ float lower_bound = FLT_MAX;
PyObject *userdata = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kw, "OOi|OOOfO", (char**)keywords,
m = Py_InitModule(OLD_MODULESTR, old_methods);
d = PyModule_GetDict(m);
- PyDict_SetItemString(d, "__version__", PyString_FromString("$Rev: 4557 $"));
+ PyDict_SetItemString(d, "__version__", PyString_FromString(CV_VERSION));
PyDict_SetItemString(d, "error", opencv_error);
// Couple of warnings about strict aliasing here. Not clear how to fix.
temp_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
threshes = [ x / 100. for x in range(1,10) ]
- results = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, use_harris = 1)) for t in threshes])
+ results = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, useHarris = 1)) for t in threshes])
# Check that GoodFeaturesToTrack has not modified input image
self.assert_(arr.tostring() == original.tostring())
# Check for repeatability
for i in range(10):
- results2 = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, use_harris = 1)) for t in threshes])
+ results2 = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, useHarris = 1)) for t in threshes])
self.assert_(results == results2)
for t0,t1 in zip(threshes, threshes[1:]):
a = self.get_sample("samples/c/lena.jpg", 0)
b = self.get_sample("samples/c/lena.jpg", 0)
(w,h) = cv.GetSize(a)
- vel_size = (w - 8, h - 8)
+ vel_size = (w - 8 + 1, h - 8 + 1)
velx = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1)
vely = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1)
cv.CalcOpticalFlowBM(a, b, (8,8), (1,1), (8,8), 0, velx, vely)
\r
#include "opencv2/highgui/highgui.hpp"\r
#include "opencv2/flann/flann.hpp"\r
+#include "opencv2/opencv_modules.hpp"\r
\r
using namespace std;\r
using namespace cv;\r
typedef TestBaseWithParam<String> stitch;\r
typedef TestBaseWithParam<String> match;\r
\r
-PERF_TEST_P(stitch, a123, testing::Values("surf", "orb"))\r
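+// SURF is provided by the nonfree module; when it is not built, restrict the\r
+// tested detectors to ORB only.\r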
+#ifdef HAVE_OPENCV_NONFREE\r
+#define TEST_DETECTORS testing::Values("surf", "orb")\r
+#else\r
+#define TEST_DETECTORS testing::Values<String>("orb")\r
+#endif\r
+\r
+PERF_TEST_P(stitch, a123, TEST_DETECTORS)\r
{\r
Mat pano;\r
\r
}\r
}\r
\r
-PERF_TEST_P(stitch, b12, testing::Values("surf", "orb"))\r
+PERF_TEST_P(stitch, b12, TEST_DETECTORS)\r
{\r
Mat pano;\r
\r
}\r
}\r
\r
-PERF_TEST_P( match, bestOf2Nearest, testing::Values("surf", "orb"))\r
+PERF_TEST_P( match, bestOf2Nearest, TEST_DETECTORS)\r
{\r
Mat img1, img1_full = imread( getDataPath("stitching/b1.jpg") );\r
Mat img2, img2_full = imread( getDataPath("stitching/b2.jpg") );\r
#ifdef HAVE_OPENCV_GPU
if (try_use_gpu && gpu::getCudaEnabledDeviceCount() > 0)
{
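+ // SURF comes from the nonfree module; fall back to ORB features when it is not built.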
+#ifdef HAVE_OPENCV_NONFREE
stitcher.setFeaturesFinder(new detail::SurfFeaturesFinderGpu());
+#else
+ stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder());
+#endif
stitcher.setWarper(new SphericalWarperGpu());
stitcher.setSeamFinder(new detail::GraphCutSeamFinderGpu());
}
else
#endif
{
+#ifdef HAVE_OPENCV_NONFREE
stitcher.setFeaturesFinder(new detail::SurfFeaturesFinder());
+#else
+ stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder());
+#endif
stitcher.setWarper(new SphericalWarper());
stitcher.setSeamFinder(new detail::GraphCutSeamFinder(detail::GraphCutSeamFinderBase::COST_COLOR));
}
//M*/\r
\r
#include "test_precomp.hpp"\r
+#include "opencv2/opencv_modules.hpp"\r
+\r
+#ifdef HAVE_OPENCV_NONFREE\r
\r
using namespace cv;\r
using namespace std;\r
ASSERT_GT(br_rect_count, 0);\r
ASSERT_EQ(bad_count, 0);\r
}\r
+\r
+#endif
\ No newline at end of file
def isTest(self, fullpath):
if not os.path.isfile(fullpath):
return False
+ if self.targetos == "nt" and not fullpath.endswith(".exe"):
+ return False
if hostos == self.targetos:
return os.access(fullpath, os.X_OK)
if self.targetos == "android" and fullpath.endswith(".apk"):
def getLogName(self, app, timestamp):
app = os.path.basename(app)
if app.endswith(".exe"):
- app = app[:-4]
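+ # debug builds may append a "d" suffix to the executable name; strip it as well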
+ if app.endswith("d.exe"):
+ app = app[:-5]
+ else:
+ app = app[:-4]
if app.startswith(self.nameprefix):
app = app[len(self.nameprefix):]
if fname == name:
return t
if fname.endswith(".exe") or (self.targetos == "android" and fname.endswith(".apk")):
- fname = fname[:-4]
+ if fname.endswith("d.exe"):
+ fname = fname[:-5]
+ else:
+ fname = fname[:-4]
if fname == name:
return t
if fname.startswith(self.nameprefix):
return
elif self.targetos == "android":
hostlogpath = ""
+ usercolor = [a for a in args if a.startswith("--gtest_color=")]
+ if len(usercolor) == 0 and _stdout.isatty() and hostos != "nt":
+ args.append("--gtest_color=yes")
try:
andoidcwd = "/data/bin/" + getpass.getuser().replace(" ","") + "_" + self.options.mode +"/"
exename = os.path.basename(exe)
parser.add_option("-a", "--accuracy", dest="accuracy", help="look for accuracy tests instead of performance tests", action="store_true", default=False)
parser.add_option("-l", "--longname", dest="useLongNames", action="store_true", help="generate log files with long names", default=False)
parser.add_option("", "--android_test_data_path", dest="test_data_path", help="OPENCV_TEST_DATA_PATH for Android run", metavar="PATH", default="/sdcard/opencv_testdata/")
- parser.add_option("", "--configuration", dest="configuration", help="force Debug or Release donfiguration", metavar="CFG", default="")
+ parser.add_option("", "--configuration", dest="configuration", help="force Debug or Release configuration", metavar="CFG", default="")
parser.add_option("", "--serial", dest="adb_serial", help="Android: directs command to the USB device or emulator with the given serial number", metavar="serial number", default="")
parser.add_option("", "--package", dest="junit_package", help="Android: run jUnit tests for specified package", metavar="package", default="")
parser.add_option("", "--help-tests", dest="help", help="Show help for test executable", action="store_true", default=False)
else:
options.mode = "perf"
- run_args = getRunArgs(args)
+ run_args = getRunArgs(args[1:] or ['.'])
if len(run_args) == 0:
print >> sys.stderr, "Usage:\n", os.path.basename(sys.argv[0]), "<build_path>"
BadArgTest::BadArgTest()
{
- progress = -1;
- test_case_idx = -1;
- freq = cv::getTickFrequency();
+ progress = -1;
+ test_case_idx = -1;
+ freq = cv::getTickFrequency();
+ // oldErrorCbk = 0;
+ // oldErrorCbkData = 0;
}
BadArgTest::~BadArgTest(void)
return errcount;
}
-
/*****************************************************************************************\
* Base Class for Test System *
\*****************************************************************************************/
return "Generic/Unknown";
}
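+// Redirects OpenCV errors into the test log, recording the failed function,
+// file and line instead of reporting to stderr or a message box.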
+static int tsErrorCallback( int status, const char* func_name, const char* err_msg, const char* file_name, int line, TS* ts )
+{
+ ts->printf(TS::LOG, "OpenCV Error: %s (%s) in %s, file %s, line %d\n", cvErrorStr(status), err_msg, func_name[0] != 0 ? func_name : "unknown function", file_name, line);
+ return 0;
+}
+
/************************************** Running tests **********************************/
void TS::init( const string& modulename )
data_path = string(buf);
}
+ cv::redirectError((cv::ErrorCallback)tsErrorCallback, this);
+
if( ::testing::GTEST_FLAG(catch_exceptions) )
{
- cvSetErrMode( CV_ErrModeParent );
- cvRedirectError( cvStdErrReport );
#if defined WIN32 || defined _WIN32
#ifdef _MSC_VER
_set_se_translator( SEHTranslator );
}
else
{
- cvSetErrMode( CV_ErrModeLeaf );
- cvRedirectError( cvGuiBoxReport );
#if defined WIN32 || defined _WIN32
#ifdef _MSC_VER
_set_se_translator( 0 );
}
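+// Reference implementation of the Hamming norm: counts the non-zero cells of
+// cellSize bits each (cellSize 1 = NORM_HAMMING, cellSize 2 = NORM_HAMMING2).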
+static int
+normHamming(const uchar* src, size_t total, int cellSize)
+{
+ int result = 0;
+ int mask = cellSize == 1 ? 1 : cellSize == 2 ? 3 : cellSize == 4 ? 15 : -1;
+ CV_Assert( mask >= 0 );
+
+ for( size_t i = 0; i < total; i++ )
+ {
+ unsigned a = src[i];
+ for( ; a != 0; a >>= cellSize )
+ result += (a & mask) != 0;
+ }
+ return result;
+}
+
+
template<typename _Tp> static double
norm_(const _Tp* src, size_t total, int cn, int normType, double startval, const uchar* mask)
{
double norm(const Mat& src, int normType, const Mat& mask)
{
+ if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
+ {
+ if( !mask.empty() )
+ {
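+ // AND-ing with a 0/255 mask zeroes the masked-out bytes,
+ // so they contribute no set bits to the count.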
+ Mat temp;
+ bitwise_and(src, mask, temp);
+ return norm(temp, normType, Mat());
+ }
+
+ CV_Assert( src.depth() == CV_8U );
+
+ const Mat *arrays[]={&src, 0};
+ Mat planes[1];
+
+ NAryMatIterator it(arrays, planes);
+ size_t total = planes[0].total();
+ size_t i, nplanes = it.nplanes;
+ double result = 0;
+ int cellSize = normType == NORM_HAMMING ? 1 : 2;
+
+ for( i = 0; i < nplanes; i++, ++it )
+ result += normHamming(planes[0].data, total, cellSize);
+ return result;
+ }
+ int normType0 = normType;
+ normType = normType == NORM_L2SQR ? NORM_L2 : normType;
+
CV_Assert( mask.empty() || (src.size == mask.size && mask.type() == CV_8U) );
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
+
const Mat *arrays[]={&src, &mask, 0};
Mat planes[2];
CV_Error(CV_StsUnsupportedFormat, "");
};
}
- if( normType == NORM_L2 )
+ if( normType0 == NORM_L2 )
result = sqrt(result);
return result;
}
double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask)
{
+ if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
+ {
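+ // The Hamming distance equals the number of set bits in src1 ^ src2.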
+ Mat temp;
+ bitwise_xor(src1, src2, temp);
+ if( !mask.empty() )
+ bitwise_and(temp, mask, temp);
+
+ CV_Assert( temp.depth() == CV_8U );
+
+ const Mat *arrays[]={&temp, 0};
+ Mat planes[1];
+
+ NAryMatIterator it(arrays, planes);
+ size_t total = planes[0].total();
+ size_t i, nplanes = it.nplanes;
+ double result = 0;
+ int cellSize = normType == NORM_HAMMING ? 1 : 2;
+
+ for( i = 0; i < nplanes; i++, ++it )
+ result += normHamming(planes[0].data, total, cellSize);
+ return result;
+ }
+ int normType0 = normType;
+ normType = normType == NORM_L2SQR ? NORM_L2 : normType;
+
CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
CV_Assert( mask.empty() || (src1.size == mask.size && mask.type() == CV_8U) );
CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
CV_Error(CV_StsUnsupportedFormat, "");
};
}
- if( normType == NORM_L2 )
+ if( normType0 == NORM_L2 )
result = sqrt(result);
return result;
}
BackgroundSubtractor
--------------------
-.. ocv:class:: BackgroundSubtractor
+.. ocv:class:: BackgroundSubtractor : public Algorithm
Base class for background/foreground segmentation. ::
- class BackgroundSubtractor
+ class BackgroundSubtractor : public Algorithm
{
public:
virtual ~BackgroundSubtractor();
The class is only used to define the common interface for
the whole family of background/foreground segmentation algorithms.
*/
-class CV_EXPORTS_W BackgroundSubtractor
+class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
{
public:
//! the virtual destructor
//! re-initiaization method
virtual void initialize(Size frameSize, int frameType);
+ virtual AlgorithmInfo* info() const;
+
+protected:
Size frameSize;
int frameType;
Mat bgmodel;
//! re-initiaization method
virtual void initialize(Size frameSize, int frameType);
+ virtual AlgorithmInfo* info() const;
+
+protected:
Size frameSize;
int frameType;
Mat bgmodel;
int nframes;
int history;
int nmixtures;
- //! here it is the maximum allowed number of mixture comonents.
+ //! here it is the maximum allowed number of mixture components.
//! Actual number is determined dynamically per pixel
- float varThreshold;
- // threshold on the squared Mahalan. dist. to decide if it is well described
- //by the background model or not. Related to Cthr from the paper.
- //This does not influence the update of the background. A typical value could be 4 sigma
- //and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+ double varThreshold;
+ // threshold on the squared Mahalanobis distance to decide if it is well described
+ // by the background model or not. Related to Cthr from the paper.
+ // This does not influence the update of the background. A typical value could be 4 sigma
+ // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
/////////////////////////
- //less important parameters - things you might change but be carefull
+ // less important parameters - things you might change but be careful
////////////////////////
float backgroundRatio;
- //corresponds to fTB=1-cf from the paper
- //TB - threshold when the component becomes significant enough to be included into
- //the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
- //For alpha=0.001 it means that the mode should exist for approximately 105 frames before
- //it is considered foreground
- //float noiseSigma;
+ // corresponds to fTB=1-cf from the paper
+ // TB - threshold when the component becomes significant enough to be included into
+ // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
+ // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+ // it is considered foreground
+ // float noiseSigma;
float varThresholdGen;
//correspondts to Tg - threshold on the squared Mahalan. dist. to decide
//when a sample is close to the existing components. If it is not close
};
-static void process8uC1( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fgmask, double learningRate )
+static void process8uC1( const Mat& image, Mat& fgmask, double learningRate,
+ Mat& bgmodel, int nmixtures, double backgroundRatio,
+ double varThreshold, double noiseSigma )
{
int x, y, k, k1, rows = image.rows, cols = image.cols;
- float alpha = (float)learningRate, T = (float)obj.backgroundRatio, vT = (float)obj.varThreshold;
- int K = obj.nmixtures;
- MixData<float>* mptr = (MixData<float>*)obj.bgmodel.data;
+ float alpha = (float)learningRate, T = (float)backgroundRatio, vT = (float)varThreshold;
+ int K = nmixtures;
+ MixData<float>* mptr = (MixData<float>*)bgmodel.data;
const float w0 = (float)defaultInitialWeight;
const float sk0 = (float)(w0/(defaultNoiseSigma*2));
const float var0 = (float)(defaultNoiseSigma*defaultNoiseSigma*4);
- const float minVar = (float)(obj.noiseSigma*obj.noiseSigma);
+ const float minVar = (float)(noiseSigma*noiseSigma);
for( y = 0; y < rows; y++ )
{
}
}
-static void process8uC3( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fgmask, double learningRate )
+
+static void process8uC3( const Mat& image, Mat& fgmask, double learningRate,
+ Mat& bgmodel, int nmixtures, double backgroundRatio,
+ double varThreshold, double noiseSigma )
{
int x, y, k, k1, rows = image.rows, cols = image.cols;
- float alpha = (float)learningRate, T = (float)obj.backgroundRatio, vT = (float)obj.varThreshold;
- int K = obj.nmixtures;
+ float alpha = (float)learningRate, T = (float)backgroundRatio, vT = (float)varThreshold;
+ int K = nmixtures;
const float w0 = (float)defaultInitialWeight;
const float sk0 = (float)(w0/(defaultNoiseSigma*2*sqrt(3.)));
const float var0 = (float)(defaultNoiseSigma*defaultNoiseSigma*4);
- const float minVar = (float)(obj.noiseSigma*obj.noiseSigma);
- MixData<Vec3f>* mptr = (MixData<Vec3f>*)obj.bgmodel.data;
+ const float minVar = (float)(noiseSigma*noiseSigma);
+ MixData<Vec3f>* mptr = (MixData<Vec3f>*)bgmodel.data;
for( y = 0; y < rows; y++ )
{
CV_Assert(learningRate >= 0);
if( image.type() == CV_8UC1 )
- process8uC1( *this, image, fgmask, learningRate );
+ process8uC1( image, fgmask, learningRate, bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma );
else if( image.type() == CV_8UC3 )
- process8uC3( *this, image, fgmask, learningRate );
+ process8uC3( image, fgmask, learningRate, bgmodel, nmixtures, backgroundRatio, varThreshold, noiseSigma );
else
CV_Error( CV_StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" );
}
#include "precomp.hpp"
+namespace cv
+{
/*
Interface of Gaussian mixture algorithm from:
-fast - number of Gausssian components is constantly adapted per pixel.
-performs also shadow detection (see bgfg_segm_test.cpp example)
- */
-
-
-#define CV_BG_MODEL_MOG2 3 /* "Mixture of Gaussians 2". */
-
-
-/* default parameters of gaussian background detection algorithm */
-#define CV_BGFG_MOG2_STD_THRESHOLD 4.0f /* lambda=2.5 is 99% */
-#define CV_BGFG_MOG2_WINDOW_SIZE 500 /* Learning rate; alpha = 1/CV_GBG_WINDOW_SIZE */
-#define CV_BGFG_MOG2_BACKGROUND_THRESHOLD 0.9f /* threshold sum of weights for background test */
-#define CV_BGFG_MOG2_STD_THRESHOLD_GENERATE 3.0f /* lambda=2.5 is 99% */
-#define CV_BGFG_MOG2_NGAUSSIANS 5 /* = K = number of Gaussians in mixture */
-#define CV_BGFG_MOG2_VAR_INIT 15.0f /* initial variance for new components*/
-#define CV_BGFG_MOG2_VAR_MIN 4.0f
-#define CV_BGFG_MOG2_VAR_MAX 5*CV_BGFG_MOG2_VAR_INIT
-#define CV_BGFG_MOG2_MINAREA 15.0f /* for postfiltering */
-
-/* additional parameters */
-#define CV_BGFG_MOG2_CT 0.05f /* complexity reduction prior constant 0 - no reduction of number of components*/
-#define CV_BGFG_MOG2_SHADOW_VALUE 127 /* value to use in the segmentation mask for shadows, sot 0 not to do shadow detection*/
-#define CV_BGFG_MOG2_SHADOW_TAU 0.5f /* Tau - shadow threshold, see the paper for explanation*/
-
-typedef struct CvGaussBGStatModel2Params
+*/
+
+// default parameters of gaussian background detection algorithm
+static const int defaultHistory2 = 500; // Learning rate; alpha = 1/defaultHistory2
+static const float defaultVarThreshold2 = 4.0f*4.0f;
+static const int defaultNMixtures2 = 5; // maximal number of Gaussians in mixture
+static const float defaultBackgroundRatio2 = 0.9f; // threshold sum of weights for background test
+static const float defaultVarThresholdGen2 = 3.0f*3.0f;
+static const float defaultVarInit2 = 15.0f; // initial variance for new components
+static const float defaultVarMax2 = 5*defaultVarInit2;
+static const float defaultVarMin2 = 4.0f;
+
+// additional parameters
+static const float defaultfCT2 = 0.05f; // complexity reduction prior constant 0 - no reduction of number of components
+static const unsigned char defaultnShadowDetection2 = (unsigned char)127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
+static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
+
+struct GaussBGStatModel2Params
{
//image info
int nWidth;
//version of the background. Tau is a threshold on how much darker the shadow can be.
//Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
//See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
-} CvGaussBGStatModel2Params;
-
-#define CV_BGFG_MOG2_NDMAX 3
+};
-typedef struct CvPBGMMGaussian
+struct GMM
{
float weight;
- float mean[CV_BGFG_MOG2_NDMAX];
float variance;
-}CvPBGMMGaussian;
-
-typedef struct CvGaussBGStatModel2Data
-{
- CvPBGMMGaussian* rGMM; //array for the mixture of Gaussians
- unsigned char* rnUsedModes;//number of Gaussian components per pixel (maximum 255)
-} CvGaussBGStatModel2Data;
-
+};
-
-//shadow detection performed per pixel
+// shadow detection performed per pixel
// should work for rgb data, could be usefull for gray scale and depth data as well
-// See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
-CV_INLINE int _icvRemoveShadowGMM(float* data, int nD,
- unsigned char nModes,
- CvPBGMMGaussian* pGMM,
- float m_fTb,
- float m_fTB,
- float m_fTau)
+// See: Prati,Mikic,Trivedi,Cucchiara,"Detecting Moving Shadows...",IEEE PAMI,2003.
+static CV_INLINE bool
+detectShadowGMM(const float* data, int nchannels, int nmodes,
+ const GMM* gmm, const float* mean,
+ float Tb, float TB, float tau)
{
float tWeight = 0;
- float numerator, denominator;
+
// check all the components marked as background:
- for (int iModes=0;iModes<nModes;iModes++)
+ for( int mode = 0; mode < nmodes; mode++, mean += nchannels )
{
+ GMM g = gmm[mode];
- CvPBGMMGaussian g=pGMM[iModes];
-
- numerator = 0.0f;
- denominator = 0.0f;
- for (int iD=0;iD<nD;iD++)
+ float numerator = 0.0f;
+ float denominator = 0.0f;
+ for( int c = 0; c < nchannels; c++ )
{
- numerator += data[iD] * g.mean[iD];
- denominator += g.mean[iD]* g.mean[iD];
+ numerator += data[c] * mean[c];
+ denominator += mean[c] * mean[c];
}
// no division by zero allowed
- if (denominator == 0)
- {
- return 0;
- };
- float a = numerator / denominator;
+ if( denominator == 0 )
+ return false;
// if tau < a < 1 then also check the color distortion
- if ((a <= 1) && (a >= m_fTau))
+ if( numerator <= denominator && numerator >= tau*denominator )
{
-
- float dist2a=0.0f;
+ float a = numerator / denominator;
+ float dist2a = 0.0f;
- for (int iD=0;iD<nD;iD++)
+ for( int c = 0; c < nchannels; c++ )
{
- float dD= a*g.mean[iD] - data[iD];
- dist2a += (dD*dD);
+ float dD= a*mean[c] - data[c];
+ dist2a += dD*dD;
}
- if (dist2a<m_fTb*g.variance*a*a)
- {
- return 2;
- }
+ if (dist2a < Tb*g.variance*a*a)
+ return true;
};
tWeight += g.weight;
- if (tWeight > m_fTB)
- {
- return 0;
- };
+ if( tWeight > TB )
+ return false;
};
- return 0;
+ return false;
}
//update GMM - the base update function performed per pixel
//IEEE Trans. on Pattern Analysis and Machine Intelligence, vol.26, no.5, pages 651-656, 2004
//http://www.zoranz.net/Publications/zivkovic2004PAMI.pdf
-CV_INLINE int _icvUpdateGMM(float* data, int nD,
- unsigned char* pModesUsed,
- CvPBGMMGaussian* pGMM,
- int m_nM,
- float m_fAlphaT,
- float m_fTb,
- float m_fTB,
- float m_fTg,
- float m_fVarInit,
- float m_fVarMax,
- float m_fVarMin,
- float m_fPrune)
+struct MOG2Invoker
{
- //calculate distances to the modes (+ sort)
- //here we need to go in descending order!!!
- bool bBackground=0;//return value -> true - the pixel classified as background
-
- //internal:
- bool bFitsPDF=0;//if it remains zero a new GMM mode will be added
- float m_fOneMinAlpha=1-m_fAlphaT;
- unsigned char nModes=*pModesUsed;//current number of modes in GMM
- float totalWeight=0.0f;
-
- //////
- //go through all modes
- int iMode=0;
- CvPBGMMGaussian* pGauss=pGMM;
- for (;iMode<nModes;iMode++,pGauss++)
+ MOG2Invoker(const Mat& _src, Mat& _dst,
+ GMM* _gmm, float* _mean,
+ uchar* _modesUsed,
+ int _nmixtures, float _alphaT,
+ float _Tb, float _TB, float _Tg,
+ float _varInit, float _varMin, float _varMax,
+ float _prune, float _tau, bool _detectShadows,
+ uchar _shadowVal)
{
- float weight = pGauss->weight;//need only weight if fit is found
- weight=m_fOneMinAlpha*weight+m_fPrune;
-
- ////
- //fit not found yet
- if (!bFitsPDF)
- {
- //check if it belongs to some of the remaining modes
- float var=pGauss->variance;
-
- //calculate difference and distance
- float dist2=0.0f;
-#if (CV_BGFG_MOG2_NDMAX==1)
- float dData=pGauss->mean[0]-data[0];
- dist2=dData*dData;
-#else
- float dData[CV_BGFG_MOG2_NDMAX];
-
- for (int iD=0;iD<nD;iD++)
- {
- dData[iD]=pGauss->mean[iD]-data[iD];
- dist2+=dData[iD]*dData[iD];
- }
-#endif
- //background? - m_fTb - usually larger than m_fTg
- if ((totalWeight<m_fTB)&&(dist2<m_fTb*var))
- bBackground=1;
-
- //check fit
- if (dist2<m_fTg*var)
- {
- /////
- //belongs to the mode - bFitsPDF becomes 1
- bFitsPDF=1;
-
- //update distribution
-
- //update weight
- weight+=m_fAlphaT;
-
- float k = m_fAlphaT/weight;
-
- //update mean
-#if (CV_BGFG_MOG2_NDMAX==1)
- pGauss->mean[0]-=k*dData;
-#else
- for (int iD=0;iD<nD;iD++)
- {
- pGauss->mean[iD]-=k*dData[iD];
- }
-#endif
-
- //update variance
- float varnew = var + k*(dist2-var);
- //limit the variance
- pGauss->variance = MIN(m_fVarMax,MAX(varnew,m_fVarMin));
-
- //sort
- //all other weights are at the same place and
- //only the matched (iModes) is higher -> just find the new place for it
- for (int iLocal = iMode;iLocal>0;iLocal--)
- {
- //check one up
- if (weight < (pGMM[iLocal-1].weight))
- {
- break;
- }
- else
- {
- //swap one up
- CvPBGMMGaussian temp = pGMM[iLocal];
- pGMM[iLocal] = pGMM[iLocal-1];
- pGMM[iLocal-1] = temp;
- pGauss--;
- }
- }
- //belongs to the mode - bFitsPDF becomes 1
- /////
- }
- }//!bFitsPDF)
-
- //check prune
- if (weight<-m_fPrune)
- {
- weight=0.0;
- nModes--;
- }
-
- pGauss->weight=weight;//update weight by the calculated value
- totalWeight+=weight;
- }
- //go through all modes
- //////
-
- //renormalize weights
- for (iMode = 0; iMode < nModes; iMode++)
- {
- pGMM[iMode].weight = pGMM[iMode].weight/totalWeight;
- }
+ src = &_src;
+ dst = &_dst;
+ gmm0 = _gmm;
+ mean0 = _mean;
+ modesUsed0 = _modesUsed;
+ nmixtures = _nmixtures;
+ alphaT = _alphaT;
+ Tb = _Tb;
+ TB = _TB;
+ Tg = _Tg;
+ varInit = _varInit;
+ varMin = MIN(_varMin, _varMax);
+ varMax = MAX(_varMin, _varMax);
+ prune = _prune;
+ tau = _tau;
+ detectShadows = _detectShadows;
+ shadowVal = _shadowVal;
- //make new mode if needed and exit
- if (!bFitsPDF)
- {
- if (nModes==m_nM)
- {
- //replace the weakest
- pGauss=pGMM+m_nM-1;
- }
- else
- {
- //add a new one
- pGauss=pGMM+nModes;
- nModes++;
- }
-
- if (nModes==1)
- {
- pGauss->weight=1;
- }
- else
- {
- pGauss->weight=m_fAlphaT;
-
- //renormalize all weights
- for (iMode = 0; iMode < nModes-1; iMode++)
- {
- pGMM[iMode].weight *=m_fOneMinAlpha;
- }
- }
-
- //init
- memcpy(pGauss->mean,data,nD*sizeof(float));
- pGauss->variance=m_fVarInit;
-
- //sort
- //find the new place for it
- for (int iLocal = nModes-1;iLocal>0;iLocal--)
- {
- //check one up
- if (m_fAlphaT < (pGMM[iLocal-1].weight))
- {
- break;
- }
- else
- {
- //swap one up
- CvPBGMMGaussian temp = pGMM[iLocal];
- pGMM[iLocal] = pGMM[iLocal-1];
- pGMM[iLocal-1] = temp;
- }
- }
+ cvtfunc = src->depth() != CV_32F ? getConvertFunc(src->depth(), CV_32F) : 0;
}
-
- //set the number of modes
- *pModesUsed=nModes;
-
- return bBackground;
-}
-
-// a bit more efficient implementation for common case of 3 channel (rgb) images
-CV_INLINE int _icvUpdateGMM_C3(float r,float g, float b,
- unsigned char* pModesUsed,
- CvPBGMMGaussian* pGMM,
- int m_nM,
- float m_fAlphaT,
- float m_fTb,
- float m_fTB,
- float m_fTg,
- float m_fVarInit,
- float m_fVarMax,
- float m_fVarMin,
- float m_fPrune)
-{
- //calculate distances to the modes (+ sort)
- //here we need to go in descending order!!!
- bool bBackground=0;//return value -> true - the pixel classified as background
-
- //internal:
- bool bFitsPDF=0;//if it remains zero a new GMM mode will be added
- float m_fOneMinAlpha=1-m_fAlphaT;
- unsigned char nModes=*pModesUsed;//current number of modes in GMM
- float totalWeight=0.0f;
-
- //////
- //go through all modes
- int iMode=0;
- CvPBGMMGaussian* pGauss=pGMM;
- for (;iMode<nModes;iMode++,pGauss++)
+
+ void operator()(const BlockedRange& range) const
{
- float weight = pGauss->weight;//need only weight if fit is found
- weight=m_fOneMinAlpha*weight+m_fPrune;
-
- ////
- //fit not found yet
- if (!bFitsPDF)
- {
- //check if it belongs to some of the remaining modes
- float var=pGauss->variance;
-
- //calculate difference and distance
- float muR = pGauss->mean[0];
- float muG = pGauss->mean[1];
- float muB = pGauss->mean[2];
+ int y0 = range.begin(), y1 = range.end();
+ int ncols = src->cols, nchannels = src->channels();
+ AutoBuffer<float> buf(src->cols*nchannels);
+ float alpha1 = 1.f - alphaT;
+ float dData[CV_CN_MAX];
- float dR=muR - r;
- float dG=muG - g;
- float dB=muB - b;
-
- float dist2=(dR*dR+dG*dG+dB*dB);
+ for( int y = y0; y < y1; y++ )
+ {
+ const float* data = buf;
+ if( cvtfunc )
+ cvtfunc( src->ptr(y), src->step, 0, 0, (uchar*)data, 0, Size(ncols*nchannels, 1), 0);
+ else
+ data = src->ptr<float>(y);
- //background? - m_fTb - usually larger than m_fTg
- if ((totalWeight<m_fTB)&&(dist2<m_fTb*var))
- bBackground=1;
-
- //check fit
- if (dist2<m_fTg*var)
+ float* mean = mean0 + ncols*nmixtures*nchannels*y;
+ GMM* gmm = gmm0 + ncols*nmixtures*y;
+ uchar* modesUsed = modesUsed0 + ncols*y;
+ uchar* mask = dst->ptr(y);
+
+ for( int x = 0; x < ncols; x++, data += nchannels, gmm += nmixtures, mean += nmixtures*nchannels )
{
- /////
- //belongs to the mode - bFitsPDF becomes 1
- bFitsPDF=1;
-
- //update distribution
+ //calculate distances to the modes (+ sort)
+ //here we need to go in descending order!!!
+ bool background = false;//return value -> true - the pixel classified as background
- //update weight
- weight+=m_fAlphaT;
+ //internal:
+ bool fitsPDF = false;//if it remains zero a new GMM mode will be added
+ int nmodes = modesUsed[x], nNewModes = nmodes;//current number of modes in GMM
+ float totalWeight = 0.f;
- float k = m_fAlphaT/weight;
-
- //update mean
- pGauss->mean[0] = muR - k*(dR);
- pGauss->mean[1] = muG - k*(dG);
- pGauss->mean[2] = muB - k*(dB);
-
- //update variance
- float varnew = var + k*(dist2-var);
- //limit the variance
- pGauss->variance = MIN(m_fVarMax,MAX(varnew,m_fVarMin));
-
- //sort
- //all other weights are at the same place and
- //only the matched (iModes) is higher -> just find the new place for it
- for (int iLocal = iMode;iLocal>0;iLocal--)
+ float* mean_m = mean;
+
+ //////
+ //go through all modes
+ for( int mode = 0; mode < nmodes; mode++, mean_m += nchannels )
{
- //check one up
- if (weight < (pGMM[iLocal-1].weight))
+ float weight = alpha1*gmm[mode].weight + prune;//need only weight if fit is found
+
+ ////
+ //fit not found yet
+ if( !fitsPDF )
{
- break;
- }
- else
+ //check if it belongs to some of the remaining modes
+ float var = gmm[mode].variance;
+
+ //calculate difference and distance
+ float dist2;
+
+ if( nchannels == 3 )
+ {
+ dData[0] = mean_m[0] - data[0];
+ dData[1] = mean_m[1] - data[1];
+ dData[2] = mean_m[2] - data[2];
+ dist2 = dData[0]*dData[0] + dData[1]*dData[1] + dData[2]*dData[2];
+ }
+ else
+ {
+ dist2 = 0.f;
+ for( int c = 0; c < nchannels; c++ )
+ {
+ dData[c] = mean_m[c] - data[c];
+ dist2 += dData[c]*dData[c];
+ }
+ }
+
+ //background? - Tb - usually larger than Tg
+ if( totalWeight < TB && dist2 < Tb*var )
+ background = true;
+
+ //check fit
+ if( dist2 < Tg*var )
+ {
+ /////
+ //belongs to the mode
+ fitsPDF = true;
+
+ //update distribution
+
+ //update weight
+ weight += alphaT;
+ float k = alphaT/weight;
+
+ //update mean
+ for( int c = 0; c < nchannels; c++ )
+ mean_m[c] -= k*dData[c];
+
+ //update variance
+ float varnew = var + k*(dist2-var);
+ //limit the variance
+ varnew = MAX(varnew, varMin);
+ varnew = MIN(varnew, varMax);
+ gmm[mode].variance = varnew;
+
+ //sort
+ //all other weights are at the same place and
+ //only the matched (iModes) is higher -> just find the new place for it
+ for( int i = mode; i > 0; i-- )
+ {
+ //check one up
+ if( weight < gmm[i-1].weight )
+ break;
+
+ //swap one up
+ std::swap(gmm[i], gmm[i-1]);
+ for( int c = 0; c < nchannels; c++ )
+ std::swap(mean[i*nchannels + c], mean[(i-1)*nchannels + c]);
+ }
+ //belongs to the mode - bFitsPDF becomes 1
+ /////
+ }
+ }//!bFitsPDF)
+
+ //check prune
+ if( weight < -prune )
{
- //swap one up
- CvPBGMMGaussian temp = pGMM[iLocal];
- pGMM[iLocal] = pGMM[iLocal-1];
- pGMM[iLocal-1] = temp;
- pGauss--;
+ weight = 0.0;
+ nmodes--;
}
+
+ gmm[mode].weight = weight;//update weight by the calculated value
+ totalWeight += weight;
}
- //belongs to the mode - bFitsPDF becomes 1
- /////
- }
-
- }//!bFitsPDF)
-
- //check prunning
- if (weight<-m_fPrune)
- {
- weight=0.0;
- nModes--;
- }
-
- pGauss->weight=weight;
- totalWeight+=weight;
- }
- //go through all modes
- //////
-
- //renormalize weights
- for (iMode = 0; iMode < nModes; iMode++)
- {
- pGMM[iMode].weight = pGMM[iMode].weight/totalWeight;
- }
-
- //make new mode if needed and exit
- if (!bFitsPDF)
- {
- if (nModes==m_nM)
- {
- //replace the weakest
- pGauss=pGMM+m_nM-1;
- }
- else
- {
- //add a new one
- pGauss=pGMM+nModes;
- nModes++;
- }
-
- if (nModes==1)
- {
- pGauss->weight=1;
- }
- else
- {
- pGauss->weight=m_fAlphaT;
-
- //renormalize all weights
- for (iMode = 0; iMode < nModes-1; iMode++)
- {
- pGMM[iMode].weight *=m_fOneMinAlpha;
- }
- }
-
- //init
- pGauss->mean[0]=r;
- pGauss->mean[1]=g;
- pGauss->mean[2]=b;
-
- pGauss->variance=m_fVarInit;
-
- //sort
- //find the new place for it
- for (int iLocal = nModes-1;iLocal>0;iLocal--)
- {
- //check one up
- if (m_fAlphaT < (pGMM[iLocal-1].weight))
+ //go through all modes
+ //////
+
+ //renormalize weights
+ totalWeight = 1.f/totalWeight;
+ for( int mode = 0; mode < nmodes; mode++ )
+ {
+ gmm[mode].weight *= totalWeight;
+ }
+
+ nmodes = nNewModes;
+
+ //make new mode if needed and exit
+ if( !fitsPDF )
+ {
+ // replace the weakest or add a new one
+ int mode = nmodes == nmixtures ? nmixtures-1 : nmodes++;
+
+ if (nmodes==1)
+ gmm[mode].weight = 1.f;
+ else
{
- break;
+ gmm[mode].weight = alphaT;
+
+ // renormalize all other weights
+ for( int i = 0; i < nmodes-1; i++ )
+ gmm[i].weight *= alpha1;
}
- else
+
+ // init
+ for( int c = 0; c < nchannels; c++ )
+ mean[mode*nchannels + c] = data[c];
+
+ gmm[mode].variance = varInit;
+
+ //sort
+ //find the new place for it
+ for( int i = nmodes - 1; i > 0; i-- )
{
- //swap one up
- CvPBGMMGaussian temp = pGMM[iLocal];
- pGMM[iLocal] = pGMM[iLocal-1];
- pGMM[iLocal-1] = temp;
+ // check one up
+ if( alphaT < gmm[i-1].weight )
+ break;
+
+ // swap one up
+ std::swap(gmm[i], gmm[i-1]);
+ for( int c = 0; c < nchannels; c++ )
+ std::swap(mean[i*nchannels + c], mean[(i-1)*nchannels + c]);
}
- }
- }
-
- //set the number of modes
- *pModesUsed=nModes;
-
- return bBackground;
-}
-
-//the main function to update the background model
-static void icvUpdatePixelBackgroundGMM2( const CvArr* srcarr, CvArr* dstarr ,
- CvPBGMMGaussian *pGMM,
- unsigned char *pUsedModes,
- //CvGaussBGStatModel2Params* pGMMPar,
- int nM,
- float fTb,
- float fTB,
- float fTg,
- float fVarInit,
- float fVarMax,
- float fVarMin,
- float fCT,
- float fTau,
- bool bShadowDetection,
- unsigned char nShadowDetection,
- float alpha)
-{
- CvMat sstub, *src = cvGetMat(srcarr, &sstub);
- CvMat dstub, *dst = cvGetMat(dstarr, &dstub);
- CvSize size = cvGetMatSize(src);
- int nD=CV_MAT_CN(src->type);
-
- //reshape if possible
- if( CV_IS_MAT_CONT(src->type & dst->type) )
- {
- size.width *= size.height;
- size.height = 1;
- }
-
- int x, y;
- float data[CV_BGFG_MOG2_NDMAX];
- float prune=-alpha*fCT;
-
- //general nD
-
- if (nD!=3)
- {
- switch (CV_MAT_DEPTH(src->type))
- {
- case CV_8U:
- for( y = 0; y < size.height; y++ )
- {
- uchar* sptr = src->data.ptr + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
- //update GMM model
- int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_16S:
- for( y = 0; y < size.height; y++ )
- {
- short* sptr = src->data.s + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
- //update GMM model
- int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_16U:
- for( y = 0; y < size.height; y++ )
- {
- unsigned short* sptr = (unsigned short*) (src->data.s + src->step*y);
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
- //update GMM model
- int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_32S:
- for( y = 0; y < size.height; y++ )
- {
- int* sptr = src->data.i + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
- //update GMM model
- int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_32F:
- for( y = 0; y < size.height; y++ )
- {
- float* sptr = src->data.fl + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //update GMM model
- int result = _icvUpdateGMM(sptr,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_64F:
- for( y = 0; y < size.height; y++ )
- {
- double* sptr = src->data.db + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]);
- //update GMM model
- int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- }
- }else ///if (nD==3) - a bit faster
- {
- switch (CV_MAT_DEPTH(src->type))
- {
- case CV_8U:
- for( y = 0; y < size.height; y++ )
- {
- uchar* sptr = src->data.ptr + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
- //update GMM model
- int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_16S:
- for( y = 0; y < size.height; y++ )
- {
- short* sptr = src->data.s + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
- //update GMM model
- int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_16U:
- for( y = 0; y < size.height; y++ )
- {
- unsigned short* sptr = (unsigned short*) src->data.s + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
- //update GMM model
- int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_32S:
- for( y = 0; y < size.height; y++ )
- {
- int* sptr = src->data.i + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
- //update GMM model
- int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_32F:
- for( y = 0; y < size.height; y++ )
- {
- float* sptr = src->data.fl + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //update GMM model
- int result = _icvUpdateGMM_C3(sptr[0],sptr[1],sptr[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
- }
- }
- break;
- case CV_64F:
- for( y = 0; y < size.height; y++ )
- {
- double* sptr = src->data.db + src->step*y;
- uchar* pDataOutput = dst->data.ptr + dst->step*y;
- for( x = 0; x < size.width; x++,
- pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD)
- {
- //convert data
- data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]);
- //update GMM model
- int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune);
- //detect shadows in the foreground
- if (bShadowDetection)
- if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau);
- //generate output
- (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255;
+ }
+
+ //set the number of modes
+ modesUsed[x] = nmodes;
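+                //output encoding: 0 = background, shadowVal = detected shadow, 255 = foreground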
+ mask[x] = background ? 0 :
+ detectShadows && detectShadowGMM(data, nchannels, nmodes, gmm, mean, Tb, TB, tau) ?
+ shadowVal : 255;
}
}
- break;
}
- }//a bit faster for nD=3;
-}
-
-
-namespace cv
-{
-
-static const int defaultHistory2 = CV_BGFG_MOG2_WINDOW_SIZE;
-static const float defaultVarThreshold2 = CV_BGFG_MOG2_STD_THRESHOLD*CV_BGFG_MOG2_STD_THRESHOLD;
-static const int defaultNMixtures2 = CV_BGFG_MOG2_NGAUSSIANS;
-static const float defaultBackgroundRatio2 = CV_BGFG_MOG2_BACKGROUND_THRESHOLD;
-static const float defaultVarThresholdGen2 = CV_BGFG_MOG2_STD_THRESHOLD_GENERATE*CV_BGFG_MOG2_STD_THRESHOLD_GENERATE;
-static const float defaultVarInit2 = CV_BGFG_MOG2_VAR_INIT;
-static const float defaultVarMax2 = CV_BGFG_MOG2_VAR_MAX;
-static const float defaultVarMin2 = CV_BGFG_MOG2_VAR_MIN;
-static const float defaultfCT2 = CV_BGFG_MOG2_CT;
-static const unsigned char defaultnShadowDetection2 = (unsigned char)CV_BGFG_MOG2_SHADOW_VALUE;
-static const float defaultfTau = CV_BGFG_MOG2_SHADOW_TAU;
-
+
+ const Mat* src;
+ Mat* dst;
+ GMM* gmm0;
+ float* mean0;
+ uchar* modesUsed0;
+
+ int nmixtures;
+ float alphaT, Tb, TB, Tg;
+ float varInit, varMin, varMax, prune, tau;
+
+ bool detectShadows;
+ uchar shadowVal;
+
+ BinaryFunc cvtfunc;
+};
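
For orientation: the fields above belong to a functor driven by OpenCV's 2.4-era parallel_for. A minimal sketch of that pattern, with illustrative names (RowInvoker is not the real class, and the header location is an assumption):

    #include <opencv2/core/core.hpp>
    #include <opencv2/core/internal.hpp>  // assumed home of cv::BlockedRange / cv::parallel_for in 2.4

    // hypothetical functor: each worker processes the disjoint row range it is handed
    struct RowInvoker
    {
        explicit RowInvoker(cv::Mat& img) : img_(&img) {}

        void operator()(const cv::BlockedRange& range) const
        {
            for( int y = range.begin(); y < range.end(); y++ )
            {
                uchar* row = img_->ptr(y);
                for( int x = 0; x < img_->cols; x++ )
                    row[x] = (uchar)(255 - row[x]);  // stand-in per-pixel work
            }
        }

        cv::Mat* img_;
    };

    // usage: cv::parallel_for(cv::BlockedRange(0, img.rows), RowInvoker(img));
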
BackgroundSubtractorMOG2::BackgroundSubtractorMOG2()
{
nframes = 0;
int nchannels = CV_MAT_CN(frameType);
- CV_Assert( nchannels <= CV_BGFG_MOG2_NDMAX );
+ CV_Assert( nchannels <= CV_CN_MAX );
// for each gaussian mixture of each pixel bg model we store ...
// the mixture weight (w),
// the mean (nchannels values) and
// the covariance
- bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + CV_BGFG_MOG2_NDMAX), CV_32F );
+ bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + nchannels), CV_32F );
//make the array for keeping track of the used modes per pixel - all zeros at start
bgmodelUsedModes.create(frameSize,CV_8U);
bgmodelUsedModes = Scalar::all(0);
++nframes;
learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./min( 2*nframes, history );
CV_Assert(learningRate >= 0);
- CvMat _cimage = image, _cfgmask = fgmask;
- if (learningRate > 0)
- icvUpdatePixelBackgroundGMM2( &_cimage, &_cfgmask,
- (CvPBGMMGaussian*) bgmodel.data,
- bgmodelUsedModes.data,
- nmixtures,//nM
- varThreshold,//fTb
- backgroundRatio,//fTB
- varThresholdGen,//fTg,
- fVarInit,
- fVarMax,
- fVarMin,
- fCT,
- fTau,
- bShadowDetection,
- nShadowDetection,
- float(learningRate));
+ if (learningRate > 0)
+ {
+ parallel_for(BlockedRange(0, image.rows),
+ MOG2Invoker(image, fgmask,
+ (GMM*)bgmodel.data,
+ (float*)(bgmodel.data + sizeof(GMM)*nmixtures*image.rows*image.cols),
+ bgmodelUsedModes.data, nmixtures, (float)learningRate,
+ (float)varThreshold,
+ backgroundRatio, varThresholdGen,
+ fVarInit, fVarMin, fVarMax, fCT, fTau,
+ bShadowDetection, nShadowDetection));
+ }
}
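
A note on the pointer arithmetic in the call above: after this change bgmodel is a single flat CV_32F buffer holding every pixel's (weight, variance) records first, followed by every pixel's means. A self-contained sketch of that assumed layout:

    struct GMM { float weight; float variance; };  // mirrors the 2-float record in this patch

    // means start immediately after rows*cols*nmixtures GMM records
    static float* meansBase(unsigned char* bgmodelData, int rows, int cols, int nmixtures)
    {
        return (float*)(bgmodelData + sizeof(GMM)*rows*cols*nmixtures);
    }

    // mean vector of a given pixel/mixture pair
    static float* meanOf(unsigned char* bgmodelData, int rows, int cols,
                         int nmixtures, int nchannels, int pixel, int mixture)
    {
        return meansBase(bgmodelData, rows, cols, nmixtures)
               + (pixel*nmixtures + mixture)*nchannels;
    }

This matches the allocation in the initialize hunk above: nmixtures*(2 + nchannels) floats per pixel.
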
void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage) const
{
-#if _MSC_VER >= 1200
- #pragma warning( push )
- #pragma warning( disable : 4127 )
-#endif
- CV_Assert(CV_BGFG_MOG2_NDMAX == 3);
-#if _MSC_VER >= 1200
- #pragma warning( pop )
-#endif
+ int nchannels = CV_MAT_CN(frameType);
+ CV_Assert( nchannels == 3 );
Mat meanBackground(frameSize, CV_8UC3, Scalar::all(0));
int firstGaussianIdx = 0;
- CvPBGMMGaussian* pGMM = (CvPBGMMGaussian*)bgmodel.data;
+ const GMM* gmm = (GMM*)bgmodel.data;
+ const Vec3f* mean = reinterpret_cast<const Vec3f*>(gmm + frameSize.width*frameSize.height*nmixtures);
for(int row=0; row<meanBackground.rows; row++)
{
for(int col=0; col<meanBackground.cols; col++)
{
- int nModes = static_cast<int>(bgmodelUsedModes.at<uchar>(row, col));
- double meanVal[CV_BGFG_MOG2_NDMAX] = {0.0, 0.0, 0.0};
-
- double totalWeight = 0.0;
- for(int gaussianIdx = firstGaussianIdx; gaussianIdx < firstGaussianIdx + nModes; gaussianIdx++)
+ int nmodes = bgmodelUsedModes.at<uchar>(row, col);
+ Vec3f meanVal;
+ float totalWeight = 0.f;
+ for(int gaussianIdx = firstGaussianIdx; gaussianIdx < firstGaussianIdx + nmodes; gaussianIdx++)
{
- CvPBGMMGaussian gaussian = pGMM[gaussianIdx];
+ GMM gaussian = gmm[gaussianIdx];
+ meanVal += gaussian.weight * mean[gaussianIdx];
totalWeight += gaussian.weight;
- for(int chIdx = 0; chIdx < CV_BGFG_MOG2_NDMAX; chIdx++)
- {
- meanVal[chIdx] += gaussian.weight * gaussian.mean[chIdx];
- }
-
if(totalWeight > backgroundRatio)
break;
}
- Vec3f val = Vec3f((float)meanVal[0], (float)meanVal[1], (float)meanVal[2]) * (float)(1.0 / totalWeight);
- meanBackground.at<Vec3b>(row, col) = Vec3b(val);
+ meanVal *= (1.f / totalWeight);
+ meanBackground.at<Vec3b>(row, col) = Vec3b(meanVal);
firstGaussianIdx += nmixtures;
}
}
switch(CV_MAT_CN(frameType))
{
- case 1:
- {
- vector<Mat> channels;
- split(meanBackground, channels);
- channels[0].copyTo(backgroundImage);
- break;
- }
+ case 1:
+ {
+ vector<Mat> channels;
+ split(meanBackground, channels);
+ channels[0].copyTo(backgroundImage);
+ break;
+ }
- case 3:
- {
- meanBackground.copyTo(backgroundImage);
- break;
- }
+ case 3:
+ {
+ meanBackground.copyTo(backgroundImage);
+ break;
+ }
- default:
- CV_Error(CV_StsUnsupportedFormat, "");
+ default:
+ CV_Error(CV_StsUnsupportedFormat, "");
}
}
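
A hedged usage sketch of the reworked class (standard OpenCV 2.4 C++ API; the capture source and window name are illustrative):

    #include <opencv2/video/background_segm.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        cv::VideoCapture cap(0);                 // default camera
        cv::BackgroundSubtractorMOG2 mog2;
        cv::Mat frame, fgmask, background;

        while( cap.read(frame) )
        {
            mog2(frame, fgmask, -1);             // negative learning rate = automatic
            mog2.getBackgroundImage(background); // weight-averaged background, as implemented above
            cv::imshow("foreground mask", fgmask);
            if( cv::waitKey(30) >= 0 )
                break;
        }
        return 0;
    }

Note that getBackgroundImage() as written asserts a 3-channel model, so this only applies to color input.
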
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+namespace cv
+{
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static Algorithm* createMOG()
+{
+ return new BackgroundSubtractorMOG;
+}
+
+static AlgorithmInfo& mog_info()
+{
+ static AlgorithmInfo mog_info_var("BackgroundSubtractor.MOG", createMOG);
+ return mog_info_var;
+}
+
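+// kept only for its side effect: constructing the AlgorithmInfo (and registering the name) at load time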
+static AlgorithmInfo& mog_info_auto = mog_info();
+
+AlgorithmInfo* BackgroundSubtractorMOG::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ BackgroundSubtractorMOG obj;
+
+ mog_info().addParam(obj, "history", obj.history);
+ mog_info().addParam(obj, "nmixtures", obj.nmixtures);
+ mog_info().addParam(obj, "backgroundRatio", obj.backgroundRatio);
+ mog_info().addParam(obj, "noiseSigma", obj.noiseSigma);
+
+ initialized = true;
+ }
+ return &mog_info();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static Algorithm* createMOG2()
+{
+ return new BackgroundSubtractorMOG2;
+}
+
+static AlgorithmInfo& mog2_info()
+{
+ static AlgorithmInfo mog2_info_var("BackgroundSubtractor.MOG2", createMOG2);
+ return mog2_info_var;
+}
+
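+// kept only for its side effect: constructing the AlgorithmInfo (and registering the name) at load time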
+static AlgorithmInfo& mog2_info_auto = mog2_info();
+
+AlgorithmInfo* BackgroundSubtractorMOG2::info() const
+{
+ static volatile bool initialized = false;
+ if( !initialized )
+ {
+ BackgroundSubtractorMOG2 obj;
+
+ mog2_info().addParam(obj, "history", obj.history);
+ mog2_info().addParam(obj, "varThreshold", obj.varThreshold);
+ mog2_info().addParam(obj, "detectShadows", obj.bShadowDetection);
+
+ initialized = true;
+ }
+ return &mog2_info();
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+bool initModule_video(void)
+{
+ Ptr<Algorithm> mog = createMOG(), mog2 = createMOG2();
+ return mog->info() != 0 && mog2->info() != 0;
+}
+
+}
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);\r
int cols = mRgba.cols();\r
int rows = mRgba.rows();\r
-\r
+
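+        // crop to multiples of 4 so the 4x4 puzzle grid tiles the frame evenly (assumed intent of this change)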
+ rows = rows - rows%4;
+ cols = cols - cols%4;
+
if (mCells == null)\r
createPuzzle(cols, rows);\r
\r
add_subdirectory(15-puzzle)
add_subdirectory(face-detection)
add_subdirectory(image-manipulations)
+add_subdirectory(color-blob-detection)
+
add_subdirectory(tutorial-0-androidcamera)
add_subdirectory(tutorial-1-addopencv)
add_subdirectory(tutorial-2-opencvcamera)
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry kind="src" path="src"/>
+ <classpathentry kind="src" path="gen"/>
+ <classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
+ <classpathentry kind="con" path="com.android.ide.eclipse.adt.LIBRARIES"/>
+ <classpathentry kind="output" path="bin/classes"/>
+</classpath>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>ColorBlobDetection</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ApkBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ </natures>
+</projectDescription>
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="org.opencv.example.colorblobdetect"
+ android:versionCode="1"
+ android:versionName="1.0" >
+
+ <uses-sdk android:minSdkVersion="8" />
+
+ <application
+ android:icon="@drawable/ic_launcher"
+ android:label="@string/app_name" >
+ <activity
+ android:name="org.opencv.samples.colorblobdetect.ColorBlobDetectionActivity"
+ android:label="@string/app_name" >
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ </application>
+
+ <uses-permission android:name="android.permission.CAMERA"/>
+ <uses-feature android:name="android.hardware.camera" />
+ <uses-feature android:name="android.hardware.camera.autofocus" />
+
+</manifest>
\ No newline at end of file
--- /dev/null
+set(sample example-color-blob-detection)
+
+add_android_project(${sample} "${CMAKE_CURRENT_SOURCE_DIR}" LIBRARY_DEPS ${OpenCV_BINARY_DIR} SDK_TARGET 11 ${ANDROID_SDK_TARGET})
+if(TARGET ${sample})
+ add_dependencies(opencv_android_examples ${sample})
+endif()
+
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:layout_width="fill_parent"
+ android:layout_height="fill_parent"
+ android:orientation="vertical" >
+
+ <TextView
+ android:layout_width="fill_parent"
+ android:layout_height="wrap_content"
+ android:text="@string/hello" />
+
+</LinearLayout>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+
+ <string name="hello">Hello World, ColorBlobDetectionActivity!</string>
+ <string name="app_name">ColorBlobDetection</string>
+
+</resources>
\ No newline at end of file
--- /dev/null
+package org.opencv.samples.colorblobdetect;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.Window;
+
+public class ColorBlobDetectionActivity extends Activity {
+
+    private static final String TAG = "Example/ColorBlobDetection";
+ private ColorBlobDetectionView mView;
+
+ public ColorBlobDetectionActivity()
+ {
+ Log.i(TAG, "Instantiated new " + this.getClass());
+ }
+
+ /** Called when the activity is first created. */
+ @Override
+ public void onCreate(Bundle savedInstanceState) {
+ Log.i(TAG, "onCreate");
+ super.onCreate(savedInstanceState);
+ requestWindowFeature(Window.FEATURE_NO_TITLE);
+ mView = new ColorBlobDetectionView(this);
+ setContentView(mView);
+ }
+}
\ No newline at end of file
--- /dev/null
+package org.opencv.samples.colorblobdetect;
+
+import org.opencv.android.Utils;
+import org.opencv.core.Mat;
+import org.opencv.highgui.Highgui;
+import org.opencv.highgui.VideoCapture;
+
+import android.content.Context;
+import android.graphics.Bitmap;
+import android.util.Log;
+import android.view.MotionEvent;
+import android.view.SurfaceHolder;
+import android.view.View;
+import android.view.View.OnTouchListener;
+
+public class ColorBlobDetectionView extends SampleCvViewBase implements
+ OnTouchListener {
+
+ private Mat mRgba;
+    private static final String TAG = "Example/ColorBlobDetection";
+
+ public ColorBlobDetectionView(Context context)
+ {
+ super(context);
+ setOnTouchListener(this);
+ }
+
+ @Override
+ public void surfaceChanged(SurfaceHolder _holder, int format, int width, int height) {
+ super.surfaceChanged(_holder, format, width, height);
+ synchronized (this) {
+ // initialize Mat before usage
+ mRgba = new Mat();
+ }
+ }
+
+ @Override
+ public boolean onTouch(View v, MotionEvent event)
+ {
+        // translate the touch position from view coordinates into frame coordinates
+ int cols = mRgba.cols();
+ int rows = mRgba.rows();
+ int xoffset = (getWidth() - cols) / 2;
+ int yoffset = (getHeight() - rows) / 2;
+
+ int x = (int)event.getX() - xoffset;
+ int y = (int)event.getY() - yoffset;
+
+        // Mat.get() takes (row, col), i.e. (y, x); ignore touches outside the frame
+        if (x < 0 || y < 0 || x >= cols || y >= rows)
+            return false;
+
+        double[] touchedColor = mRgba.get(y, x);
+
+        Log.i(TAG, "Touch coordinates: (" + x + ", " + y + ")");
+        Log.i(TAG, "Touched rgba color: (" + touchedColor[0] + ", " + touchedColor[1] +
+                ", " + touchedColor[2] + ", " + touchedColor[3] + ")");
+
+ return false; // don't need subsequent touch events
+ }
+
+ @Override
+ protected Bitmap processFrame(VideoCapture capture) {
+        // grab the current frame as RGBA and convert it to a Bitmap for drawing
+ capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
+
+ Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
+ try {
+ Utils.matToBitmap(mRgba, bmp);
+ } catch(Exception e) {
+ Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage());
+ bmp.recycle();
+ bmp = null;
+ }
+
+ return bmp;
+ }
+
+ @Override
+ public void run() {
+ super.run();
+
+ synchronized (this) {
+ // Explicitly deallocate Mats
+ if (mRgba != null)
+ mRgba.release();
+
+ mRgba = null;
+ }
+ }
+}
--- /dev/null
+package org.opencv.samples.colorblobdetect;
+
+import java.util.List;
+
+import org.opencv.core.Size;
+import org.opencv.highgui.VideoCapture;
+import org.opencv.highgui.Highgui;
+
+import android.content.Context;
+import android.graphics.Bitmap;
+import android.graphics.Canvas;
+import android.util.Log;
+import android.view.SurfaceHolder;
+import android.view.SurfaceView;
+
+public abstract class SampleCvViewBase extends SurfaceView implements SurfaceHolder.Callback, Runnable {
+ private static final String TAG = "Sample::SurfaceView";
+
+ private SurfaceHolder mHolder;
+ private VideoCapture mCamera;
+
+ public SampleCvViewBase(Context context) {
+ super(context);
+ mHolder = getHolder();
+ mHolder.addCallback(this);
+ Log.i(TAG, "Instantiated new " + this.getClass());
+ }
+
+ public void surfaceChanged(SurfaceHolder _holder, int format, int width, int height) {
+        Log.i(TAG, "surfaceChanged");
+ synchronized (this) {
+ if (mCamera != null && mCamera.isOpened()) {
+ Log.i(TAG, "before mCamera.getSupportedPreviewSizes()");
+ List<Size> sizes = mCamera.getSupportedPreviewSizes();
+ Log.i(TAG, "after mCamera.getSupportedPreviewSizes()");
+ int mFrameWidth = width;
+ int mFrameHeight = height;
+
+ // selecting optimal camera preview size
+ {
+ double minDiff = Double.MAX_VALUE;
+ for (Size size : sizes) {
+ if (Math.abs(size.height - height) < minDiff) {
+ mFrameWidth = (int) size.width;
+ mFrameHeight = (int) size.height;
+ minDiff = Math.abs(size.height - height);
+ }
+ }
+ }
+
+ mCamera.set(Highgui.CV_CAP_PROP_FRAME_WIDTH, mFrameWidth);
+ mCamera.set(Highgui.CV_CAP_PROP_FRAME_HEIGHT, mFrameHeight);
+ }
+ }
+ }
+
+ public void surfaceCreated(SurfaceHolder holder) {
+ Log.i(TAG, "surfaceCreated");
+ mCamera = new VideoCapture(Highgui.CV_CAP_ANDROID);
+ if (mCamera.isOpened()) {
+ (new Thread(this)).start();
+ } else {
+ mCamera.release();
+ mCamera = null;
+ Log.e(TAG, "Failed to open native camera");
+ }
+ }
+
+ public void surfaceDestroyed(SurfaceHolder holder) {
+ Log.i(TAG, "surfaceDestroyed");
+ if (mCamera != null) {
+ synchronized (this) {
+ mCamera.release();
+ mCamera = null;
+ }
+ }
+ }
+
+ protected abstract Bitmap processFrame(VideoCapture capture);
+
+ public void run() {
+ Log.i(TAG, "Starting processing thread");
+ while (true) {
+ Bitmap bmp = null;
+
+ synchronized (this) {
+ if (mCamera == null)
+ break;
+
+ if (!mCamera.grab()) {
+ Log.e(TAG, "mCamera.grab() failed");
+ break;
+ }
+
+ bmp = processFrame(mCamera);
+ }
+
+ if (bmp != null) {
+ Canvas canvas = mHolder.lockCanvas();
+ if (canvas != null) {
+ canvas.drawBitmap(bmp, (canvas.getWidth() - bmp.getWidth()) / 2, (canvas.getHeight() - bmp.getHeight()) / 2, null);
+ mHolder.unlockCanvasAndPost(canvas);
+ }
+ bmp.recycle();
+ }
+ }
+
+ Log.i(TAG, "Finishing processing thread");
+ }
+}
\ No newline at end of file
import android.view.Window;
public class ImageManipulationsActivity extends Activity {
- private static final String TAG = "Sample::Activity";
+ private static final String TAG = "Sample::Activity";
public static final int VIEW_MODE_RGBA = 0;
- public static final int VIEW_MODE_CANNY = 1;
- public static final int VIEW_MODE_SEPIA = 2;
- public static final int VIEW_MODE_SOBEL = 3;
- public static final int VIEW_MODE_BLUR = 4;
+ public static final int VIEW_MODE_HIST = 1;
+ public static final int VIEW_MODE_CANNY = 2;
+ public static final int VIEW_MODE_SEPIA = 3;
+ public static final int VIEW_MODE_SOBEL = 4;
public static final int VIEW_MODE_ZOOM = 5;
+ public static final int VIEW_MODE_PIXELIZE = 6;
+ public static final int VIEW_MODE_POSTERIZE = 7;
private MenuItem mItemPreviewRGBA;
+ private MenuItem mItemPreviewHist;
private MenuItem mItemPreviewCanny;
private MenuItem mItemPreviewSepia;
private MenuItem mItemPreviewSobel;
- private MenuItem mItemPreviewBlur;
private MenuItem mItemPreviewZoom;
+ private MenuItem mItemPreviewPixelize;
+ private MenuItem mItemPreviewPosterize;
- public static int viewMode = VIEW_MODE_RGBA;
+ public static int viewMode = VIEW_MODE_RGBA;
public ImageManipulationsActivity() {
Log.i(TAG, "Instantiated new " + this.getClass());
@Override
public boolean onCreateOptionsMenu(Menu menu) {
Log.i(TAG, "onCreateOptionsMenu");
- mItemPreviewRGBA = menu.add("Preview RGBA");
+ mItemPreviewRGBA = menu.add("Preview RGBA");
+ mItemPreviewHist = menu.add("Histograms");
mItemPreviewCanny = menu.add("Canny");
mItemPreviewSepia = menu.add("Sepia");
mItemPreviewSobel = menu.add("Sobel");
- mItemPreviewBlur = menu.add("Blur");
- mItemPreviewZoom = menu.add("Zoom");
+ mItemPreviewZoom = menu.add("Zoom");
+ mItemPreviewPixelize = menu.add("Pixelize");
+ mItemPreviewPosterize = menu.add("Posterize");
return true;
}
Log.i(TAG, "Menu Item selected " + item);
if (item == mItemPreviewRGBA)
viewMode = VIEW_MODE_RGBA;
+        else if (item == mItemPreviewHist)
+ viewMode = VIEW_MODE_HIST;
else if (item == mItemPreviewCanny)
viewMode = VIEW_MODE_CANNY;
else if (item == mItemPreviewSepia)
viewMode = VIEW_MODE_SEPIA;
else if (item == mItemPreviewSobel)
viewMode = VIEW_MODE_SOBEL;
- else if (item == mItemPreviewBlur)
- viewMode = VIEW_MODE_BLUR;
else if (item == mItemPreviewZoom)
viewMode = VIEW_MODE_ZOOM;
+ else if (item == mItemPreviewPixelize)
+ viewMode = VIEW_MODE_PIXELIZE;
+ else if (item == mItemPreviewPosterize)
+ viewMode = VIEW_MODE_POSTERIZE;
return true;
}
}
package org.opencv.samples.imagemanipulations;
+import java.util.Arrays;
+
import org.opencv.android.Utils;
import org.opencv.core.Core;
import org.opencv.core.Mat;
+import org.opencv.core.MatOfFloat;
+import org.opencv.core.MatOfInt;
import org.opencv.core.Size;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import android.view.SurfaceHolder;
class ImageManipulationsView extends SampleCvViewBase {
+ private Size mSize0;
+ private Size mSizeRgba;
+ private Size mSizeRgbaInner;
+
private Mat mRgba;
private Mat mGray;
private Mat mIntermediateMat;
+ private Mat mHist, mMat0;
+ private MatOfInt mChannels[], mHistSize;
+ private int mHistSizeNum;
+ private MatOfFloat mRanges;
+    private Scalar mColorsRGB[], mColorsHue[], mWhite;
+ private Point mP1, mP2;
+ float mBuff[];
private Mat mRgbaInnerWindow;
private Mat mGrayInnerWindow;
mGray = new Mat();
mRgba = new Mat();
mIntermediateMat = new Mat();
+ mSize0 = new Size();
+ mHist = new Mat();
+ mChannels = new MatOfInt[] { new MatOfInt(0), new MatOfInt(1), new MatOfInt(2) };
+ mHistSizeNum = 25;
+ mBuff = new float[mHistSizeNum];
+ mHistSize = new MatOfInt(mHistSizeNum);
+ mRanges = new MatOfFloat(0f, 256f);
+ mMat0 = new Mat();
+ mColorsRGB = new Scalar[] { new Scalar(200, 0, 0, 255), new Scalar(0, 200, 0, 255), new Scalar(0, 0, 200, 255) };
+ mColorsHue = new Scalar[] {
+ new Scalar(255, 0, 0, 255), new Scalar(255, 60, 0, 255), new Scalar(255, 120, 0, 255), new Scalar(255, 180, 0, 255), new Scalar(255, 240, 0, 255),
+ new Scalar(215, 213, 0, 255), new Scalar(150, 255, 0, 255), new Scalar(85, 255, 0, 255), new Scalar(20, 255, 0, 255), new Scalar(0, 255, 30, 255),
+ new Scalar(0, 255, 85, 255), new Scalar(0, 255, 150, 255), new Scalar(0, 255, 215, 255), new Scalar(0, 234, 255, 255), new Scalar(0, 170, 255, 255),
+ new Scalar(0, 120, 255, 255), new Scalar(0, 60, 255, 255), new Scalar(0, 0, 255, 255), new Scalar(64, 0, 255, 255), new Scalar(120, 0, 255, 255),
+ new Scalar(180, 0, 255, 255), new Scalar(255, 0, 255, 255), new Scalar(255, 0, 215, 255), new Scalar(255, 0, 85, 255), new Scalar(255, 0, 0, 255)
+ };
+        mWhite = Scalar.all(255);
+ mP1 = new Point();
+ mP2 = new Point();
}
}
if (mRgba.empty())
return;
- int rows = mRgba.rows();
- int cols = mRgba.cols();
+ mSizeRgba = mRgba.size();
+
+ int rows = (int) mSizeRgba.height;
+ int cols = (int) mSizeRgba.width;
int left = cols / 8;
int top = rows / 8;
if (mRgbaInnerWindow == null)
mRgbaInnerWindow = mRgba.submat(top, top + height, left, left + width);
+ mSizeRgbaInner = mRgbaInnerWindow.size();
if (mGrayInnerWindow == null && !mGray.empty())
mGrayInnerWindow = mGray.submat(top, top + height, left, left + width);
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
break;
+ case ImageManipulationsActivity.VIEW_MODE_HIST:
+ capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
+ if (mSizeRgba == null)
+ CreateAuxiliaryMats();
+            int thickness = (int) (mSizeRgba.width / (mHistSizeNum + 10) / 5);
+            if(thickness > 5) thickness = 5;
+            int offset = (int) ((mSizeRgba.width - (5*mHistSizeNum + 4*10)*thickness)/2);
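+            // bar layout: 5 groups (R, G, B, Value, Hue) of mHistSizeNum bars each,
+            // separated by 10-bar gaps; offset centers the whole plot horizontally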
+ // RGB
+ for(int c=0; c<3; c++) {
+ Imgproc.calcHist(Arrays.asList(mRgba), mChannels[c], mMat0, mHist, mHistSize, mRanges);
+ Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
+ mHist.get(0, 0, mBuff);
+ for(int h=0; h<mHistSizeNum; h++) {
+                    mP1.x = mP2.x = offset + (c * (mHistSizeNum + 10) + h) * thickness;
+ mP1.y = mSizeRgba.height-1;
+ mP2.y = mP1.y - 2 - (int)mBuff[h];
+                    Core.line(mRgba, mP1, mP2, mColorsRGB[c], thickness);
+ }
+ }
+ // Value and Hue
+ Imgproc.cvtColor(mRgba, mIntermediateMat, Imgproc.COLOR_RGB2HSV_FULL);
+ // Value
+ Imgproc.calcHist(Arrays.asList(mIntermediateMat), mChannels[2], mMat0, mHist, mHistSize, mRanges);
+ Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
+ mHist.get(0, 0, mBuff);
+ for(int h=0; h<mHistSizeNum; h++) {
+                mP1.x = mP2.x = offset + (3 * (mHistSizeNum + 10) + h) * thickness;
+ mP1.y = mSizeRgba.height-1;
+ mP2.y = mP1.y - 2 - (int)mBuff[h];
+                Core.line(mRgba, mP1, mP2, mWhite, thickness);
+ }
+ // Hue
+ Imgproc.calcHist(Arrays.asList(mIntermediateMat), mChannels[0], mMat0, mHist, mHistSize, mRanges);
+ Core.normalize(mHist, mHist, mSizeRgba.height/2, 0, Core.NORM_INF);
+ mHist.get(0, 0, mBuff);
+ for(int h=0; h<mHistSizeNum; h++) {
+                mP1.x = mP2.x = offset + (4 * (mHistSizeNum + 10) + h) * thickness;
+ mP1.y = mSizeRgba.height-1;
+ mP2.y = mP1.y - 2 - (int)mBuff[h];
+                Core.line(mRgba, mP1, mP2, mColorsHue[h], thickness);
+ }
+ break;
+
case ImageManipulationsActivity.VIEW_MODE_CANNY:
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
- capture.retrieve(mGray, Highgui.CV_CAP_ANDROID_GREY_FRAME);
if (mRgbaInnerWindow == null || mGrayInnerWindow == null)
CreateAuxiliaryMats();
-
- Imgproc.Canny(mGrayInnerWindow, mGrayInnerWindow, 80, 90);
- Imgproc.cvtColor(mGrayInnerWindow, mRgbaInnerWindow, Imgproc.COLOR_GRAY2BGRA, 4);
+ Imgproc.Canny(mRgbaInnerWindow, mIntermediateMat, 80, 90);
+ Imgproc.cvtColor(mIntermediateMat, mRgbaInnerWindow, Imgproc.COLOR_GRAY2BGRA, 4);
break;
case ImageManipulationsActivity.VIEW_MODE_SOBEL:
Core.transform(mRgba, mRgba, mSepiaKernel);
break;
- case ImageManipulationsActivity.VIEW_MODE_BLUR:
- capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
- if (mBlurWindow == null)
- CreateAuxiliaryMats();
- Imgproc.blur(mBlurWindow, mBlurWindow, new Size(15, 15));
- break;
-
case ImageManipulationsActivity.VIEW_MODE_ZOOM:
capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
if (mZoomCorner == null || mZoomWindow == null)
Size wsize = mZoomWindow.size();
Core.rectangle(mZoomWindow, new Point(1, 1), new Point(wsize.width - 2, wsize.height - 2), new Scalar(255, 0, 0, 255), 2);
break;
+
+ case ImageManipulationsActivity.VIEW_MODE_PIXELIZE:
+ capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
+ if (mRgbaInnerWindow == null)
+ CreateAuxiliaryMats();
+ Imgproc.resize(mRgbaInnerWindow, mIntermediateMat, mSize0, 0.1, 0.1, Imgproc.INTER_NEAREST);
+ Imgproc.resize(mIntermediateMat, mRgbaInnerWindow, mSizeRgbaInner, 0., 0., Imgproc.INTER_NEAREST);
+ break;
+
+ case ImageManipulationsActivity.VIEW_MODE_POSTERIZE:
+ capture.retrieve(mRgba, Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA);
+ if (mRgbaInnerWindow == null)
+ CreateAuxiliaryMats();
+ /*
+ Imgproc.cvtColor(mRgbaInnerWindow, mIntermediateMat, Imgproc.COLOR_RGBA2RGB);
+ Imgproc.pyrMeanShiftFiltering(mIntermediateMat, mIntermediateMat, 5, 50);
+ Imgproc.cvtColor(mIntermediateMat, mRgbaInnerWindow, Imgproc.COLOR_RGB2RGBA);
+ */
+ Imgproc.Canny(mRgbaInnerWindow, mIntermediateMat, 80, 90);
+ mRgbaInnerWindow.setTo(new Scalar(0, 0, 0, 255), mIntermediateMat);
+ Core.convertScaleAbs(mRgbaInnerWindow, mIntermediateMat, 1./16, 0);
+ Core.convertScaleAbs(mIntermediateMat, mRgbaInnerWindow, 16, 0);
+ break;
}
Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
if (bmp != null) {
Canvas canvas = mHolder.lockCanvas();
if (canvas != null) {
- canvas.drawBitmap(bmp, (canvas.getWidth() - bmp.getWidth()) / 2, (canvas.getHeight() - bmp.getHeight()) / 2, null);
+ canvas.drawBitmap(bmp, (canvas.getWidth() - bmp.getWidth()) / 2, (canvas.getHeight() - bmp.getHeight()), null);
mFps.draw(canvas, (canvas.getWidth() - bmp.getWidth()) / 2, 0);
mHolder.unlockCanvasAndPost(canvas);
}
#include "opencv2/core/core.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>
const char* keys =
{
- "{c |camera |false | use camera or not}"
+ "{c |camera |true | use camera or not}"
"{fn|file_name|tree.avi | movie file }"
};
namedWindow("foreground image", CV_WINDOW_NORMAL);
namedWindow("mean background image", CV_WINDOW_NORMAL);
- BackgroundSubtractorMOG2 bg_model;
+ BackgroundSubtractorMOG2 bg_model;//(100, 3, 0.3, 5);
+
Mat img, fgmask, fgimg;
for(;;)
if( img.empty() )
break;
+ //cvtColor(_img, img, COLOR_BGR2GRAY);
+
if( fgimg.empty() )
fgimg.create(img.size(), img.type());
Mat trainSamples, trainClasses;
prepare_train_data( trainSamples, trainClasses );
- cv::EM em;
- cv::EM::Params params;
- params.nclusters = classColors.size();
- params.covMatType = cv::EM::COV_MAT_GENERIC;
- params.startStep = cv::EM::START_AUTO_STEP;
- params.termCrit = cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::COUNT, 10, 0.1);
+ vector<cv::EM> em_models(classColors.size());
- // learn classifier
- em.train( trainSamples, Mat(), params, &trainClasses );
+    CV_Assert((int)trainClasses.total() == trainSamples.rows);
+    CV_Assert(trainClasses.type() == CV_32SC1);
+
+ for(size_t modelIndex = 0; modelIndex < em_models.size(); modelIndex++)
+ {
+ const int componentCount = 3;
+ em_models[modelIndex] = EM(componentCount, cv::EM::COV_MAT_DIAGONAL);
+
+ Mat modelSamples;
+ for(int sampleIndex = 0; sampleIndex < trainSamples.rows; sampleIndex++)
+ {
+ if(trainClasses.at<int>(sampleIndex) == (int)modelIndex)
+ modelSamples.push_back(trainSamples.row(sampleIndex));
+ }
+
+ // learn models
+ if(!modelSamples.empty())
+ em_models[modelIndex].train(modelSamples);
+ }
+    // classify the coordinate plane points with the Bayes rule, i.e.
+    // y(x) = argmax_{i=1..modelsCount} likelihood_i(x)
Mat testSample(1, 2, CV_32FC1 );
for( int y = 0; y < img.rows; y += testStep )
{
testSample.at<float>(0) = (float)x;
testSample.at<float>(1) = (float)y;
- int response = (int)em.predict( testSample );
+        Mat logLikelihoods(1, (int)em_models.size(), CV_64FC1, Scalar(-DBL_MAX));
+ for(size_t modelIndex = 0; modelIndex < em_models.size(); modelIndex++)
+ {
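+            // EM::predict() returns a Vec2d; element 0 is the likelihood logarithm of the sample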
+ if(em_models[modelIndex].isTrained())
+ logLikelihoods.at<double>(modelIndex) = em_models[modelIndex].predict(testSample)[0];
+ }
+ Point maxLoc;
+ minMaxLoc(logLikelihoods, 0, 0, 0, &maxLoc);
+
+ int response = maxLoc.x;
circle( imgDst, Point(x,y), 2, classColors[response], 1 );
}
}
namedWindow("segmented", 1);
BackgroundSubtractorMOG bgsubtractor;
- bgsubtractor.noiseSigma = 10;
+ bgsubtractor.set("noiseSigma", 10);
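+    // the string-based setter works because this patch registers "noiseSigma" through the Algorithm machinery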
for(;;)
{
//hide the local functions in an anon namespace
namespace
{
-void help(char** av)
-{
- cout << "\nThis program justs gets you started reading images from video\n"
- "Usage:\n./" << av[0] << " <video device number>\n" << "q,Q,esc -- quit\n"
- << "space -- save frame\n\n"
- << "\tThis is a starter sample, to get you up and going in a copy pasta fashion\n"
- << "\tThe program captures frames from a camera connected to your computer.\n"
- << "\tTo find the video device number, try ls /dev/video* \n"
- << "\tYou may also pass a video file, like my_vide.avi instead of a device number"
- << "\n"
- << "DATA:\n"
- << "Generate a datamatrix from from http://datamatrix.kaywa.com/ \n"
- << " NOTE: This only handles strings of len 3 or less\n"
- << " Resize the screen to be large enough for your camera to see, and it should find an read it.\n\n"
- << endl;
-}
-
-int process(VideoCapture& capture)
-{
- std::vector<DataMatrixCode> codes;
- int n = 0;
- char filename[200];
- string window_name = "video | q or esc to quit";
- cout << "press space to save a picture. q or esc to quit" << endl;
- namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window;
- Mat frame;
- for (;;)
- {
- capture >> frame;
- if (frame.empty())
- break;
- cv::Mat gray;
- cv::cvtColor(frame,gray,CV_RGB2GRAY);
- findDataMatrix(gray, codes);
- drawDataMatrixCodes(codes, frame);
- imshow(window_name, frame);
- char key = (char) waitKey(5); //delay N millis, usually long enough to display and capture input
- switch (key)
+ void help(char** av)
{
- case 'q':
- case 'Q':
- case 27: //escape key
- return 0;
- case ' ': //Save an image
- sprintf(filename, "filename%.3d.jpg", n++);
- imwrite(filename, frame);
- cout << "Saved " << filename << endl;
- break;
- default:
- break;
+        cout << "\nThis program just gets you started reading images from video\n"
+        "Usage:\n./" << av[0] << " <video device number>\n" << "q,Q,esc -- quit\n"
+        << "space -- save frame\n\n"
+        << "\tThis is a starter sample, to get you up and going in a copy-paste fashion\n"
+        << "\tThe program captures frames from a camera connected to your computer.\n"
+        << "\tTo find the video device number, try ls /dev/video* \n"
+        << "\tYou may also pass a video file, like my_video.avi, instead of a device number"
+        << "\n"
+        << "DATA:\n"
+        << "Generate a datamatrix from http://datamatrix.kaywa.com/ \n"
+        << " NOTE: This only handles strings of length 3 or less\n"
+        << " Resize the screen to be large enough for your camera to see, and it should find and read it.\n\n"
+        << endl;
}
- }
- return 0;
-}
-
+
+ int process(VideoCapture& capture)
+ {
+ int n = 0;
+ char filename[200];
+ string window_name = "video | q or esc to quit";
+ cout << "press space to save a picture. q or esc to quit" << endl;
+ namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window;
+ Mat frame;
+ for (;;)
+ {
+ capture >> frame;
+ if (frame.empty())
+ break;
+ cv::Mat gray;
+ cv::cvtColor(frame,gray,CV_RGB2GRAY);
+ vector<string> codes;
+ Mat corners;
+ findDataMatrix(gray, codes, corners);
+ drawDataMatrixCodes(frame, codes, corners);
+ imshow(window_name, frame);
+ char key = (char) waitKey(5); //delay N millis, usually long enough to display and capture input
+ switch (key)
+ {
+ case 'q':
+ case 'Q':
+ case 27: //escape key
+ return 0;
+ case ' ': //Save an image
+ sprintf(filename, "filename%.3d.jpg", n++);
+ imwrite(filename, frame);
+ cout << "Saved " << filename << endl;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+ }
+
}
int main(int ac, char** av)
{
-
- if (ac != 2)
- {
- help(av);
- return 1;
- }
- std::string arg = av[1];
- VideoCapture capture(arg); //try to open string, this will attempt to open it as a video file
- if (!capture.isOpened()) //if this fails, try to open as a video camera, through the use of an integer param
- capture.open(atoi(arg.c_str()));
- if (!capture.isOpened())
- {
- cerr << "Failed to open a video device or video file!\n" << endl;
- help(av);
- return 1;
- }
- return process(capture);
+
+ if (ac != 2)
+ {
+ help(av);
+ return 1;
+ }
+ std::string arg = av[1];
+ VideoCapture capture(arg); //try to open string, this will attempt to open it as a video file
+ if (!capture.isOpened()) //if this fails, try to open as a video camera, through the use of an integer param
+ capture.open(atoi(arg.c_str()));
+ if (!capture.isOpened())
+ {
+ cerr << "Failed to open a video device or video file!\n" << endl;
+ help(av);
+ return 1;
+ }
+ return process(capture);
}
switch (key)\r
{\r
case 27:\r
- return 0;\r
\r
case 'A':\r
if (currentFrame > 0) \r
--- /dev/null
+help='''
+Data matrix detector sample.
+Usage:
+ video_dmtx {<video device number>|<video file name>}
+
+    Generate a datamatrix from http://datamatrix.kaywa.com/ and print it out.
+    NOTE: This only handles data matrices generated for text strings of at most 3 characters
+
+    Resize the screen to be large enough for your camera to see, and it should find and read it.
+
+Keyboard shortcuts:
+
+ q or ESC - exit
+ space - save current image as datamatrix<frame_number>.jpg
+'''
+
+import cv2
+import numpy as np
+import sys
+
+def data_matrix_demo(cap):
+ window_name = "Data Matrix Detector"
+ frame_number = 0
+ need_to_save = False
+
+ while 1:
+ ret, frame = cap.read()
+ if not ret:
+ break
+
+ gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+ codes, corners, dmtx = cv2.findDataMatrix(gray)
+
+ cv2.drawDataMatrixCodes(frame, codes, corners)
+ cv2.imshow(window_name, frame)
+
+ key = cv2.waitKey(30)
+ c = chr(key & 255)
+ if c in ['q', 'Q', chr(27)]:
+ break
+
+ if c == ' ':
+ need_to_save = True
+
+ if need_to_save and codes:
+ filename = ("datamatrix%03d.jpg" % frame_number)
+ cv2.imwrite(filename, frame)
+ print "Saved frame to " + filename
+ need_to_save = False
+
+ frame_number += 1
+
+
+if __name__ == '__main__':
+ print help
+
+ if len(sys.argv) == 1:
+ cap = cv2.VideoCapture(0)
+ else:
+ cap = cv2.VideoCapture(sys.argv[1])
+ if not cap.isOpened():
+ cap = cv2.VideoCapture(int(sys.argv[1]))
+
+ if not cap.isOpened():
+ print 'Cannot initialize video capture'
+ sys.exit(-1)
+
+ data_matrix_demo(cap)