From f62028dc160c9938d1841d78e76e6f15a38f7f38 Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Fri, 8 Jun 2012 03:15:08 +0000 Subject: [PATCH] Fixed mingw build. --- 3rdparty/libjasper/CMakeLists.txt | 32 +- 3rdparty/libjpeg/CMakeLists.txt | 23 +- 3rdparty/libpng/CMakeLists.txt | 20 +- 3rdparty/libtiff/CMakeLists.txt | 21 +- 3rdparty/libtiff/tif_config.h.cmakein | 2 +- 3rdparty/zlib/CMakeLists.txt | 15 +- 3rdparty/zlib/zconf.h.cmakein | 8 + CMakeLists.txt | 9 + apps/haartraining/cvboost.cpp | 35 +- apps/haartraining/cvhaarclassifier.cpp | 4 +- apps/haartraining/cvhaartraining.cpp | 3 +- cmake/OpenCVDetectCXXCompiler.cmake | 6 + cmake/OpenCVUtils.cmake | 40 ++ modules/calib3d/src/quadsubpix.cpp | 2 +- modules/contrib/src/retina.cpp | 472 +++++++++++---------- modules/core/src/system.cpp | 2 + modules/features2d/src/features2d_init.cpp | 2 +- modules/features2d/test/test_features2d.cpp | 4 +- modules/flann/include/opencv2/flann/defines.h | 2 +- .../flann/include/opencv2/flann/index_testing.h | 4 +- modules/highgui/CMakeLists.txt | 13 +- modules/highgui/src/cap_cmu.cpp | 2 +- modules/highgui/src/cap_dshow.cpp | 60 +-- modules/highgui/src/cap_vfw.cpp | 2 +- modules/highgui/src/window_w32.cpp | 184 ++++---- modules/imgproc/src/rotcalipers.cpp | 38 +- modules/legacy/src/extendededges.cpp | 63 +-- modules/objdetect/src/featurepyramid.cpp | 8 +- modules/objdetect/src/latentsvm.cpp | 6 +- modules/objdetect/src/lsvmparser.cpp | 34 +- modules/objdetect/src/matching.cpp | 20 +- .../opencv2/stitching/detail/motion_estimators.hpp | 34 +- modules/stitching/perf/perf_stich.cpp | 6 +- modules/stitching/src/exposure_compensate.cpp | 8 +- modules/stitching/src/stitcher.cpp | 4 +- modules/ts/include/opencv2/ts/ts.hpp | 3 + modules/ts/include/opencv2/ts/ts_gtest.h | 7 +- samples/c/blobtrack_sample.cpp | 10 +- samples/cpp/brief_match_test.cpp | 5 +- samples/cpp/build3dmodel.cpp | 6 +- samples/cpp/stitching_detailed.cpp | 4 +- 41 files changed, 631 insertions(+), 592 deletions(-) diff --git a/3rdparty/libjasper/CMakeLists.txt b/3rdparty/libjasper/CMakeLists.txt index 61bb6a8..223c523 100644 --- a/3rdparty/libjasper/CMakeLists.txt +++ b/3rdparty/libjasper/CMakeLists.txt @@ -7,31 +7,23 @@ project(${JASPER_LIBRARY}) add_definitions(-DEXCLUDE_MIF_SUPPORT -DEXCLUDE_PNM_SUPPORT -DEXCLUDE_BMP_SUPPORT -DEXCLUDE_RAS_SUPPORT -DEXCLUDE_JPG_SUPPORT -DEXCLUDE_PGX_SUPPORT) -# List of C++ files: ocv_include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -# The .cpp files: file(GLOB lib_srcs *.c) file(GLOB lib_hdrs *.h) file(GLOB lib_ext_hdrs jasper/*.h) # ---------------------------------------------------------------------------------- -# Define the library target: +# Define the library target: # ---------------------------------------------------------------------------------- add_library(${JASPER_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs} ${lib_ext_hdrs}) if(MSVC) - if(NOT ENABLE_NOISY_WARNINGS) - string(REPLACE "/W3" "/W0" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - string(REPLACE "/W4" "/W0" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - endif() add_definitions(-DJAS_WIN_MSVC_BUILD) endif() -if(CMAKE_COMPILER_IS_GNUCXX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-uninitialized") -endif() +ocv_warnings_disable(CMAKE_C_FLAGS -Wno-implicit-function-declaration -Wno-uninitialized -Wmissing-prototypes -Wmissing-declarations -Wunused) if(UNIX) if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) @@ -39,21 +31,17 @@ if(UNIX) endif() endif() -if(CMAKE_COMPILER_IS_GNUCXX AND NOT ENABLE_NOISY_WARNINGS) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} 
-Wno-implicit-function-declaration -Wno-unused") -endif() - set_target_properties(${JASPER_LIBRARY} - PROPERTIES - OUTPUT_NAME ${JASPER_LIBRARY} - DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" - ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}" - ) - + PROPERTIES + OUTPUT_NAME ${JASPER_LIBRARY} + DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" + ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}" + ) + if(ENABLE_SOLUTION_FOLDERS) set_target_properties(${JASPER_LIBRARY} PROPERTIES FOLDER "3rdparty") -endif() - +endif() + if(NOT BUILD_SHARED_LIBS) install(TARGETS ${JASPER_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main) endif() diff --git a/3rdparty/libjpeg/CMakeLists.txt b/3rdparty/libjpeg/CMakeLists.txt index a406d1c..3438544 100644 --- a/3rdparty/libjpeg/CMakeLists.txt +++ b/3rdparty/libjpeg/CMakeLists.txt @@ -4,24 +4,17 @@ # ---------------------------------------------------------------------------- project(${JPEG_LIBRARY}) -# List of C++ files: - ocv_include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -# The .cpp files: file(GLOB lib_srcs *.c) file(GLOB lib_hdrs *.h) # ---------------------------------------------------------------------------------- -# Define the library target: +# Define the library target: # ---------------------------------------------------------------------------------- add_library(${JPEG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs}) -if(MSVC) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W3") -endif() - if(UNIX) if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") @@ -33,15 +26,15 @@ if(CMAKE_COMPILER_IS_GNUCXX) endif() set_target_properties(${JPEG_LIBRARY} - PROPERTIES OUTPUT_NAME ${JPEG_LIBRARY} - DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" - ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH} - ) - + PROPERTIES OUTPUT_NAME ${JPEG_LIBRARY} + DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" + ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH} + ) + if(ENABLE_SOLUTION_FOLDERS) set_target_properties(${JPEG_LIBRARY} PROPERTIES FOLDER "3rdparty") -endif() - +endif() + if(NOT BUILD_SHARED_LIBS) install(TARGETS ${JPEG_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main) endif() diff --git a/3rdparty/libpng/CMakeLists.txt b/3rdparty/libpng/CMakeLists.txt index cb76610..449971b 100644 --- a/3rdparty/libpng/CMakeLists.txt +++ b/3rdparty/libpng/CMakeLists.txt @@ -4,23 +4,17 @@ # ---------------------------------------------------------------------------- project(${PNG_LIBRARY}) -# List of C++ files: - ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" ${ZLIB_INCLUDE_DIR}) file(GLOB lib_srcs *.c) file(GLOB lib_hdrs *.h) # ---------------------------------------------------------------------------------- -# Define the library target: +# Define the library target: # ---------------------------------------------------------------------------------- add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs}) -if(MSVC) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W3") -endif() - if(UNIX) if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") @@ -28,15 +22,15 @@ if(UNIX) endif() set_target_properties(${PNG_LIBRARY} - PROPERTIES OUTPUT_NAME ${PNG_LIBRARY} - DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" - ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}" - ) - + PROPERTIES OUTPUT_NAME ${PNG_LIBRARY} + DEBUG_POSTFIX 
"${OPENCV_DEBUG_POSTFIX}" + ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}" + ) + if(ENABLE_SOLUTION_FOLDERS) set_target_properties(${PNG_LIBRARY} PROPERTIES FOLDER "3rdparty") endif() - + if(NOT BUILD_SHARED_LIBS) install(TARGETS ${PNG_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main) endif() diff --git a/3rdparty/libtiff/CMakeLists.txt b/3rdparty/libtiff/CMakeLists.txt index 881eebd..dc55a6a 100644 --- a/3rdparty/libtiff/CMakeLists.txt +++ b/3rdparty/libtiff/CMakeLists.txt @@ -26,7 +26,6 @@ configure_file("${CMAKE_CURRENT_SOURCE_DIR}/tif_config.h.cmakein" ocv_include_directories("${CMAKE_CURRENT_SOURCE_DIR}" "${CMAKE_CURRENT_BINARY_DIR}" ${ZLIB_INCLUDE_DIR}) -# List of C++ files: set(lib_srcs tif_aux.c tif_close.c @@ -91,10 +90,8 @@ if(WIN32) list(APPEND lib_srcs tif_win32.c) endif(WIN32) -if(MSVC AND NOT ENABLE_NOISY_WARNINGS) - string(REPLACE "/W4" "/W0" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - string(REPLACE "/W4" "/W0" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") -endif() +ocv_warnings_disable(CMAKE_C_FLAGS -Wno-unused-but-set-variable -Wmissing-prototypes -Wmissing-declarations -Wundef) +ocv_warnings_disable(CMAKE_CXX_FLAGS -Wmissing-declarations) if(UNIX AND (CMAKE_COMPILER_IS_GNUCXX OR CV_ICC)) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") @@ -104,15 +101,15 @@ add_library(${TIFF_LIBRARY} STATIC ${lib_srcs}) target_link_libraries(${TIFF_LIBRARY} ${ZLIB_LIBRARIES}) set_target_properties(${TIFF_LIBRARY} - PROPERTIES - OUTPUT_NAME "${TIFF_LIBRARY}" - DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" - ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}" - ) - + PROPERTIES + OUTPUT_NAME "${TIFF_LIBRARY}" + DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" + ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/3rdparty/${OPENCV_LIB_INSTALL_PATH}" + ) + if(ENABLE_SOLUTION_FOLDERS) set_target_properties(${TIFF_LIBRARY} PROPERTIES FOLDER "3rdparty") -endif() +endif() if(NOT BUILD_SHARED_LIBS) install(TARGETS ${TIFF_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main) diff --git a/3rdparty/libtiff/tif_config.h.cmakein b/3rdparty/libtiff/tif_config.h.cmakein index abb583e..1e6bc04 100644 --- a/3rdparty/libtiff/tif_config.h.cmakein +++ b/3rdparty/libtiff/tif_config.h.cmakein @@ -143,7 +143,7 @@ /* Signed 64-bit type formatter */ /* Unsigned 64-bit type formatter */ -#ifdef _MSC_VER +#if defined _MSC_VER || defined __MINGW__ || defined __MINGW32__ # define TIFF_UINT64_FORMAT "%I64u" # define TIFF_SSIZE_FORMAT "%Iu" #else diff --git a/3rdparty/zlib/CMakeLists.txt b/3rdparty/zlib/CMakeLists.txt index c298f7d..25df533 100644 --- a/3rdparty/zlib/CMakeLists.txt +++ b/3rdparty/zlib/CMakeLists.txt @@ -5,20 +5,11 @@ project(${ZLIB_LIBRARY} C) -include(CheckTypeSize) include(CheckFunctionExists) include(CheckIncludeFile) include(CheckCSourceCompiles) # -# Check to see if we have large file support -# -check_type_size(off64_t OFF64_T) -if(HAVE_OFF64_T) - add_definitions(-D_LARGEFILE64_SOURCE=1) -endif() - -# # Check for fseeko # check_function_exists(fseeko HAVE_FSEEKO) @@ -82,9 +73,7 @@ if(UNIX) endif() endif() -if(MSVC AND NOT ENABLE_NOISY_WARNINGS) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4013") -endif() +ocv_warnings_disable(CMAKE_C_FLAGS /wd4013 -Wattributes -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations) set_target_properties(${ZLIB_LIBRARY} PROPERTIES OUTPUT_NAME ${ZLIB_LIBRARY} @@ -95,7 +84,7 @@ set_target_properties(${ZLIB_LIBRARY} PROPERTIES if(ENABLE_SOLUTION_FOLDERS) 
set_target_properties(${ZLIB_LIBRARY} PROPERTIES FOLDER "3rdparty") endif() - + if(NOT BUILD_SHARED_LIBS) install(TARGETS ${ZLIB_LIBRARY} ARCHIVE DESTINATION share/OpenCV/3rdparty/${OPENCV_LIB_INSTALL_PATH} COMPONENT main) endif() diff --git a/3rdparty/zlib/zconf.h.cmakein b/3rdparty/zlib/zconf.h.cmakein index 3ea5531..7f09011 100644 --- a/3rdparty/zlib/zconf.h.cmakein +++ b/3rdparty/zlib/zconf.h.cmakein @@ -414,6 +414,14 @@ typedef uLong FAR uLongf; # undef _LARGEFILE64_SOURCE #endif +#ifndef _LFS64_LARGEFILE +# define _LFS64_LARGEFILE 0 +#endif + +#ifndef _FILE_OFFSET_BITS +# define _FILE_OFFSET_BITS +#endif + #if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0 # define Z_LARGE #endif diff --git a/CMakeLists.txt b/CMakeLists.txt index a8d6ba0..1ef25bc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -329,6 +329,15 @@ if(UNIX) endif() endif() +# +# Check to see if we have large file support (needed by zlib) +# +include(CheckTypeSize) +check_type_size(off64_t OFF64_T) +if(HAVE_OFF64_T) + add_definitions(-D_LARGEFILE64_SOURCE=1) +endif() + include(cmake/OpenCVPCHSupport.cmake REQUIRED) include(cmake/OpenCVModule.cmake REQUIRED) diff --git a/apps/haartraining/cvboost.cpp b/apps/haartraining/cvboost.cpp index 88ada60..8dfd3dd 100644 --- a/apps/haartraining/cvboost.cpp +++ b/apps/haartraining/cvboost.cpp @@ -213,13 +213,10 @@ static int icvFindStumpThreshold_##suffix( float* curval = NULL; \ float curlerror = 0.0F; \ float currerror = 0.0F; \ - float wposl; \ - float wposr; \ \ int i = 0; \ int idx = 0; \ \ - wposl = wposr = 0.0F; \ if( *sumw == FLT_MAX ) \ { \ /* calculate sums */ \ @@ -298,8 +295,8 @@ static int icvFindStumpThreshold_##suffix( */ #define ICV_DEF_FIND_STUMP_THRESHOLD_MISC( suffix, type ) \ ICV_DEF_FIND_STUMP_THRESHOLD( misc_##suffix, type, \ - wposl = 0.5F * ( wl + wyl ); \ - wposr = 0.5F * ( wr + wyr ); \ + float wposl = 0.5F * ( wl + wyl ); \ + float wposr = 0.5F * ( wr + wyr ); \ curleft = 0.5F * ( 1.0F + curleft ); \ curright = 0.5F * ( 1.0F + curright ); \ curlerror = MIN( wposl, wl - wposl ); \ @@ -311,8 +308,8 @@ static int icvFindStumpThreshold_##suffix( */ #define ICV_DEF_FIND_STUMP_THRESHOLD_GINI( suffix, type ) \ ICV_DEF_FIND_STUMP_THRESHOLD( gini_##suffix, type, \ - wposl = 0.5F * ( wl + wyl ); \ - wposr = 0.5F * ( wr + wyr ); \ + float wposl = 0.5F * ( wl + wyl ); \ + float wposr = 0.5F * ( wr + wyr ); \ curleft = 0.5F * ( 1.0F + curleft ); \ curright = 0.5F * ( 1.0F + curright ); \ curlerror = 2.0F * wposl * ( 1.0F - curleft ); \ @@ -326,8 +323,8 @@ static int icvFindStumpThreshold_##suffix( */ #define ICV_DEF_FIND_STUMP_THRESHOLD_ENTROPY( suffix, type ) \ ICV_DEF_FIND_STUMP_THRESHOLD( entropy_##suffix, type, \ - wposl = 0.5F * ( wl + wyl ); \ - wposr = 0.5F * ( wr + wyr ); \ + float wposl = 0.5F * ( wl + wyl ); \ + float wposr = 0.5F * ( wr + wyr ); \ curleft = 0.5F * ( 1.0F + curleft ); \ curright = 0.5F * ( 1.0F + curright ); \ curlerror = currerror = 0.0F; \ @@ -1560,7 +1557,7 @@ CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses, CV_MAT2VEC( *trainClasses, ydata, ystep, m ); CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum ); - assert( m == trainnum ); + CV_Assert( m == trainnum ); idxnum = 0; idxstep = 0; @@ -1640,8 +1637,8 @@ float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals, CV_MAT2VEC( *trainClasses, ydata, ystep, ynum ); CV_MAT2VEC( *weights, wdata, wstep, wnum ); - assert( m == ynum ); - assert( m == wnum ); + CV_Assert( m == ynum ); + CV_Assert( m == wnum ); sumw = 0.0F; err = 0.0F; @@ -1808,8 +1805,8 @@ CvBoostTrainer* 
icvBoostStartTrainingLB( CvMat* trainClasses, CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum ); CV_MAT2VEC( *weights, wdata, wstep, wnum ); - assert( m == trainnum ); - assert( m == wnum ); + CV_Assert( m == trainnum ); + CV_Assert( m == wnum ); idxnum = 0; @@ -1889,9 +1886,9 @@ float icvBoostNextWeakClassifierLB( CvMat* weakEvalVals, CV_MAT2VEC( *weakTrainVals, traindata, trainstep, trainnum ); CV_MAT2VEC( *weights, wdata, wstep, wnum ); - assert( m == ynum ); - assert( m == wnum ); - assert( m == trainnum ); + CV_Assert( m == ynum ); + CV_Assert( m == wnum ); + CV_Assert( m == trainnum ); //assert( m == trainer->count ); for( i = 0; i < trainer->count; i++ ) @@ -1944,8 +1941,8 @@ float icvBoostNextWeakClassifierGAB( CvMat* weakEvalVals, CV_MAT2VEC( *trainClasses, ydata, ystep, ynum ); CV_MAT2VEC( *weights, wdata, wstep, wnum ); - assert( m == ynum ); - assert( m == wnum ); + CV_Assert( m == ynum ); + CV_Assert( m == wnum ); sumw = 0.0F; for( i = 0; i < trainer->count; i++ ) diff --git a/apps/haartraining/cvhaarclassifier.cpp b/apps/haartraining/cvhaarclassifier.cpp index af69bf1..f217976 100644 --- a/apps/haartraining/cvhaarclassifier.cpp +++ b/apps/haartraining/cvhaarclassifier.cpp @@ -525,9 +525,9 @@ float icvEvalTreeCascadeClassifierFilter( CvIntHaarClassifier* classifier, sum_t sum_type* tilted, float normfactor ) { CvTreeCascadeNode* ptr; - CvTreeCascadeClassifier* tree; + //CvTreeCascadeClassifier* tree; - tree = (CvTreeCascadeClassifier*) classifier; + //tree = (CvTreeCascadeClassifier*) classifier; diff --git a/apps/haartraining/cvhaartraining.cpp b/apps/haartraining/cvhaartraining.cpp index 253ffe6..b42929f 100644 --- a/apps/haartraining/cvhaartraining.cpp +++ b/apps/haartraining/cvhaartraining.cpp @@ -169,10 +169,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize, int dx = 0; int dy = 0; +#if 0 float factor = 1.0F; factor = ((float) winsize.width) * winsize.height / (24 * 24); -#if 0 + s0 = (int) (s0 * factor); s1 = (int) (s1 * factor); s2 = (int) (s2 * factor); diff --git a/cmake/OpenCVDetectCXXCompiler.cmake b/cmake/OpenCVDetectCXXCompiler.cmake index 2c4acd5..1c4746c 100644 --- a/cmake/OpenCVDetectCXXCompiler.cmake +++ b/cmake/OpenCVDetectCXXCompiler.cmake @@ -44,6 +44,12 @@ if(MSVC AND CMAKE_C_COMPILER MATCHES "icc") set(CV_ICC __INTEL_COMPILER_FOR_WINDOWS) endif() +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR (UNIX AND CV_ICC)) + set(CV_COMPILER_IS_GNU TRUE) +else() + set(CV_COMPILER_IS_GNU FALSE) +endif() + # ---------------------------------------------------------------------------- # Detect GNU version: # ---------------------------------------------------------------------------- diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index caeaa68..b5d207a 100644 --- a/cmake/OpenCVUtils.cmake +++ b/cmake/OpenCVUtils.cmake @@ -32,6 +32,46 @@ macro(ocv_clear_vars) endforeach() endmacro() +# turns off warnings +macro(ocv_warnings_disable) + if(NOT ENABLE_NOISY_WARNINGS) + set(_flag_vars "") + set(_msvc_warnings "") + set(_gxx_warnings "") + foreach(arg ${ARGN}) + if(arg MATCHES "^CMAKE_") + list(APPEND _flag_vars ${arg}) + elseif(arg MATCHES "^/wd") + list(APPEND _msvc_warnings ${arg}) + elseif(arg MATCHES "^-W") + list(APPEND _gxx_warnings ${arg}) + endif() + endforeach() + if(MSVC AND _msvc_warnings AND _flag_vars) + foreach(var ${_flag_vars}) + foreach(warning ${_msvc_warnings}) + set(${var} "${${var}} ${warning}") + endforeach() + endforeach() + elseif(CV_COMPILER_IS_GNU AND _gxx_warnings AND _flag_vars) 
+ foreach(var ${_flag_vars}) + foreach(warning ${_gxx_warnings}) + if(warning MATCHES "^-Wno-") + set(${var} "${${var}} ${warning}") + else() + string(REPLACE "${warning}" "" ${var} "${${var}}") + string(REPLACE "-W" "-Wno-" warning "${warning}") + set(${var} "${${var}} ${warning}") + endif() + endforeach() + endforeach() + endif() + unset(_flag_vars) + unset(_msvc_warnings) + unset(_gxx_warnings) + endif(NOT ENABLE_NOISY_WARNINGS) +endmacro() + # Provides an option that the user can optionally select. # Can accept condition to control when option is available for user. # Usage: diff --git a/modules/calib3d/src/quadsubpix.cpp b/modules/calib3d/src/quadsubpix.cpp index 2aec9d6..2f2dae3 100644 --- a/modules/calib3d/src/quadsubpix.cpp +++ b/modules/calib3d/src/quadsubpix.cpp @@ -252,7 +252,7 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size int black_thresh = histQuantile(hist, 0.45f); int white_thresh = histQuantile(hist, 0.55f); #else - int black_thresh, white_thresh; + int black_thresh = 0, white_thresh = 0; segment_hist_max(hist, black_thresh, white_thresh); #endif diff --git a/modules/contrib/src/retina.cpp b/modules/contrib/src/retina.cpp index c86f2d8..1280324 100644 --- a/modules/contrib/src/retina.cpp +++ b/modules/contrib/src/retina.cpp @@ -74,7 +74,7 @@ namespace cv { - + Retina::Retina(const cv::Size inputSize) { _retinaFilter = 0; @@ -94,19 +94,19 @@ Retina::~Retina() } /** -* retreive retina input buffer size +* retreive retina input buffer size */ Size Retina::inputSize(){return cv::Size(_retinaFilter->getInputNBcolumns(), _retinaFilter->getInputNBrows());} /** -* retreive retina output buffer size +* retreive retina output buffer size */ Size Retina::outputSize(){return cv::Size(_retinaFilter->getOutputNBcolumns(), _retinaFilter->getOutputNBrows());} void Retina::setColorSaturation(const bool saturateColors, const float colorSaturationValue) { - _retinaFilter->setColorSaturation(saturateColors, colorSaturationValue); + _retinaFilter->setColorSaturation(saturateColors, colorSaturationValue); } struct Retina::RetinaParameters Retina::getParameters(){return _retinaParameters;} @@ -121,71 +121,71 @@ void Retina::setup(std::string retinaParameterFile, const bool applyDefaultSetup setup(fs, applyDefaultSetupOnFailure); }catch(Exception &e) { - std::cout<<"Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>"<"< keeping current parameters"< keeping current parameters"<>_retinaParameters.OPLandIplParvo.colorMode; - currFn["normaliseOutput"]>>_retinaParameters.OPLandIplParvo.normaliseOutput; - currFn["photoreceptorsLocalAdaptationSensitivity"]>>_retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity; - currFn["photoreceptorsTemporalConstant"]>>_retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant; - currFn["photoreceptorsSpatialConstant"]>>_retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant; - currFn["horizontalCellsGain"]>>_retinaParameters.OPLandIplParvo.horizontalCellsGain; - currFn["hcellsTemporalConstant"]>>_retinaParameters.OPLandIplParvo.hcellsTemporalConstant; - currFn["hcellsSpatialConstant"]>>_retinaParameters.OPLandIplParvo.hcellsSpatialConstant; - currFn["ganglionCellsSensitivity"]>>_retinaParameters.OPLandIplParvo.ganglionCellsSensitivity; - setupOPLandIPLParvoChannel(_retinaParameters.OPLandIplParvo.colorMode, _retinaParameters.OPLandIplParvo.normaliseOutput, _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity, 
_retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant, _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant, _retinaParameters.OPLandIplParvo.horizontalCellsGain, _retinaParameters.OPLandIplParvo.hcellsTemporalConstant, _retinaParameters.OPLandIplParvo.hcellsSpatialConstant, _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity); - - // init retina IPL magno setup... update at the same time the parameters structure and the retina core - currFn=rootFn["IPLmagno"]; - currFn["normaliseOutput"]>>_retinaParameters.IplMagno.normaliseOutput; - currFn["parasolCells_beta"]>>_retinaParameters.IplMagno.parasolCells_beta; - currFn["parasolCells_tau"]>>_retinaParameters.IplMagno.parasolCells_tau; - currFn["parasolCells_k"]>>_retinaParameters.IplMagno.parasolCells_k; - currFn["amacrinCellsTemporalCutFrequency"]>>_retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency; - currFn["V0CompressionParameter"]>>_retinaParameters.IplMagno.V0CompressionParameter; - currFn["localAdaptintegration_tau"]>>_retinaParameters.IplMagno.localAdaptintegration_tau; - currFn["localAdaptintegration_k"]>>_retinaParameters.IplMagno.localAdaptintegration_k; - - setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency,_retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k); - - }catch(Exception &e) - { - std::cout<<"Retina::setup: resetting retina with default parameters"<"< keeping current parameters"<>_retinaParameters.OPLandIplParvo.colorMode; + currFn["normaliseOutput"]>>_retinaParameters.OPLandIplParvo.normaliseOutput; + currFn["photoreceptorsLocalAdaptationSensitivity"]>>_retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity; + currFn["photoreceptorsTemporalConstant"]>>_retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant; + currFn["photoreceptorsSpatialConstant"]>>_retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant; + currFn["horizontalCellsGain"]>>_retinaParameters.OPLandIplParvo.horizontalCellsGain; + currFn["hcellsTemporalConstant"]>>_retinaParameters.OPLandIplParvo.hcellsTemporalConstant; + currFn["hcellsSpatialConstant"]>>_retinaParameters.OPLandIplParvo.hcellsSpatialConstant; + currFn["ganglionCellsSensitivity"]>>_retinaParameters.OPLandIplParvo.ganglionCellsSensitivity; + setupOPLandIPLParvoChannel(_retinaParameters.OPLandIplParvo.colorMode, _retinaParameters.OPLandIplParvo.normaliseOutput, _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity, _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant, _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant, _retinaParameters.OPLandIplParvo.horizontalCellsGain, _retinaParameters.OPLandIplParvo.hcellsTemporalConstant, _retinaParameters.OPLandIplParvo.hcellsSpatialConstant, _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity); + + // init retina IPL magno setup... 
update at the same time the parameters structure and the retina core + currFn=rootFn["IPLmagno"]; + currFn["normaliseOutput"]>>_retinaParameters.IplMagno.normaliseOutput; + currFn["parasolCells_beta"]>>_retinaParameters.IplMagno.parasolCells_beta; + currFn["parasolCells_tau"]>>_retinaParameters.IplMagno.parasolCells_tau; + currFn["parasolCells_k"]>>_retinaParameters.IplMagno.parasolCells_k; + currFn["amacrinCellsTemporalCutFrequency"]>>_retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency; + currFn["V0CompressionParameter"]>>_retinaParameters.IplMagno.V0CompressionParameter; + currFn["localAdaptintegration_tau"]>>_retinaParameters.IplMagno.localAdaptintegration_tau; + currFn["localAdaptintegration_k"]>>_retinaParameters.IplMagno.localAdaptintegration_k; + + setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency,_retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k); + + }catch(Exception &e) + { + std::cout<<"Retina::setup: resetting retina with default parameters"<"< keeping current parameters"< colorMode : " << _retinaParameters.OPLandIplParvo.colorMode - << "\n==> normalizeParvoOutput :" << _retinaParameters.OPLandIplParvo.normaliseOutput - << "\n==> photoreceptorsLocalAdaptationSensitivity : " << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity - << "\n==> photoreceptorsTemporalConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant - << "\n==> photoreceptorsSpatialConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant - << "\n==> horizontalCellsGain : " << _retinaParameters.OPLandIplParvo.horizontalCellsGain - << "\n==> hcellsTemporalConstant : " << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant - << "\n==> hcellsSpatialConstant : " << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant - << "\n==> parvoGanglionCellsSensitivity : " << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity - <<"}\n"; - - // displaying IPL magno setup - outmessage<<"Current Retina instance setup :" - <<"\nIPLmagno"<<"{" - << "\n==> normaliseOutput : " << _retinaParameters.IplMagno.normaliseOutput - << "\n==> parasolCells_beta : " << _retinaParameters.IplMagno.parasolCells_beta - << "\n==> parasolCells_tau : " << _retinaParameters.IplMagno.parasolCells_tau - << "\n==> parasolCells_k : " << _retinaParameters.IplMagno.parasolCells_k - << "\n==> amacrinCellsTemporalCutFrequency : " << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency - << "\n==> V0CompressionParameter : " << _retinaParameters.IplMagno.V0CompressionParameter - << "\n==> localAdaptintegration_tau : " << _retinaParameters.IplMagno.localAdaptintegration_tau - << "\n==> localAdaptintegration_k : " << _retinaParameters.IplMagno.localAdaptintegration_k - <<"}"; - return outmessage.str(); + std::stringstream outmessage; + + // displaying OPL and IPL parvo setup + outmessage<<"Current Retina instance setup :" + <<"\nOPLandIPLparvo"<<"{" + << "\n==> colorMode : " << _retinaParameters.OPLandIplParvo.colorMode + << "\n==> normalizeParvoOutput :" << _retinaParameters.OPLandIplParvo.normaliseOutput + << "\n==> photoreceptorsLocalAdaptationSensitivity : " << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity + << "\n==> 
photoreceptorsTemporalConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant + << "\n==> photoreceptorsSpatialConstant : " << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant + << "\n==> horizontalCellsGain : " << _retinaParameters.OPLandIplParvo.horizontalCellsGain + << "\n==> hcellsTemporalConstant : " << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant + << "\n==> hcellsSpatialConstant : " << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant + << "\n==> parvoGanglionCellsSensitivity : " << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity + <<"}\n"; + + // displaying IPL magno setup + outmessage<<"Current Retina instance setup :" + <<"\nIPLmagno"<<"{" + << "\n==> normaliseOutput : " << _retinaParameters.IplMagno.normaliseOutput + << "\n==> parasolCells_beta : " << _retinaParameters.IplMagno.parasolCells_beta + << "\n==> parasolCells_tau : " << _retinaParameters.IplMagno.parasolCells_tau + << "\n==> parasolCells_k : " << _retinaParameters.IplMagno.parasolCells_k + << "\n==> amacrinCellsTemporalCutFrequency : " << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency + << "\n==> V0CompressionParameter : " << _retinaParameters.IplMagno.V0CompressionParameter + << "\n==> localAdaptintegration_tau : " << _retinaParameters.IplMagno.localAdaptintegration_tau + << "\n==> localAdaptintegration_k : " << _retinaParameters.IplMagno.localAdaptintegration_k + <<"}"; + return outmessage.str(); } void Retina::write( std::string fs ) const @@ -240,98 +240,98 @@ void Retina::write( std::string fs ) const void Retina::write( FileStorage& fs ) const { - if (!fs.isOpened()) - return; // basic error case - fs<<"OPLandIPLparvo"<<"{"; - fs << "colorMode" << _retinaParameters.OPLandIplParvo.colorMode; - fs << "normaliseOutput" << _retinaParameters.OPLandIplParvo.normaliseOutput; - fs << "photoreceptorsLocalAdaptationSensitivity" << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity; - fs << "photoreceptorsTemporalConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant; - fs << "photoreceptorsSpatialConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant; - fs << "horizontalCellsGain" << _retinaParameters.OPLandIplParvo.horizontalCellsGain; - fs << "hcellsTemporalConstant" << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant; - fs << "hcellsSpatialConstant" << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant; - fs << "ganglionCellsSensitivity" << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity; - fs << "}"; - fs<<"IPLmagno"<<"{"; - fs << "normaliseOutput" << _retinaParameters.IplMagno.normaliseOutput; - fs << "parasolCells_beta" << _retinaParameters.IplMagno.parasolCells_beta; - fs << "parasolCells_tau" << _retinaParameters.IplMagno.parasolCells_tau; - fs << "parasolCells_k" << _retinaParameters.IplMagno.parasolCells_k; - fs << "amacrinCellsTemporalCutFrequency" << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency; - fs << "V0CompressionParameter" << _retinaParameters.IplMagno.V0CompressionParameter; - fs << "localAdaptintegration_tau" << _retinaParameters.IplMagno.localAdaptintegration_tau; - fs << "localAdaptintegration_k" << _retinaParameters.IplMagno.localAdaptintegration_k; - fs<<"}"; + if (!fs.isOpened()) + return; // basic error case + fs<<"OPLandIPLparvo"<<"{"; + fs << "colorMode" << _retinaParameters.OPLandIplParvo.colorMode; + fs << "normaliseOutput" << _retinaParameters.OPLandIplParvo.normaliseOutput; + fs << 
"photoreceptorsLocalAdaptationSensitivity" << _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity; + fs << "photoreceptorsTemporalConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant; + fs << "photoreceptorsSpatialConstant" << _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant; + fs << "horizontalCellsGain" << _retinaParameters.OPLandIplParvo.horizontalCellsGain; + fs << "hcellsTemporalConstant" << _retinaParameters.OPLandIplParvo.hcellsTemporalConstant; + fs << "hcellsSpatialConstant" << _retinaParameters.OPLandIplParvo.hcellsSpatialConstant; + fs << "ganglionCellsSensitivity" << _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity; + fs << "}"; + fs<<"IPLmagno"<<"{"; + fs << "normaliseOutput" << _retinaParameters.IplMagno.normaliseOutput; + fs << "parasolCells_beta" << _retinaParameters.IplMagno.parasolCells_beta; + fs << "parasolCells_tau" << _retinaParameters.IplMagno.parasolCells_tau; + fs << "parasolCells_k" << _retinaParameters.IplMagno.parasolCells_k; + fs << "amacrinCellsTemporalCutFrequency" << _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency; + fs << "V0CompressionParameter" << _retinaParameters.IplMagno.V0CompressionParameter; + fs << "localAdaptintegration_tau" << _retinaParameters.IplMagno.localAdaptintegration_tau; + fs << "localAdaptintegration_k" << _retinaParameters.IplMagno.localAdaptintegration_k; + fs<<"}"; } void Retina::setupOPLandIPLParvoChannel(const bool colorMode, const bool normaliseOutput, const float photoreceptorsLocalAdaptationSensitivity, const float photoreceptorsTemporalConstant, const float photoreceptorsSpatialConstant, const float horizontalCellsGain, const float HcellsTemporalConstant, const float HcellsSpatialConstant, const float ganglionCellsSensitivity) { - // retina core parameters setup - _retinaFilter->setColorMode(colorMode); - _retinaFilter->setPhotoreceptorsLocalAdaptationSensitivity(photoreceptorsLocalAdaptationSensitivity); - _retinaFilter->setOPLandParvoParameters(0, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, HcellsTemporalConstant, HcellsSpatialConstant, ganglionCellsSensitivity); - _retinaFilter->setParvoGanglionCellsLocalAdaptationSensitivity(ganglionCellsSensitivity); - _retinaFilter->activateNormalizeParvoOutput_0_maxOutputValue(normaliseOutput); - + // retina core parameters setup + _retinaFilter->setColorMode(colorMode); + _retinaFilter->setPhotoreceptorsLocalAdaptationSensitivity(photoreceptorsLocalAdaptationSensitivity); + _retinaFilter->setOPLandParvoParameters(0, photoreceptorsTemporalConstant, photoreceptorsSpatialConstant, horizontalCellsGain, HcellsTemporalConstant, HcellsSpatialConstant, ganglionCellsSensitivity); + _retinaFilter->setParvoGanglionCellsLocalAdaptationSensitivity(ganglionCellsSensitivity); + _retinaFilter->activateNormalizeParvoOutput_0_maxOutputValue(normaliseOutput); + // update parameters struture - _retinaParameters.OPLandIplParvo.colorMode = colorMode; - _retinaParameters.OPLandIplParvo.normaliseOutput = normaliseOutput; - _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity = photoreceptorsLocalAdaptationSensitivity; - _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant = photoreceptorsTemporalConstant; - _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant = photoreceptorsSpatialConstant; - _retinaParameters.OPLandIplParvo.horizontalCellsGain = horizontalCellsGain; - _retinaParameters.OPLandIplParvo.hcellsTemporalConstant = HcellsTemporalConstant; 
- _retinaParameters.OPLandIplParvo.hcellsSpatialConstant = HcellsSpatialConstant; - _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity = ganglionCellsSensitivity; + _retinaParameters.OPLandIplParvo.colorMode = colorMode; + _retinaParameters.OPLandIplParvo.normaliseOutput = normaliseOutput; + _retinaParameters.OPLandIplParvo.photoreceptorsLocalAdaptationSensitivity = photoreceptorsLocalAdaptationSensitivity; + _retinaParameters.OPLandIplParvo.photoreceptorsTemporalConstant = photoreceptorsTemporalConstant; + _retinaParameters.OPLandIplParvo.photoreceptorsSpatialConstant = photoreceptorsSpatialConstant; + _retinaParameters.OPLandIplParvo.horizontalCellsGain = horizontalCellsGain; + _retinaParameters.OPLandIplParvo.hcellsTemporalConstant = HcellsTemporalConstant; + _retinaParameters.OPLandIplParvo.hcellsSpatialConstant = HcellsSpatialConstant; + _retinaParameters.OPLandIplParvo.ganglionCellsSensitivity = ganglionCellsSensitivity; } void Retina::setupIPLMagnoChannel(const bool normaliseOutput, const float parasolCells_beta, const float parasolCells_tau, const float parasolCells_k, const float amacrinCellsTemporalCutFrequency, const float V0CompressionParameter, const float localAdaptintegration_tau, const float localAdaptintegration_k) { - _retinaFilter->setMagnoCoefficientsTable(parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k); - _retinaFilter->activateNormalizeMagnoOutput_0_maxOutputValue(normaliseOutput); + _retinaFilter->setMagnoCoefficientsTable(parasolCells_beta, parasolCells_tau, parasolCells_k, amacrinCellsTemporalCutFrequency, V0CompressionParameter, localAdaptintegration_tau, localAdaptintegration_k); + _retinaFilter->activateNormalizeMagnoOutput_0_maxOutputValue(normaliseOutput); // update parameters struture - _retinaParameters.IplMagno.normaliseOutput = normaliseOutput; - _retinaParameters.IplMagno.parasolCells_beta = parasolCells_beta; - _retinaParameters.IplMagno.parasolCells_tau = parasolCells_tau; - _retinaParameters.IplMagno.parasolCells_k = parasolCells_k; - _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency = amacrinCellsTemporalCutFrequency; - _retinaParameters.IplMagno.V0CompressionParameter = V0CompressionParameter; - _retinaParameters.IplMagno.localAdaptintegration_tau = localAdaptintegration_tau; - _retinaParameters.IplMagno.localAdaptintegration_k = localAdaptintegration_k; + _retinaParameters.IplMagno.normaliseOutput = normaliseOutput; + _retinaParameters.IplMagno.parasolCells_beta = parasolCells_beta; + _retinaParameters.IplMagno.parasolCells_tau = parasolCells_tau; + _retinaParameters.IplMagno.parasolCells_k = parasolCells_k; + _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency = amacrinCellsTemporalCutFrequency; + _retinaParameters.IplMagno.V0CompressionParameter = V0CompressionParameter; + _retinaParameters.IplMagno.localAdaptintegration_tau = localAdaptintegration_tau; + _retinaParameters.IplMagno.localAdaptintegration_k = localAdaptintegration_k; } void Retina::run(const cv::Mat &inputMatToConvert) { - // first convert input image to the compatible format : std::valarray - const bool colorMode = _convertCvMat2ValarrayBuffer(inputMatToConvert, _inputBuffer); - // process the retina - if (!_retinaFilter->runFilter(_inputBuffer, colorMode, false, _retinaParameters.OPLandIplParvo.colorMode && colorMode, false)) - throw cv::Exception(-1, "Retina cannot be applied, wrong input buffer size", "Retina::run", "Retina.h", 0); + // 
first convert input image to the compatible format : std::valarray + const bool colorMode = _convertCvMat2ValarrayBuffer(inputMatToConvert, _inputBuffer); + // process the retina + if (!_retinaFilter->runFilter(_inputBuffer, colorMode, false, _retinaParameters.OPLandIplParvo.colorMode && colorMode, false)) + throw cv::Exception(-1, "Retina cannot be applied, wrong input buffer size", "Retina::run", "Retina.h", 0); } void Retina::getParvo(cv::Mat &retinaOutput_parvo) { - if (_retinaFilter->getColorMode()) - { - // reallocate output buffer (if necessary) - _convertValarrayBuffer2cvMat(_retinaFilter->getColorOutput(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), true, retinaOutput_parvo); - }else - { - // reallocate output buffer (if necessary) - _convertValarrayBuffer2cvMat(_retinaFilter->getContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_parvo); - } - //retinaOutput_parvo/=255.0; + if (_retinaFilter->getColorMode()) + { + // reallocate output buffer (if necessary) + _convertValarrayBuffer2cvMat(_retinaFilter->getColorOutput(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), true, retinaOutput_parvo); + }else + { + // reallocate output buffer (if necessary) + _convertValarrayBuffer2cvMat(_retinaFilter->getContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_parvo); + } + //retinaOutput_parvo/=255.0; } void Retina::getMagno(cv::Mat &retinaOutput_magno) { - // reallocate output buffer (if necessary) - _convertValarrayBuffer2cvMat(_retinaFilter->getMovingContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_magno); - //retinaOutput_magno/=255.0; + // reallocate output buffer (if necessary) + _convertValarrayBuffer2cvMat(_retinaFilter->getMovingContours(), _retinaFilter->getOutputNBrows(), _retinaFilter->getOutputNBcolumns(), false, retinaOutput_magno); + //retinaOutput_magno/=255.0; } // original API level data accessors : copy buffers if size matches @@ -344,110 +344,112 @@ const std::valarray & Retina::getParvo() const {if (_retinaFilter->getCol // private method called by constructirs void Retina::_init(const cv::Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght) { - // basic error check - if (inputSize.height*inputSize.width <= 0) - throw cv::Exception(-1, "Bad retina size setup : size height and with must be superior to zero", "Retina::setup", "Retina.h", 0); + // basic error check + if (inputSize.height*inputSize.width <= 0) + throw cv::Exception(-1, "Bad retina size setup : size height and with must be superior to zero", "Retina::setup", "Retina.h", 0); - unsigned int nbPixels=inputSize.height*inputSize.width; - // resize buffers if size does not match - _inputBuffer.resize(nbPixels*3); // buffer supports gray images but also 3 channels color buffers... (larger is better...) + unsigned int nbPixels=inputSize.height*inputSize.width; + // resize buffers if size does not match + _inputBuffer.resize(nbPixels*3); // buffer supports gray images but also 3 channels color buffers... (larger is better...) 
- // allocate the retina model + // allocate the retina model if (_retinaFilter) delete _retinaFilter; - _retinaFilter = new RetinaFilter(inputSize.height, inputSize.width, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght); + _retinaFilter = new RetinaFilter(inputSize.height, inputSize.width, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght); - // prepare the default parameter XML file with default setup + // prepare the default parameter XML file with default setup setup(_retinaParameters); - // init retina - _retinaFilter->clearAllBuffers(); + // init retina + _retinaFilter->clearAllBuffers(); - // report current configuration - std::cout< &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, cv::Mat &outBuffer) { - // fill output buffer with the valarray buffer - const float *valarrayPTR=get_data(grayMatrixToConvert); - if (!colorMode) - { - outBuffer.create(cv::Size(nbColumns, nbRows), CV_8U); - for (unsigned int i=0;i(pixel)=(unsigned char)*(valarrayPTR++); - } - } - }else - { - const unsigned int doubleNBpixels=_retinaFilter->getOutputNBpixels()*2; - outBuffer.create(cv::Size(nbColumns, nbRows), CV_8UC3); - for (unsigned int i=0;igetOutputNBpixels()); - pixelValues[0]=(unsigned char)*(valarrayPTR+doubleNBpixels); - - outBuffer.at(pixel)=pixelValues; - } - } - } + // fill output buffer with the valarray buffer + const float *valarrayPTR=get_data(grayMatrixToConvert); + if (!colorMode) + { + outBuffer.create(cv::Size(nbColumns, nbRows), CV_8U); + for (unsigned int i=0;i(pixel)=(unsigned char)*(valarrayPTR++); + } + } + }else + { + const unsigned int doubleNBpixels=_retinaFilter->getOutputNBpixels()*2; + outBuffer.create(cv::Size(nbColumns, nbRows), CV_8UC3); + for (unsigned int i=0;igetOutputNBpixels()); + pixelValues[0]=(unsigned char)*(valarrayPTR+doubleNBpixels); + + outBuffer.at(pixel)=pixelValues; + } + } + } } bool Retina::_convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::valarray &outputValarrayMatrix) { - // first check input consistency - if (inputMatToConvert.empty()) - throw cv::Exception(-1, "Retina cannot be applied, input buffer is empty", "Retina::run", "Retina.h", 0); + // first check input consistency + if (inputMatToConvert.empty()) + throw cv::Exception(-1, "Retina cannot be applied, input buffer is empty", "Retina::run", "Retina.h", 0); + + // retreive color mode from image input + int imageNumberOfChannels = inputMatToConvert.channels(); - // retreive color mode from image input - int imageNumberOfChannels = inputMatToConvert.channels(); - // convert to float AND fill the valarray buffer - typedef float T; // define here the target pixel format, here, float + typedef float T; // define here the target pixel format, here, float const int dsttype = DataType::depth; // output buffer is float format - if(imageNumberOfChannels==4) + if(imageNumberOfChannels==4) + { + // create a cv::Mat table (for RGBA planes) + cv::Mat planes[4] = { - // create a cv::Mat table (for RGBA planes) - cv::Mat planes[] = - { - cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()*2]), - cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()]), - cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]), - cv::Mat(inputMatToConvert.size(), dsttype) // last channel (alpha) does not point on the valarray (not usefull in our case) - }; - // split color 
cv::Mat in 4 planes... it fills valarray directely - cv::split(cv::Mat_ >(inputMatToConvert), planes); - }else if (imageNumberOfChannels==3) - { - // create a cv::Mat table (for RGB planes) - cv::Mat planes[] = - { - cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()*2]), - cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()]), - cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]) - }; - // split color cv::Mat in 3 planes... it fills valarray directely - cv::split(cv::Mat_ >(inputMatToConvert), planes); - }else if(imageNumberOfChannels==1) - { - // create a cv::Mat header for the valarray - cv::Mat dst(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]); - inputMatToConvert.convertTo(dst, dsttype); - } + cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()*2]), + cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()]), + cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]) + }; + planes[3] = cv::Mat(inputMatToConvert.size(), dsttype); // last channel (alpha) does not point on the valarray (not usefull in our case) + // split color cv::Mat in 4 planes... it fills valarray directely + cv::split(cv::Mat_ >(inputMatToConvert), planes); + } + else if (imageNumberOfChannels==3) + { + // create a cv::Mat table (for RGB planes) + cv::Mat planes[] = + { + cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()*2]), + cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[_retinaFilter->getInputNBpixels()]), + cv::Mat(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]) + }; + // split color cv::Mat in 3 planes... 
it fills valarray directely + cv::split(cv::Mat_ >(inputMatToConvert), planes); + } + else if(imageNumberOfChannels==1) + { + // create a cv::Mat header for the valarray + cv::Mat dst(inputMatToConvert.size(), dsttype, &outputValarrayMatrix[0]); + inputMatToConvert.convertTo(dst, dsttype); + } else CV_Error(CV_StsUnsupportedFormat, "input image must be single channel (gray levels), bgr format (color) or bgra (color with transparency which won't be considered"); - + return imageNumberOfChannels>1; // return bool : false for gray level image processing, true for color mode } diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index b48467c..0c75202 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -869,6 +869,8 @@ cvGetModuleInfo( const char* name, const char **version, const char **plugin_lis } #if defined BUILD_SHARED_LIBS && defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE +BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID ); + BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID ) { if( fdwReason == DLL_THREAD_DETACH || fdwReason == DLL_PROCESS_DETACH ) diff --git a/modules/features2d/src/features2d_init.cpp b/modules/features2d/src/features2d_init.cpp index 508e034..09b9322 100644 --- a/modules/features2d/src/features2d_init.cpp +++ b/modules/features2d/src/features2d_init.cpp @@ -133,7 +133,7 @@ CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense", obj.info()->addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale)); CV_INIT_ALGORITHM(GridAdaptedFeatureDetector, "Feature2D.Grid", - //obj.info()->addParam(obj, "detector", (Ptr&)obj.detector); + obj.info()->addParam(obj, "detector", (Ptr&)obj.detector); obj.info()->addParam(obj, "maxTotalKeypoints", obj.maxTotalKeypoints); obj.info()->addParam(obj, "gridRows", obj.gridRows); obj.info()->addParam(obj, "gridCols", obj.gridCols)); diff --git a/modules/features2d/test/test_features2d.cpp b/modules/features2d/test/test_features2d.cpp index 958d891..10805d5 100644 --- a/modules/features2d/test/test_features2d.cpp +++ b/modules/features2d/test/test_features2d.cpp @@ -911,7 +911,7 @@ void CV_DescriptorMatcherTest::radiusMatchTest( const Mat& query, const Mat& tra dmatcher->radiusMatch( query, matches, radius, masks ); - int curRes = cvtest::TS::OK; + //int curRes = cvtest::TS::OK; if( (int)matches.size() != queryDescCount ) { ts->printf(cvtest::TS::LOG, "Incorrect matches count while test radiusMatch() function (1).\n"); @@ -951,7 +951,7 @@ void CV_DescriptorMatcherTest::radiusMatchTest( const Mat& query, const Mat& tra } if( (float)badCount > (float)queryDescCount*badPart ) { - curRes = cvtest::TS::FAIL_INVALID_OUTPUT; + //curRes = cvtest::TS::FAIL_INVALID_OUTPUT; ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test radiusMatch() function (2).\n", (float)badCount/(float)queryDescCount ); ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY ); diff --git a/modules/flann/include/opencv2/flann/defines.h b/modules/flann/include/opencv2/flann/defines.h index 7bd8964..178f07b 100644 --- a/modules/flann/include/opencv2/flann/defines.h +++ b/modules/flann/include/opencv2/flann/defines.h @@ -65,7 +65,7 @@ #undef FLANN_PLATFORM_32_BIT #undef FLANN_PLATFORM_64_BIT -#if __amd64__ || __x86_64__ || _WIN64 || _M_X64 +#if defined __amd64__ || defined __x86_64__ || defined _WIN64 || defined _M_X64 #define FLANN_PLATFORM_64_BIT #else #define FLANN_PLATFORM_32_BIT diff --git a/modules/flann/include/opencv2/flann/index_testing.h 
b/modules/flann/include/opencv2/flann/index_testing.h index ab80dd7..d764004 100644 --- a/modules/flann/include/opencv2/flann/index_testing.h +++ b/modules/flann/include/opencv2/flann/index_testing.h @@ -164,7 +164,7 @@ float test_index_precision(NNIndex& index, const Matrix& index, const Matrix= 100 +#if defined _MSC_VER && _MSC_VER >= 100 #pragma warning(disable: 4995) #endif @@ -103,17 +103,18 @@ Thanks to: #include //Include Directshow stuff here so we don't worry about needing all the h files. -#if _MSC_VER >= 1500 -#include "DShow.h" -#include "strmif.h" -#include "Aviriff.h" -#include "dvdmedia.h" -#include "bdaiface.h" +#if defined _MSC_VER && _MSC_VER >= 1500 +# include "DShow.h" +# include "strmif.h" +# include "Aviriff.h" +# include "dvdmedia.h" +# include "bdaiface.h" #else -#ifdef _MSC_VER -#define __extension__ -typedef BOOL WINBOOL; +# ifdef _MSC_VER +# define __extension__ + typedef BOOL WINBOOL; #endif + #include "dshow/dshow.h" #include "dshow/dvdmedia.h" #include "dshow/bdatypes.h" @@ -133,6 +134,8 @@ public: virtual HRESULT STDMETHODCALLTYPE Clone( /* [out] */ IEnumPIDMap **ppIEnumPIDMap) = 0; + + virtual ~IEnumPIDMap() {} }; interface IMPEG2PIDMap : public IUnknown @@ -148,6 +151,8 @@ interface IMPEG2PIDMap : public IUnknown virtual HRESULT STDMETHODCALLTYPE EnumPIDMap( /* [out] */ IEnumPIDMap **pIEnumPIDMap) = 0; + + virtual ~IMPEG2PIDMap() {} }; #endif @@ -234,6 +239,7 @@ interface ISampleGrabberCB : public IUnknown BYTE *pBuffer, LONG BufferLen) = 0; + virtual ~ISampleGrabberCB() {} }; interface ISampleGrabber : public IUnknown @@ -261,6 +267,7 @@ interface ISampleGrabber : public IUnknown ISampleGrabberCB *pCallback, LONG WhichMethodToCallback) = 0; + virtual ~ISampleGrabber() {} }; #ifndef HEADER @@ -519,12 +526,12 @@ class videoInput{ //Manual control over settings thanks..... //These are experimental for now. 
- bool setVideoSettingFilter(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false); - bool setVideoSettingFilterPct(int deviceID, long Property, float pctValue, long Flags = NULL); + bool setVideoSettingFilter(int deviceID, long Property, long lValue, long Flags = 0, bool useDefaultValue = false); + bool setVideoSettingFilterPct(int deviceID, long Property, float pctValue, long Flags = 0); bool getVideoSettingFilter(int deviceID, long Property, long &min, long &max, long &SteppingDelta, long ¤tValue, long &flags, long &defaultValue); - bool setVideoSettingCamera(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false); - bool setVideoSettingCameraPct(int deviceID, long Property, float pctValue, long Flags = NULL); + bool setVideoSettingCamera(int deviceID, long Property, long lValue, long Flags = 0, bool useDefaultValue = false); + bool setVideoSettingCameraPct(int deviceID, long Property, float pctValue, long Flags = 0); bool getVideoSettingCamera(int deviceID, long Property, long &min, long &max, long &SteppingDelta, long ¤tValue, long &flags, long &defaultValue); //bool setVideoSettingCam(int deviceID, long Property, long lValue, long Flags = NULL, bool useDefaultValue = false); @@ -597,7 +604,7 @@ class videoInput{ /////////////////////////// HANDY FUNCTIONS ///////////////////////////// -void MyFreeMediaType(AM_MEDIA_TYPE& mt){ +static void MyFreeMediaType(AM_MEDIA_TYPE& mt){ if (mt.cbFormat != 0) { CoTaskMemFree((PVOID)mt.pbFormat); @@ -612,7 +619,7 @@ void MyFreeMediaType(AM_MEDIA_TYPE& mt){ } } -void MyDeleteMediaType(AM_MEDIA_TYPE *pmt) +static void MyDeleteMediaType(AM_MEDIA_TYPE *pmt) { if (pmt != NULL) { @@ -642,7 +649,7 @@ public: //------------------------------------------------ - ~SampleGrabberCallback(){ + virtual ~SampleGrabberCallback(){ ptrBuffer = NULL; DeleteCriticalSection(&critSection); CloseHandle(hEvent); @@ -849,7 +856,7 @@ void videoDevice::NukeDownstream(IBaseFilter *pBF){ // ---------------------------------------------------------------------- void videoDevice::destroyGraph(){ - HRESULT hr = NULL; + HRESULT hr = 0; //int FuncRetval=0; //int NumFilters=0; @@ -867,7 +874,7 @@ void videoDevice::destroyGraph(){ IBaseFilter * pFilter = NULL; if (pEnum->Next(1, &pFilter, &cFetched) == S_OK) { - FILTER_INFO FilterInfo={0}; + FILTER_INFO FilterInfo; memset(&FilterInfo, 0, sizeof(FilterInfo)); hr = pFilter->QueryFilterInfo(&FilterInfo); FilterInfo.pGraph->Release(); @@ -1620,14 +1627,15 @@ void __cdecl videoInput::basicThread(void * objPtr){ void videoInput::showSettingsWindow(int id){ if(isDeviceSetup(id)){ - HANDLE myTempThread; + //HANDLE myTempThread; //we reconnect to the device as we have freed our reference to it //why have we freed our reference? 
because there seemed to be an issue //with some mpeg devices if we didn't HRESULT hr = getDevice(&VDList[id]->pVideoInputFilter, id, VDList[id]->wDeviceName, VDList[id]->nDeviceName); if(hr == S_OK){ - myTempThread = (HANDLE)_beginthread(basicThread, 0, (void *)&VDList[id]); + //myTempThread = (HANDLE) + _beginthread(basicThread, 0, (void *)&VDList[id]); } } } @@ -1705,7 +1713,7 @@ bool videoInput::setVideoSettingFilterPct(int deviceID, long Property, float pct float halfStep = (float)stepAmnt * 0.5f; if( mod < halfStep ) rasterValue -= mod; else rasterValue += stepAmnt - mod; - printf("RASTER - pctValue is %f - value is %i - step is %i - mod is %i - rasterValue is %i\n", pctValue, value, stepAmnt, mod, rasterValue); + printf("RASTER - pctValue is %f - value is %li - step is %li - mod is %li - rasterValue is %li\n", pctValue, value, stepAmnt, mod, rasterValue); } return setVideoSettingFilter(deviceID, Property, rasterValue, Flags, false); @@ -1795,7 +1803,7 @@ bool videoInput::setVideoSettingCameraPct(int deviceID, long Property, float pct float halfStep = (float)stepAmnt * 0.5f; if( mod < halfStep ) rasterValue -= mod; else rasterValue += stepAmnt - mod; - printf("RASTER - pctValue is %f - value is %i - step is %i - mod is %i - rasterValue is %i\n", pctValue, value, stepAmnt, mod, rasterValue); + printf("RASTER - pctValue is %f - value is %li - step is %li - mod is %li - rasterValue is %li\n", pctValue, value, stepAmnt, mod, rasterValue); } return setVideoSettingCamera(deviceID, Property, rasterValue, Flags, false); @@ -1920,7 +1928,7 @@ bool videoInput::restartDevice(int id){ stopDevice(id); //set our fps if needed - if( avgFrameTime != -1){ + if( avgFrameTime != (unsigned long)-1){ VDList[id]->requestedFrameTime = avgFrameTime; } @@ -2300,7 +2308,7 @@ static void findClosestSizeAndSubtype(videoDevice * VD, int widthIn, int heightI //find perfect match or closest size int nearW = 9999999; int nearH = 9999999; - bool foundClosestMatch = true; + //bool foundClosestMatch = true; int iCount = 0; int iSize = 0; @@ -2360,7 +2368,7 @@ static void findClosestSizeAndSubtype(videoDevice * VD, int widthIn, int heightI //see if we have an exact match! 
if(exactMatchX && exactMatchY){ - foundClosestMatch = false; + //foundClosestMatch = false; exactMatch = true; widthOut = widthIn; diff --git a/modules/highgui/src/cap_vfw.cpp b/modules/highgui/src/cap_vfw.cpp index 059be19..33d0c8b 100644 --- a/modules/highgui/src/cap_vfw.cpp +++ b/modules/highgui/src/cap_vfw.cpp @@ -43,7 +43,7 @@ #include -#if _MSC_VER >= 1200 +#if defined _MSC_VER && _MSC_VER >= 1200 #pragma warning( disable: 4711 ) #endif diff --git a/modules/highgui/src/window_w32.cpp b/modules/highgui/src/window_w32.cpp index b97be0f..1371ebd 100644 --- a/modules/highgui/src/window_w32.cpp +++ b/modules/highgui/src/window_w32.cpp @@ -43,7 +43,7 @@ #if defined WIN32 || defined _WIN32 -#if _MSC_VER >= 1200 +#if defined _MSC_VER && _MSC_VER >= 1200 #pragma warning( disable: 4710 ) #endif @@ -61,6 +61,10 @@ #ifndef __inout # define __inout #endif + +#ifdef __GNUC__ +# pragma GCC diagnostic ignored "-Wmissing-declarations" +#endif #include #include @@ -166,7 +170,7 @@ typedef struct CvWindow HGDIOBJ image; int last_key; int flags; - int status;//0 normal, 1 fullscreen (YV) + int status;//0 normal, 1 fullscreen (YV) CvMouseCallback on_mouse; void* on_mouse_param; @@ -360,7 +364,7 @@ icvSaveWindowPos( const char* name, CvRect rect ) char rootKey[1024]; strcpy( szKey, icvWindowPosRootKey ); strcat( szKey, name ); - + if( RegOpenKeyEx( HKEY_CURRENT_USER,szKey,0,KEY_READ,&hkey) != ERROR_SUCCESS ) { HKEY hroot; @@ -405,7 +409,7 @@ icvSaveWindowPos( const char* name, CvRect rect ) if( RegOpenKeyEx( HKEY_CURRENT_USER,szKey,0,KEY_WRITE,&hkey) != ERROR_SUCCESS ) return; } - + RegSetValueEx(hkey, "Left", 0, REG_DWORD, (BYTE*)&rect.x, sizeof(rect.x)); RegSetValueEx(hkey, "Top", 0, REG_DWORD, (BYTE*)&rect.y, sizeof(rect.y)); RegSetValueEx(hkey, "Width", 0, REG_DWORD, (BYTE*)&rect.width, sizeof(rect.width)); @@ -415,9 +419,9 @@ icvSaveWindowPos( const char* name, CvRect rect ) double cvGetModeWindow_W32(const char* name)//YV { - double result = -1; - - CV_FUNCNAME( "cvGetModeWindow_W32" ); + double result = -1; + + CV_FUNCNAME( "cvGetModeWindow_W32" ); __BEGIN__; @@ -429,75 +433,75 @@ double cvGetModeWindow_W32(const char* name)//YV window = icvFindWindowByName( name ); if (!window) EXIT; // keep silence here - + result = window->status; - + __END__; - return result; + return result; } void cvSetModeWindow_W32( const char* name, double prop_value)//Yannick Verdie { - CV_FUNCNAME( "cvSetModeWindow_W32" ); + CV_FUNCNAME( "cvSetModeWindow_W32" ); - __BEGIN__; + __BEGIN__; - CvWindow* window; + CvWindow* window; - if(!name) - CV_ERROR( CV_StsNullPtr, "NULL name string" ); + if(!name) + CV_ERROR( CV_StsNullPtr, "NULL name string" ); - window = icvFindWindowByName( name ); - if( !window ) - CV_ERROR( CV_StsNullPtr, "NULL window" ); + window = icvFindWindowByName( name ); + if( !window ) + CV_ERROR( CV_StsNullPtr, "NULL window" ); - if(window->flags & CV_WINDOW_AUTOSIZE)//if the flag CV_WINDOW_AUTOSIZE is set - EXIT; + if(window->flags & CV_WINDOW_AUTOSIZE)//if the flag CV_WINDOW_AUTOSIZE is set + EXIT; - { - DWORD dwStyle = (DWORD)GetWindowLongPtr(window->frame, GWL_STYLE); - CvRect position; + { + DWORD dwStyle = (DWORD)GetWindowLongPtr(window->frame, GWL_STYLE); + CvRect position; - if (window->status==CV_WINDOW_FULLSCREEN && prop_value==CV_WINDOW_NORMAL) - { - icvLoadWindowPos(window->name,position ); - SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle | WS_CAPTION | WS_THICKFRAME); + if (window->status==CV_WINDOW_FULLSCREEN && prop_value==CV_WINDOW_NORMAL) + { + icvLoadWindowPos(window->name,position ); 
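[Editor's note on the cap_vfw.cpp and window_w32.cpp hunks above: MinGW never defines _MSC_VER, so a bare "#if _MSC_VER >= 1200" evaluates an undefined macro (0 by the standard) and trips -Wundef in warning-heavy builds. Guarding with defined() first keeps both toolchains quiet. A minimal sketch follows; the COMPILER_NAME macro is illustrative only.]

    #include <iostream>

    #if defined _MSC_VER && _MSC_VER >= 1200
    #  pragma warning( disable: 4710 )        // MSVC-only pragma, never seen by GCC
    #  define COMPILER_NAME "MSVC"
    #elif defined __GNUC__
    #  pragma GCC diagnostic ignored "-Wmissing-declarations"  // GCC/MinGW equivalent knob
    #  define COMPILER_NAME "GCC/MinGW"
    #else
    #  define COMPILER_NAME "other"
    #endif

    int main()
    {
        std::cout << "built with " << COMPILER_NAME << std::endl;
        return 0;
    }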
+ SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle | WS_CAPTION | WS_THICKFRAME); - SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED); - window->status=CV_WINDOW_NORMAL; + SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED); + window->status=CV_WINDOW_NORMAL; - EXIT; - } + EXIT; + } - if (window->status==CV_WINDOW_NORMAL && prop_value==CV_WINDOW_FULLSCREEN) - { - //save dimension - RECT rect; - GetWindowRect(window->frame, &rect); - CvRect RectCV = cvRect(rect.left, rect.top,rect.right - rect.left, rect.bottom - rect.top); - icvSaveWindowPos(window->name,RectCV ); + if (window->status==CV_WINDOW_NORMAL && prop_value==CV_WINDOW_FULLSCREEN) + { + //save dimension + RECT rect; + GetWindowRect(window->frame, &rect); + CvRect RectCV = cvRect(rect.left, rect.top,rect.right - rect.left, rect.bottom - rect.top); + icvSaveWindowPos(window->name,RectCV ); - //Look at coordinate for fullscreen - HMONITOR hMonitor; - MONITORINFO mi; - hMonitor = MonitorFromRect(&rect, MONITOR_DEFAULTTONEAREST); + //Look at coordinate for fullscreen + HMONITOR hMonitor; + MONITORINFO mi; + hMonitor = MonitorFromRect(&rect, MONITOR_DEFAULTTONEAREST); - mi.cbSize = sizeof(mi); - GetMonitorInfo(hMonitor, &mi); + mi.cbSize = sizeof(mi); + GetMonitorInfo(hMonitor, &mi); - //fullscreen - position.x=mi.rcMonitor.left;position.y=mi.rcMonitor.top; - position.width=mi.rcMonitor.right - mi.rcMonitor.left;position.height=mi.rcMonitor.bottom - mi.rcMonitor.top; - SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle & ~WS_CAPTION & ~WS_THICKFRAME); + //fullscreen + position.x=mi.rcMonitor.left;position.y=mi.rcMonitor.top; + position.width=mi.rcMonitor.right - mi.rcMonitor.left;position.height=mi.rcMonitor.bottom - mi.rcMonitor.top; + SetWindowLongPtr(window->frame, GWL_STYLE, dwStyle & ~WS_CAPTION & ~WS_THICKFRAME); - SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED); - window->status=CV_WINDOW_FULLSCREEN; + SetWindowPos(window->frame, HWND_TOP, position.x, position.y , position.width,position.height, SWP_NOZORDER | SWP_FRAMECHANGED); + window->status=CV_WINDOW_FULLSCREEN; - EXIT; - } - } + EXIT; + } + } - __END__; + __END__; } double cvGetPropWindowAutoSize_W32(const char* name) @@ -526,9 +530,9 @@ double cvGetPropWindowAutoSize_W32(const char* name) double cvGetRatioWindow_W32(const char* name) { - double result = -1; - - CV_FUNCNAME( "cvGetRatioWindow_W32" ); + double result = -1; + + CV_FUNCNAME( "cvGetRatioWindow_W32" ); __BEGIN__; @@ -540,20 +544,20 @@ double cvGetRatioWindow_W32(const char* name) window = icvFindWindowByName( name ); if (!window) EXIT; // keep silence here - + result = static_cast(window->width) / window->height; - + __END__; - return result; + return result; } double cvGetOpenGlProp_W32(const char* name) { - double result = -1; + double result = -1; -#ifdef HAVE_OPENGL - CV_FUNCNAME( "cvGetOpenGlProp_W32" ); +#ifdef HAVE_OPENGL + CV_FUNCNAME( "cvGetOpenGlProp_W32" ); __BEGIN__; @@ -565,14 +569,14 @@ double cvGetOpenGlProp_W32(const char* name) window = icvFindWindowByName( name ); if (!window) EXIT; // keep silence here - + result = window->useGl; - + __END__; #endif - (void)name; + (void)name; - return result; + return result; } @@ -626,7 +630,7 @@ namespace void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int 
count, int base) const; bool isGlContextInitialized() const; - + PFNGLGENBUFFERSPROC glGenBuffersExt; PFNGLDELETEBUFFERSPROC glDeleteBuffersExt; @@ -787,8 +791,8 @@ namespace weight, // font weight italic ? TRUE : FALSE, // Italic underline ? TRUE : FALSE, // Underline - FALSE, // StrikeOut - ANSI_CHARSET, // CharSet + FALSE, // StrikeOut + ANSI_CHARSET, // CharSet OUT_TT_PRECIS, // OutPrecision CLIP_DEFAULT_PRECIS, // ClipPrecision ANTIALIASED_QUALITY, // Quality @@ -870,12 +874,12 @@ namespace 0, // Shift Bit Ignored 0, // No Accumulation Buffer 0, 0, 0, 0, // Accumulation Bits Ignored - 32, // 32 Bit Z-Buffer (Depth Buffer) + 32, // 32 Bit Z-Buffer (Depth Buffer) 0, // No Stencil Buffer 0, // No Auxiliary Buffer PFD_MAIN_PLANE, // Main Drawing Layer 0, // Reserved - 0, 0, 0 // Layer Masks Ignored + 0, 0, 0 // Layer Masks Ignored }; hGLDC = GetDC(hWnd); @@ -915,7 +919,7 @@ namespace window->hGLRC = NULL; } - if (window->dc) + if (window->dc) { ReleaseDC(window->hwnd, window->dc); window->dc = NULL; @@ -935,7 +939,7 @@ namespace if (!wglMakeCurrent(window->dc, window->hGLRC)) CV_ERROR( CV_OpenGlApiCallError, "Can't Activate The GL Rendering Context" ); - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); if (window->glDrawCallback) window->glDrawCallback(window->glDrawData); @@ -1009,7 +1013,7 @@ CV_IMPL int cvNamedWindow( const char* name, int flags ) ShowWindow(mainhWnd, SW_SHOW); - //YV- remove one border by changing the style + //YV- remove one border by changing the style hWnd = CreateWindow("HighGUI class", "", (defStyle & ~WS_SIZEBOX) | WS_CHILD, CW_USEDEFAULT, 0, rect.width, rect.height, mainhWnd, 0, hg_hinstance, 0); if( !hWnd ) CV_ERROR( CV_StsError, "Frame window can not be created" ); @@ -1400,16 +1404,16 @@ cvShowImage( const char* name, const CvArr* arr ) CV_ERROR( CV_StsNullPtr, "NULL name" ); window = icvFindWindowByName(name); - if(!window) - { + if(!window) + { #ifndef HAVE_OPENGL - cvNamedWindow(name, CV_WINDOW_AUTOSIZE); + cvNamedWindow(name, CV_WINDOW_AUTOSIZE); #else - cvNamedWindow(name, CV_WINDOW_AUTOSIZE | CV_WINDOW_OPENGL); + cvNamedWindow(name, CV_WINDOW_AUTOSIZE | CV_WINDOW_OPENGL); #endif - window = icvFindWindowByName(name); - } + window = icvFindWindowByName(name); + } if( !window || !arr ) EXIT; // keep silence here. @@ -1467,6 +1471,7 @@ cvShowImage( const char* name, const CvArr* arr ) __END__; } +#if 0 CV_IMPL void cvShowImageHWND(HWND w_hWnd, const CvArr* arr) { @@ -1494,7 +1499,7 @@ cvShowImageHWND(HWND w_hWnd, const CvArr* arr) if( CV_IS_IMAGE_HDR( arr ) ) origin = ((IplImage*)arr)->origin; - CV_CALL( image = cvGetMat( arr, &stub ) ); + CV_CALL( image = cvGetMat( arr, &stub ) ); if ( hdc ) { @@ -1512,7 +1517,7 @@ cvShowImageHWND(HWND w_hWnd, const CvArr* arr) dst_ptr = bmp.bmBits; } - if( size.cx != image->width || size.cy != image->height || channels != channels0 ) + if( size.cx != image->width || size.cy != image->height || channels != channels0 ) { changed_size = true; @@ -1544,6 +1549,7 @@ cvShowImageHWND(HWND w_hWnd, const CvArr* arr) __END__; } +#endif CV_IMPL void cvResizeWindow(const char* name, int width, int height ) { @@ -1666,7 +1672,7 @@ MainWindowProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam ) { // Snap window to screen edges with multi-monitor support. 
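[Editor's note: in the window_w32.cpp hunks above, the unused cvShowImageHWND definition is fenced off with #if 0 / #endif, and earlier in the patch file-local DirectShow helpers are made static, so GCC's -Wmissing-declarations and -Wunused-function stay quiet under MinGW. The sketch below shows the same hygiene pattern with invented names.]

    #include <cstdio>

    #if 0   // kept in the source for reference, excluded from the build
    void unusedLegacyEntryPoint(int) { std::puts("never compiled"); }
    #endif

    static void fileLocalHelper()   // internal linkage: no separate declaration required
    {
        std::puts("helper");
    }

    int main()
    {
        fileLocalHelper();
        return 0;
    }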
// Adi Shavit LPWINDOWPOS pos = (LPWINDOWPOS)lParam; - + RECT rect; GetWindowRect(window->frame, &rect); @@ -1679,15 +1685,15 @@ MainWindowProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam ) const int SNAP_DISTANCE = 15; - if (abs(pos->x - mi.rcMonitor.left) <= SNAP_DISTANCE) + if (abs(pos->x - mi.rcMonitor.left) <= SNAP_DISTANCE) pos->x = mi.rcMonitor.left; // snap to left edge - else + else if (abs(pos->x + pos->cx - mi.rcMonitor.right) <= SNAP_DISTANCE) pos->x = mi.rcMonitor.right - pos->cx; // snap to right edge if (abs(pos->y - mi.rcMonitor.top) <= SNAP_DISTANCE) pos->y = mi.rcMonitor.top; // snap to top edge - else + else if (abs(pos->y + pos->cy - mi.rcMonitor.bottom) <= SNAP_DISTANCE) pos->y = mi.rcMonitor.bottom - pos->cy; // snap to bottom edge } @@ -1848,9 +1854,9 @@ static LRESULT CALLBACK HighGUIProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM EndPaint(hwnd, &paint); } #ifdef HAVE_OPENGL - else if(window->useGl) + else if(window->useGl) { - drawGl(window); + drawGl(window); return DefWindowProc(hwnd, uMsg, wParam, lParam); } #endif diff --git a/modules/imgproc/src/rotcalipers.cpp b/modules/imgproc/src/rotcalipers.cpp index 4ea8f75..2171ec1 100644 --- a/modules/imgproc/src/rotcalipers.cpp +++ b/modules/imgproc/src/rotcalipers.cpp @@ -64,18 +64,18 @@ icvMinAreaState; // Parameters: // points - convex hull vertices ( any orientation ) // n - number of vertices -// mode - concrete application of algorithm -// can be CV_CALIPERS_MAXDIST or -// CV_CALIPERS_MINAREARECT +// mode - concrete application of algorithm +// can be CV_CALIPERS_MAXDIST or +// CV_CALIPERS_MINAREARECT // left, bottom, right, top - indexes of extremal points // out - output info. -// In case CV_CALIPERS_MAXDIST it points to float value - +// In case CV_CALIPERS_MAXDIST it points to float value - // maximal height of polygon. // In case CV_CALIPERS_MINAREARECT -// ((CvPoint2D32f*)out)[0] - corner +// ((CvPoint2D32f*)out)[0] - corner // ((CvPoint2D32f*)out)[1] - vector1 // ((CvPoint2D32f*)out)[0] - corner2 -// +// // ^ // | // vector2 | @@ -94,15 +94,15 @@ icvRotatingCalipers( CvPoint2D32f* points, int n, int mode, float* out ) { float minarea = FLT_MAX; float max_dist = 0; - char buffer[32]; + char buffer[32] = {}; int i, k; CvPoint2D32f* vect = (CvPoint2D32f*)cvAlloc( n * sizeof(vect[0]) ); float* inv_vect_length = (float*)cvAlloc( n * sizeof(inv_vect_length[0]) ); int left = 0, bottom = 0, right = 0, top = 0; int seq[4] = { -1, -1, -1, -1 }; - /* rotating calipers sides will always have coordinates - (a,b) (-b,a) (-a,-b) (b, -a) + /* rotating calipers sides will always have coordinates + (a,b) (-b,a) (-a,-b) (b, -a) */ /* this is a first base bector (a,b) initialized by (1,0) */ float orientation = 0; @@ -111,14 +111,14 @@ icvRotatingCalipers( CvPoint2D32f* points, int n, int mode, float* out ) float left_x, right_x, top_y, bottom_y; CvPoint2D32f pt0 = points[0]; - + left_x = right_x = pt0.x; top_y = bottom_y = pt0.y; - + for( i = 0; i < n; i++ ) { double dx, dy; - + if( pt0.x < left_x ) left_x = pt0.x, left = i; @@ -132,7 +132,7 @@ icvRotatingCalipers( CvPoint2D32f* points, int n, int mode, float* out ) bottom_y = pt0.y, bottom = i; CvPoint2D32f pt = points[(i+1) & (i+1 < n ? 
-1 : 0)]; - + dx = pt.x - pt0.x; dy = pt.y - pt0.y; @@ -149,7 +149,7 @@ icvRotatingCalipers( CvPoint2D32f* points, int n, int mode, float* out ) { double ax = vect[n-1].x; double ay = vect[n-1].y; - + for( i = 0; i < n; i++ ) { double bx = vect[i].x; @@ -218,7 +218,7 @@ icvRotatingCalipers( CvPoint2D32f* points, int n, int mode, float* out ) base_b = lead_y; break; case 1: - base_a = lead_y; + base_a = lead_y; base_b = -lead_x; break; case 2: @@ -231,12 +231,12 @@ icvRotatingCalipers( CvPoint2D32f* points, int n, int mode, float* out ) break; default: assert(0); } - } + } /* change base point of main edge */ seq[main_element] += 1; seq[main_element] = (seq[main_element] == n) ? 0 : seq[main_element]; - + switch (mode) { case CV_CALIPERS_MAXHEIGHT: @@ -351,7 +351,7 @@ cvMinAreaRect2( const CvArr* array, CvMemStorage* storage ) CvBox2D box; cv::AutoBuffer _points; CvPoint2D32f* points; - + memset(&box, 0, sizeof(box)); int i, n; @@ -409,7 +409,7 @@ cvMinAreaRect2( const CvArr* array, CvMemStorage* storage ) CV_READ_SEQ_ELEM( points[i], reader ); } } - + if( n > 2 ) { icvRotatingCalipers( points, n, CV_CALIPERS_MINAREARECT, (float*)out ); diff --git a/modules/legacy/src/extendededges.cpp b/modules/legacy/src/extendededges.cpp index 2671ddb..8ade446 100644 --- a/modules/legacy/src/extendededges.cpp +++ b/modules/legacy/src/extendededges.cpp @@ -41,14 +41,15 @@ #include "precomp.hpp" -#ifdef WIN32 /* make sure it builds under Linux whenever it is included into Makefile.am or not. */ +#if 0 +//#ifdef WIN32 /* make sure it builds under Linux whenever it is included into Makefile.am or not. */ //void icvCutContour( CvSeq* current, IplImage* image ); CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* image ); //create lists of segments of all contours from image -CvSeq* cvExtractSingleEdges( IplImage* image, //bw image - it's content will be destroyed by cvFindContours +CvSeq* cvExtractSingleEdges( IplImage* image, //bw image - it's content will be destroyed by cvFindContours CvMemStorage* storage ) { CvMemStorage* tmp_storage = cvCreateChildMemStorage( storage ); @@ -57,29 +58,29 @@ CvSeq* cvExtractSingleEdges( IplImage* image, //bw image - it's content will be cvZero( image ); //iterate through contours - //iterate through tree + //iterate through tree CvSeq* current = contours; int number = 0; int level = 1; CvSeq* output = 0; - CvSeq* tail_seq = 0; + CvSeq* tail_seq = 0; //actually this loop can iterates through tree, //but still we use CV_RETR_LIST it is not useful while( current ) { - number++; - + number++; + //get vertical list of segments for one contour CvSeq* new_seq = icvCutContourRaster( current, storage, image ); //add this vertical list to horisontal list if( new_seq ) { - if( tail_seq ) - { - tail_seq->h_next = new_seq; + if( tail_seq ) + { + tail_seq->h_next = new_seq; new_seq->h_prev = tail_seq; tail_seq = new_seq; } @@ -90,13 +91,13 @@ CvSeq* cvExtractSingleEdges( IplImage* image, //bw image - it's content will be } //iteration through tree - if( current->v_next ) - { + if( current->v_next ) + { //goto child current = current->v_next; level++; } - else + else { //go parent while( !current->h_next ) @@ -105,7 +106,7 @@ CvSeq* cvExtractSingleEdges( IplImage* image, //bw image - it's content will be level--; if( !level ) break; } - + if( current ) //go brother current = current->h_next; } @@ -114,25 +115,25 @@ CvSeq* cvExtractSingleEdges( IplImage* image, //bw image - it's content will be //free temporary memstorage with initial contours 
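[Editor's note on the rotcalipers.cpp change above (char buffer[32] = {};) and the earlier FILTER_INFO memset in cap_dshow.cpp: both replace indeterminate stack storage with explicit zeroing, which is what GCC's -Wuninitialized / -Wmaybe-uninitialized analysis wants to see. The struct and field names in this standalone sketch are invented.]

    #include <cstring>
    #include <cstdio>

    struct DemoInfo { char name[128]; void* graph; };

    int main()
    {
        char buffer[32] = {};                  // value-initialisation: every element is '\0'

        DemoInfo info;                         // aggregate with no initialiser: indeterminate
        std::memset(&info, 0, sizeof(info));   // explicit zeroing, equivalent in effect to = {}

        std::snprintf(buffer, sizeof(buffer), "graph=%p", info.graph);
        std::puts(buffer);
        return 0;
    }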
cvReleaseMemStorage( &tmp_storage ); - return output; + return output; } //makes vertical list of segments for 1 contour CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* image /*tmp image*/) { //iplSet(image, 0 ); // this can cause double edges if two contours have common edge - // for example if object is circle with 1 pixel width - // to remove such problem - remove this iplSet + // for example if object is circle with 1 pixel width + // to remove such problem - remove this iplSet //approx contour by single edges CvSeqReader reader; CvSeqWriter writer; - + int writing = 0; cvStartReadSeq( current, &reader, 0 ); //below line just to avoid warning cvStartWriteSeq( current->flags, sizeof(CvContour), sizeof(CvPoint), storage, &writer ); - + CvSeq* output = 0; CvSeq* tail = 0; @@ -147,7 +148,7 @@ CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* ima //mark point ((uchar*)image->imageData)[image->widthStep * cur.y + cur.x]++; assert( ((uchar*)image->imageData)[image->widthStep * cur.y + cur.x] != 255 ); - + } //second pass - create separate edges @@ -161,22 +162,22 @@ CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* ima uchar flag = image->imageData[image->widthStep * cur.y + cur.x]; if( flag != 255 && flag < 3) // { - if(!writing) + if(!writing) { cvStartWriteSeq( current->flags, sizeof(CvContour), sizeof(CvPoint), storage, &writer ); - writing = 1 ; + writing = 1 ; } //mark point if( flag < 3 ) ((uchar*)image->imageData)[image->widthStep * cur.y + cur.x] = 255; //add it to another seq CV_WRITE_SEQ_ELEM( cur, writer ); - + } else { //exclude this point from contour - if( writing ) + if( writing ) { CvSeq* newseq = cvEndWriteSeq( &writer ); writing = 0; @@ -191,7 +192,7 @@ CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* ima { output = tail = newseq; } - } + } } } @@ -211,7 +212,7 @@ CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* ima { output = tail = newseq; } - } + } return output; @@ -224,12 +225,12 @@ CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* ima //approx contour by single edges CvSeqReader reader; CvSeqReader rev_reader; - + cvStartReadSeq( current, &reader, 0 ); int64* cur_pt = (int64*)reader.ptr; int64* prev_pt = (int64*)reader.prev_elem; - + //search for point a in aba position for( int i = 0; i < current->total; i++ ) { @@ -240,7 +241,7 @@ CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* ima { //return to prev pos CV_PREV_SEQ_ELEM( sizeof(int64), reader ); - + //this point is end of edge //start going both directions and collect edge @@ -248,7 +249,7 @@ CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* ima int pos = cvGetSeqReaderPos( &reader ); cvSetSeqReaderPos( &rev_reader, pos ); - + //walk in both directions while(1); @@ -259,10 +260,10 @@ CvSeq* icvCutContourRaster( CvSeq* current, CvMemStorage* storage, IplImage* ima } } - + */ #endif /* WIN32 */ - + diff --git a/modules/objdetect/src/featurepyramid.cpp b/modules/objdetect/src/featurepyramid.cpp index 12dc5c4..fb7806a 100644 --- a/modules/objdetect/src/featurepyramid.cpp +++ b/modules/objdetect/src/featurepyramid.cpp @@ -446,15 +446,15 @@ static int getPathOfFeaturePyramid(IplImage * image, CvLSVMFeatureMap *map; IplImage *scaleTmp; float scale; - int i, err; + int i; for(i = 0; i < numStep; i++) { scale = 1.0f / powf(step, (float)i); scaleTmp = resize_opencv (image, scale); - err = 
getFeatureMaps(scaleTmp, sideLength, &map); - err = normalizeAndTruncate(map, VAL_OF_TRUNCATE); - err = PCAFeatureMaps(map); + getFeatureMaps(scaleTmp, sideLength, &map); + normalizeAndTruncate(map, VAL_OF_TRUNCATE); + PCAFeatureMaps(map); (*maps)->pyramid[startIndex + i] = map; cvReleaseImage(&scaleTmp); }/*for(i = 0; i < numStep; i++)*/ diff --git a/modules/objdetect/src/latentsvm.cpp b/modules/objdetect/src/latentsvm.cpp index 3856cd3..2c3cc73 100644 --- a/modules/objdetect/src/latentsvm.cpp +++ b/modules/objdetect/src/latentsvm.cpp @@ -560,7 +560,7 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H, float **score, int *kPoints, int numThreads) { - int error = 0; + //int error = 0; int i, j, s, f, componentIndex; unsigned int maxXBorder, maxYBorder; CvPoint **pointsArr, **oppPointsArr, ***partsDisplacementArr; @@ -576,14 +576,14 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H, partsDisplacementArr = (CvPoint ***)malloc(sizeof(CvPoint **) * kComponents); // Getting maximum filter dimensions - error = getMaxFilterDims(filters, kComponents, kPartFilters, &maxXBorder, &maxYBorder); + /*error = */getMaxFilterDims(filters, kComponents, kPartFilters, &maxXBorder, &maxYBorder); componentIndex = 0; *kPoints = 0; // For each component perform searching for (i = 0; i < kComponents; i++) { #ifdef HAVE_TBB - error = searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i], + /*error = */searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i], b[i], maxXBorder, maxYBorder, scoreThreshold, &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]), &(scoreArr[i]), &(partsDisplacementArr[i]), numThreads); diff --git a/modules/objdetect/src/lsvmparser.cpp b/modules/objdetect/src/lsvmparser.cpp index bff7696..fcee048 100644 --- a/modules/objdetect/src/lsvmparser.cpp +++ b/modules/objdetect/src/lsvmparser.cpp @@ -534,12 +534,12 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last int tag; int tagVal; char ch; - int i,j; - char buf[1024]; + int /*i,*/j; + //char buf[1024]; char tagBuf[1024]; //printf("\n"); - i = 0; + //i = 0; j = 0; st = 0; tag = 0; @@ -566,10 +566,10 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last N_path++; } tag = 0; - i = 0; + //i = 0; }else{ if((tag == 0)&& (st == 1)){ - buf[i] = ch; i++; + //buf[i] = ch; i++; }else{ tagBuf[j] = ch; j++; } @@ -582,12 +582,12 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model, int tag; int tagVal; char ch; - int i,j; - char buf[1024]; + int /*i,*/j; + //char buf[1024]; char tagBuf[1024]; //printf(" %d\n", *N_comp); - i = 0; + //i = 0; j = 0; st = 0; tag = 0; @@ -616,10 +616,10 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model, parserPFilterS (xmlf, p, model, last, max); } tag = 0; - i = 0; + //i = 0; }else{ if((tag == 0)&& (st == 1)){ - buf[i] = ch; i++; + //buf[i] = ch; i++; }else{ tagBuf[j] = ch; j++; } @@ -726,12 +726,12 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max, }//namespace int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){ - int st = 0; + //int st = 0; int tag; char ch; - int i,j; + int /*i,*/j; FILE *xmlf; - char buf[1024]; + //char buf[1024]; char tagBuf[1024]; (*max) = 10; @@ -744,9 +744,9 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i if(xmlf == NULL) return LSVM_PARSER_FILE_NOT_FOUND; - 
i = 0; + //i = 0; j = 0; - st = 0; + //st = 0; tag = 0; while(!feof(xmlf)){ ch = (char)fgetc( xmlf ); @@ -757,7 +757,7 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i }else { if(ch == '>'){ tag = 0; - i = 0; + //i = 0; tagBuf[j ] = ch; tagBuf[j + 1] = '\0'; if(getTeg(tagBuf) == MODEL){ @@ -765,7 +765,7 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i } }else{ if(tag == 0){ - buf[i] = ch; i++; + //buf[i] = ch; i++; }else{ tagBuf[j] = ch; j++; } diff --git a/modules/objdetect/src/matching.cpp b/modules/objdetect/src/matching.cpp index 79d8bd3..6055a77 100644 --- a/modules/objdetect/src/matching.cpp +++ b/modules/objdetect/src/matching.cpp @@ -23,7 +23,7 @@ */ int convolution(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap *map, float *f) { - int n1, m1, n2, m2, p, size, diff1, diff2; + int n1, m1, n2, m2, p, /*size,*/ diff1, diff2; int i1, i2, j1, j2, k; float tmp_f1, tmp_f2, tmp_f3, tmp_f4; float *pMap = NULL; @@ -37,7 +37,7 @@ int convolution(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap *map, float diff1 = n1 - n2 + 1; diff2 = m1 - m2 + 1; - size = diff1 * diff2; + //size = diff1 * diff2; for (j1 = diff2 - 1; j1 >= 0; j1--) { @@ -333,7 +333,7 @@ int filterDispositionLevel(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap float **scoreFi, int **pointsX, int **pointsY) { - int n1, m1, n2, m2, p, size, diff1, diff2; + int n1, m1, n2, m2, /*p,*/ size, diff1, diff2; float *f; int i1, j1; int res; @@ -342,7 +342,7 @@ int filterDispositionLevel(const CvLSVMFilterObject *Fi, const CvLSVMFeatureMap m1 = pyramid->sizeX; n2 = Fi->sizeY; m2 = Fi->sizeX; - p = pyramid->numFeatures; + //p = pyramid->numFeatures; (*scoreFi) = NULL; (*pointsX) = NULL; (*pointsY) = NULL; @@ -418,7 +418,7 @@ int filterDispositionLevelFFT(const CvLSVMFilterObject *Fi, const CvLSVMFftImage float **scoreFi, int **pointsX, int **pointsY) { - int n1, m1, n2, m2, p, size, diff1, diff2; + int n1, m1, n2, m2, /*p,*/ size, diff1, diff2; float *f; int i1, j1; int res; @@ -428,7 +428,7 @@ int filterDispositionLevelFFT(const CvLSVMFilterObject *Fi, const CvLSVMFftImage m1 = featMapImage->dimX; n2 = Fi->sizeY; m2 = Fi->sizeX; - p = featMapImage->numFeatures; + //p = featMapImage->numFeatures; (*scoreFi) = NULL; (*pointsX) = NULL; (*pointsY) = NULL; @@ -611,7 +611,7 @@ int maxFunctionalScoreFixedLevel(const CvLSVMFilterObject **all_F, int n, float *score, CvPoint **points, int *kPoints, CvPoint ***partsDisplacement) { - int i, j, k, dimX, dimY, nF0, mF0, p; + int i, j, k, dimX, dimY, nF0, mF0/*, p*/; int diff1, diff2, index, last, partsLevel; CvLSVMFilterDisposition **disposition; float *f; @@ -639,7 +639,7 @@ int maxFunctionalScoreFixedLevel(const CvLSVMFilterObject **all_F, int n, dimY = H->pyramid[level]->sizeY; // Number of features - p = H->pyramid[level]->numFeatures; + //p = H->pyramid[level]->numFeatures; // Getting dimension of root filter nF0 = all_F[0]->sizeY; @@ -860,7 +860,7 @@ int thresholdFunctionalScoreFixedLevel(const CvLSVMFilterObject **all_F, int n, float **score, CvPoint **points, int *kPoints, CvPoint ***partsDisplacement) { - int i, j, k, dimX, dimY, nF0, mF0, p; + int i, j, k, dimX, dimY, nF0, mF0/*, p*/; int diff1, diff2, index, last, partsLevel; CvLSVMFilterDisposition **disposition; float *f; @@ -887,7 +887,7 @@ int thresholdFunctionalScoreFixedLevel(const CvLSVMFilterObject **all_F, int n, dimY = H->pyramid[level]->sizeY; // Number of features - p = H->pyramid[level]->numFeatures; + //p = H->pyramid[level]->numFeatures; // 
Getting dimension of root filter nF0 = all_F[0]->sizeY; diff --git a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp index a54792f..8357155 100644 --- a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp @@ -56,12 +56,12 @@ class CV_EXPORTS Estimator public: virtual ~Estimator() {} - void operator ()(const std::vector &features, const std::vector &pairwise_matches, + void operator ()(const std::vector &features, const std::vector &pairwise_matches, std::vector &cameras) { estimate(features, pairwise_matches, cameras); } protected: - virtual void estimate(const std::vector &features, const std::vector &pairwise_matches, + virtual void estimate(const std::vector &features, const std::vector &pairwise_matches, std::vector &cameras) = 0; }; @@ -72,8 +72,8 @@ public: HomographyBasedEstimator(bool is_focals_estimated = false) : is_focals_estimated_(is_focals_estimated) {} -private: - void estimate(const std::vector &features, const std::vector &pairwise_matches, +private: + void estimate(const std::vector &features, const std::vector &pairwise_matches, std::vector &cameras); bool is_focals_estimated_; @@ -84,10 +84,10 @@ class CV_EXPORTS BundleAdjusterBase : public Estimator { public: const Mat refinementMask() const { return refinement_mask_.clone(); } - void setRefinementMask(const Mat &mask) - { + void setRefinementMask(const Mat &mask) + { CV_Assert(mask.type() == CV_8U && mask.size() == Size(3, 3)); - refinement_mask_ = mask.clone(); + refinement_mask_ = mask.clone(); } double confThresh() const { return conf_thresh_; } @@ -97,17 +97,17 @@ public: void setTermCriteria(const CvTermCriteria& term_criteria) { term_criteria_ = term_criteria; } protected: - BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement) - : num_params_per_cam_(num_params_per_cam), - num_errs_per_measurement_(num_errs_per_measurement) - { + BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement) + : num_params_per_cam_(num_params_per_cam), + num_errs_per_measurement_(num_errs_per_measurement) + { setRefinementMask(Mat::ones(3, 3, CV_8U)); - setConfThresh(1.); + setConfThresh(1.); setTermCriteria(cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON)); } // Runs bundle adjustment - virtual void estimate(const std::vector &features, + virtual void estimate(const std::vector &features, const std::vector &pairwise_matches, std::vector &cameras); @@ -143,7 +143,7 @@ protected: // Minimizes reprojection error. -// It can estimate focal length, aspect ratio, principal point. +// It can estimate focal length, aspect ratio, principal point. // You can affect only on them via the refinement mask. 
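[Editor's note on the preceding objdetect hunks (featurepyramid.cpp, latentsvm.cpp, lsvmparser.cpp, matching.cpp): the variables err, size, p and buf were assigned but never read, which GCC reports via -Wunused-but-set-variable, so the patch comments them out and keeps only the calls. The sketch below illustrates the idiom with an invented function name.]

    #include <cstdio>

    static int computeFeatureMap(int level)        // stand-in for a status-returning helper
    {
        return level * 2;
    }

    int main()
    {
        // int err;
        // err = computeFeatureMap(3);             // old form: 'err' set but never used -> warning
        (void)computeFeatureMap(3);                // call kept, result explicitly discarded

        int used = computeFeatureMap(5);           // a value that is actually read does not warn
        std::printf("%d\n", used);
        return 0;
    }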
class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase { @@ -177,7 +177,7 @@ private: }; -enum CV_EXPORTS WaveCorrectKind +enum WaveCorrectKind { WAVE_CORRECT_HORIZ, WAVE_CORRECT_VERT @@ -193,10 +193,10 @@ void CV_EXPORTS waveCorrect(std::vector &rmats, WaveCorrectKind kind); std::string CV_EXPORTS matchesGraphAsString(std::vector &pathes, std::vector &pairwise_matches, float conf_threshold); -std::vector CV_EXPORTS leaveBiggestComponent(std::vector &features, std::vector &pairwise_matches, +std::vector CV_EXPORTS leaveBiggestComponent(std::vector &features, std::vector &pairwise_matches, float conf_threshold); -void CV_EXPORTS findMaxSpanningTree(int num_images, const std::vector &pairwise_matches, +void CV_EXPORTS findMaxSpanningTree(int num_images, const std::vector &pairwise_matches, Graph &span_tree, std::vector ¢ers); } // namespace detail diff --git a/modules/stitching/perf/perf_stich.cpp b/modules/stitching/perf/perf_stich.cpp index 63cb433..4051e92 100644 --- a/modules/stitching/perf/perf_stich.cpp +++ b/modules/stitching/perf/perf_stich.cpp @@ -33,7 +33,6 @@ PERF_TEST_P(stitch, a123, TEST_DETECTORS) imgs.push_back( imread( getDataPath("stitching/a2.jpg") ) ); imgs.push_back( imread( getDataPath("stitching/a3.jpg") ) ); - Stitcher::Status status; Ptr featuresFinder = GetParam() == "orb" ? (detail::FeaturesFinder*)new detail::OrbFeaturesFinder() : (detail::FeaturesFinder*)new detail::SurfFeaturesFinder(); @@ -53,7 +52,7 @@ PERF_TEST_P(stitch, a123, TEST_DETECTORS) stitcher.setRegistrationResol(WORK_MEGAPIX); startTimer(); - status = stitcher.stitch(imgs, pano); + stitcher.stitch(imgs, pano); stopTimer(); } } @@ -66,7 +65,6 @@ PERF_TEST_P(stitch, b12, TEST_DETECTORS) imgs.push_back( imread( getDataPath("stitching/b1.jpg") ) ); imgs.push_back( imread( getDataPath("stitching/b2.jpg") ) ); - Stitcher::Status status; Ptr featuresFinder = GetParam() == "orb" ? 
(detail::FeaturesFinder*)new detail::OrbFeaturesFinder() : (detail::FeaturesFinder*)new detail::SurfFeaturesFinder(); @@ -86,7 +84,7 @@ PERF_TEST_P(stitch, b12, TEST_DETECTORS) stitcher.setRegistrationResol(WORK_MEGAPIX); startTimer(); - status = stitcher.stitch(imgs, pano); + stitcher.stitch(imgs, pano); stopTimer(); } } diff --git a/modules/stitching/src/exposure_compensate.cpp b/modules/stitching/src/exposure_compensate.cpp index 8345423..23bc161 100644 --- a/modules/stitching/src/exposure_compensate.cpp +++ b/modules/stitching/src/exposure_compensate.cpp @@ -82,7 +82,7 @@ void GainCompensator::feed(const vector &corners, const vector &imag Mat_ N(num_images, num_images); N.setTo(0); Mat_ I(num_images, num_images); I.setTo(0); - Rect dst_roi = resultRoi(corners, images); + //Rect dst_roi = resultRoi(corners, images); Mat subimg1, subimg2; Mat_ submask1, submask2, intersect; @@ -190,7 +190,7 @@ void BlocksGainCompensator::feed(const vector &corners, const vector block_corners.push_back(corners[img_idx] + bl_tl); block_images.push_back(images[img_idx](Rect(bl_tl, bl_br))); - block_masks.push_back(make_pair(masks[img_idx].first(Rect(bl_tl, bl_br)), + block_masks.push_back(make_pair(masks[img_idx].first(Rect(bl_tl, bl_br)), masks[img_idx].second)); } } @@ -201,7 +201,7 @@ void BlocksGainCompensator::feed(const vector &corners, const vector vector gains = compensator.gains(); gain_maps_.resize(num_images); - Mat_ ker(1, 3); + Mat_ ker(1, 3); ker(0,0) = 0.25; ker(0,1) = 0.5; ker(0,2) = 0.25; int bl_idx = 0; @@ -213,7 +213,7 @@ void BlocksGainCompensator::feed(const vector &corners, const vector for (int by = 0; by < bl_per_img.height; ++by) for (int bx = 0; bx < bl_per_img.width; ++bx, ++bl_idx) gain_maps_[img_idx](by, bx) = static_cast(gains[bl_idx]); - + sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker); sepFilter2D(gain_maps_[img_idx], gain_maps_[img_idx], CV_32F, ker, ker); } diff --git a/modules/stitching/src/stitcher.cpp b/modules/stitching/src/stitcher.cpp index 63f3284..b332535 100644 --- a/modules/stitching/src/stitcher.cpp +++ b/modules/stitching/src/stitcher.cpp @@ -206,7 +206,7 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano) Mat img_warped, img_warped_s; Mat dilated_mask, seam_mask, mask, mask_warped; - double compose_seam_aspect = 1; + //double compose_seam_aspect = 1; double compose_work_aspect = 1; bool is_blender_prepared = false; @@ -227,7 +227,7 @@ Stitcher::Status Stitcher::composePanorama(InputArray images, OutputArray pano) is_compose_scale_set = true; // Compute relative scales - compose_seam_aspect = compose_scale / seam_scale_; + //compose_seam_aspect = compose_scale / seam_scale_; compose_work_aspect = compose_scale / work_scale_; // Update warped image scale diff --git a/modules/ts/include/opencv2/ts/ts.hpp b/modules/ts/include/opencv2/ts/ts.hpp index a27d8b6..0c68ddf 100644 --- a/modules/ts/include/opencv2/ts/ts.hpp +++ b/modules/ts/include/opencv2/ts/ts.hpp @@ -38,6 +38,9 @@ #ifndef GTEST_USES_SIMPLE_RE # define GTEST_USES_SIMPLE_RE 0 #endif +#ifndef GTEST_USES_POSIX_RE +# define GTEST_USES_POSIX_RE 0 +#endif #ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wmissing-declarations" #endif diff --git a/modules/ts/include/opencv2/ts/ts_gtest.h b/modules/ts/include/opencv2/ts/ts_gtest.h index dbcd06e..dc0fdfb 100644 --- a/modules/ts/include/opencv2/ts/ts_gtest.h +++ b/modules/ts/include/opencv2/ts/ts_gtest.h @@ -412,19 +412,22 @@ // . 
# include // NOLINT -# define GTEST_USES_POSIX_RE 1 +# define GTEST_USES_POSIX_RE 1 +# define GTEST_USES_SIMPLE_RE 0 #elif GTEST_OS_WINDOWS // is not available on Windows. Use our own simple regex // implementation instead. # define GTEST_USES_SIMPLE_RE 1 +# define GTEST_USES_POSIX_RE 0 #else // may not be available on this platform. Use our own // simple regex implementation instead. # define GTEST_USES_SIMPLE_RE 1 +# define GTEST_USES_POSIX_RE 0 #endif // GTEST_HAS_POSIX_RE @@ -1678,6 +1681,8 @@ inline bool operator!=(const GTEST_10_TUPLE_(T)& t, // Determines whether test results can be streamed to a socket. #if GTEST_OS_LINUX # define GTEST_CAN_STREAM_RESULTS_ 1 +#else +# define GTEST_CAN_STREAM_RESULTS_ 0 #endif // Defines some utility macros. diff --git a/samples/c/blobtrack_sample.cpp b/samples/c/blobtrack_sample.cpp index 7020b1b..fe57092 100644 --- a/samples/c/blobtrack_sample.cpp +++ b/samples/c/blobtrack_sample.cpp @@ -371,7 +371,7 @@ int main(int argc, char* argv[]) CvBlobTrackerAutoParam1 param = {0}; CvBlobTrackerAuto* pTracker = NULL; - float scale = 1; + //float scale = 1; const char* scale_name = NULL; char* yml_name = NULL; char** yml_video_names = NULL; @@ -387,7 +387,7 @@ int main(int argc, char* argv[]) const char* bta_name = NULL; char* bta_data_name = NULL; char* track_name = NULL; - char* comment_name = NULL; + //char* comment_name = NULL; char* FGTrainFrames = NULL; char* log_name = NULL; char* savestate_name = NULL; @@ -462,7 +462,7 @@ int main(int argc, char* argv[]) RO("bta_data=",bta_data_name); RO("btgen=",btgen_name); RO("track=",track_name); - RO("comment=",comment_name); + //RO("comment=",comment_name); RO("FGTrainFrames=",FGTrainFrames); RO("log=",log_name); RO("savestate=",savestate_name); @@ -512,8 +512,8 @@ int main(int argc, char* argv[]) if(!scale_name) scale_name = "1"; } - if(scale_name) - scale = (float)atof(scale_name); +// if(scale_name) + // scale = (float)atof(scale_name); for(pFGModule=FGDetector_Modules; pFGModule->nickname; ++pFGModule) if( fg_name && MY_STRICMP(fg_name,pFGModule->nickname)==0 ) break; diff --git a/samples/cpp/brief_match_test.cpp b/samples/cpp/brief_match_test.cpp index 291da19..9223f21 100644 --- a/samples/cpp/brief_match_test.cpp +++ b/samples/cpp/brief_match_test.cpp @@ -112,12 +112,11 @@ int main(int argc, const char ** argv) vector mpts_1, mpts_2; matches2points(matches_popcount, kpts_1, kpts_2, mpts_1, mpts_2); //Extract a list of the (x,y) location of the matches - vector outlier_mask; + vector outlier_mask; Mat H = findHomography(mpts_2, mpts_1, RANSAC, 1, outlier_mask); Mat outimg; - drawMatches(im2, kpts_2, im1, kpts_1, matches_popcount, outimg, Scalar::all(-1), Scalar::all(-1), - *(const vector*)(void*)(&outlier_mask)); + drawMatches(im2, kpts_2, im1, kpts_1, matches_popcount, outimg, Scalar::all(-1), Scalar::all(-1), outlier_mask); imshow("matches - popcount - outliers removed", outimg); Mat warped; diff --git a/samples/cpp/build3dmodel.cpp b/samples/cpp/build3dmodel.cpp index 8169c01..cd23aae 100644 --- a/samples/cpp/build3dmodel.cpp +++ b/samples/cpp/build3dmodel.cpp @@ -164,7 +164,7 @@ static void findConstrainedCorrespondences(const Mat& _F, { Point2f p1 = keypoints1[i].pt; double bestDist1 = DBL_MAX, bestDist2 = DBL_MAX; - int bestIdx1 = -1, bestIdx2 = -1; + int bestIdx1 = -1;//, bestIdx2 = -1; const float* d1 = descriptors1.ptr(i); for( int j = 0; j < (int)keypoints2.size(); j++ ) @@ -203,14 +203,14 @@ static void findConstrainedCorrespondences(const Mat& _F, if( dist < bestDist1 ) { bestDist2 = bestDist1; 
- bestIdx2 = bestIdx1; + //bestIdx2 = bestIdx1; bestDist1 = dist; bestIdx1 = (int)j; } else if( dist < bestDist2 ) { bestDist2 = dist; - bestIdx2 = (int)j; + //bestIdx2 = (int)j; } } } diff --git a/samples/cpp/stitching_detailed.cpp b/samples/cpp/stitching_detailed.cpp index 39d6b24..a1dad69 100644 --- a/samples/cpp/stitching_detailed.cpp +++ b/samples/cpp/stitching_detailed.cpp @@ -632,7 +632,7 @@ int main(int argc, char* argv[]) Mat img_warped, img_warped_s; Mat dilated_mask, seam_mask, mask, mask_warped; Ptr blender; - double compose_seam_aspect = 1; + //double compose_seam_aspect = 1; double compose_work_aspect = 1; for (int img_idx = 0; img_idx < num_images; ++img_idx) @@ -648,7 +648,7 @@ int main(int argc, char* argv[]) is_compose_scale_set = true; // Compute relative scales - compose_seam_aspect = compose_scale / seam_scale; + //compose_seam_aspect = compose_scale / seam_scale; compose_work_aspect = compose_scale / work_scale; // Update warped image scale -- 2.7.4
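[Editor's note on the ts.hpp / ts_gtest.h hunks above: every googletest feature macro that is later consulted with "#if MACRO" now gets an explicit 0 definition on platforms where the feature is absent (GTEST_USES_POSIX_RE, GTEST_USES_SIMPLE_RE, GTEST_CAN_STREAM_RESULTS_), so MinGW builds with -Wundef see no undefined identifiers. A standalone sketch of the pattern, using invented DEMO_* macros rather than the gtest ones:]

    #include <cstdio>

    #if defined _WIN32                      // MinGW / MSVC: no <regex.h>
    #  define DEMO_USES_SIMPLE_RE 1
    #  define DEMO_USES_POSIX_RE  0         // defined to 0 instead of left undefined
    #else
    #  define DEMO_USES_SIMPLE_RE 0
    #  define DEMO_USES_POSIX_RE  1
    #endif

    int main()
    {
    #if DEMO_USES_POSIX_RE                  // safe: the macro always has a value
        std::puts("POSIX regex engine");
    #else
        std::puts("built-in simple regex engine");
    #endif
        return 0;
    }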
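[Editor's note on the motion_estimators.hpp hunk above, where "enum CV_EXPORTS WaveCorrectKind" becomes a plain enum: an enum emits no linker symbol, so a dllexport/visibility macro on it is at best meaningless, and GCC on MinGW warns that the attribute is ignored. The DEMO_EXPORTS macro below merely imitates the shape of such an export macro for illustration.]

    #if defined _WIN32 && defined DEMO_BUILD_DLL
    #  define DEMO_EXPORTS __declspec(dllexport)
    #else
    #  define DEMO_EXPORTS
    #endif

    class DEMO_EXPORTS Widget               // classes and functions are what need exporting
    {
    public:
        int id() const { return 42; }
    };

    // enum DEMO_EXPORTS Kind { HORIZ, VERT };   // pre-patch form: attribute ignored/warned
    enum Kind { HORIZ, VERT };                   // post-patch form: plain enum

    int main()
    {
        Widget w;
        Kind k = HORIZ;
        return (k == HORIZ) ? w.id() - 42 : 1;   // returns 0 on success
    }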