if(tbb_need_GENERIC_DWORD_LOAD_STORE)
  # needed by TBB 4.0 update 1 and 2; fixed in TBB 4.0 update 3, but that version has 2 new problems
add_definitions(-D__TBB_USE_GENERIC_DWORD_LOAD_STORE=1)
+ set(tbb_need_GENERIC_DWORD_LOAD_STORE ON PARENT_SCOPE)
endif()
add_library(tbb STATIC ${lib_srcs} ${lib_hdrs} "${CMAKE_CURRENT_SOURCE_DIR}/android_additional.h" "${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}")
--- /dev/null
+# ===================================================================================
+# The OpenCV CMake configuration file
+#
+# ** File generated automatically, do not modify **
+#
+# Usage from an external project:
+# In your CMakeLists.txt, add these lines:
+#
+# FIND_PACKAGE(OpenCV REQUIRED)
+# TARGET_LINK_LIBRARIES(MY_TARGET_NAME ${OpenCV_LIBS})
+#
+# Or you can search for specific OpenCV modules:
+#
+# FIND_PACKAGE(OpenCV REQUIRED core highgui)
+#
+# If the module is found then OPENCV_<MODULE>_FOUND is set to TRUE.
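+#
+#    For example (an illustrative sketch), you can test the per-module result
+#    and only link when the requested module is available:
+#
+#    FIND_PACKAGE(OpenCV REQUIRED core highgui)
+#    IF(OPENCV_HIGHGUI_FOUND)
+#      TARGET_LINK_LIBRARIES(MY_TARGET_NAME ${OpenCV_LIBS})
+#    ENDIF()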
+#
+# This file will define the following variables:
+#      - OpenCV_LIBS                     : The list of libraries to link against.
+#      - OpenCV_LIB_DIR                  : The directory(ies) where the lib files are. Calling LINK_DIRECTORIES
+#                                          with this path is NOT needed.
+# - OpenCV_INCLUDE_DIRS : The OpenCV include directories.
+# - OpenCV_COMPUTE_CAPABILITIES : The version of compute capability
+# - OpenCV_ANDROID_NATIVE_API_LEVEL : Minimum required level of Android API
+# - OpenCV_VERSION : The version of this OpenCV build. Example: "2.4.0"
+# - OpenCV_VERSION_MAJOR : Major version part of OpenCV_VERSION. Example: "2"
+# - OpenCV_VERSION_MINOR : Minor version part of OpenCV_VERSION. Example: "4"
+# - OpenCV_VERSION_PATCH : Patch version part of OpenCV_VERSION. Example: "0"
+#
+# Advanced variables:
+# - OpenCV_SHARED
+# - OpenCV_CONFIG_PATH
+# - OpenCV_INSTALL_PATH (not set on Windows)
+# - OpenCV_LIB_COMPONENTS
+# - OpenCV_USE_MANGLED_PATHS
+# - OpenCV_HAVE_ANDROID_CAMERA
+#
+# ===================================================================================
+#
+# Windows pack specific options:
+# - OpenCV_STATIC
+# - OpenCV_CUDA
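+#
+#    These options can be set before FIND_PACKAGE to select a particular flavour
+#    of the pack; an illustrative sketch:
+#
+#    SET(OpenCV_STATIC ON)   # request the static libraries
+#    SET(OpenCV_CUDA   ON)   # request the CUDA-enabled binaries
+#    FIND_PACKAGE(OpenCV REQUIRED)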
+
+if(NOT DEFINED OpenCV_STATIC)
+ # look for global setting
+ if(NOT DEFINED BUILD_SHARED_LIBS OR BUILD_SHARED_LIBS)
+ set(OpenCV_STATIC OFF)
+ else()
+ set(OpenCV_STATIC ON)
+ endif()
+endif()
+
+if(NOT DEFINED OpenCV_CUDA)
+  # if the user's app uses CUDA, then it probably wants CUDA-enabled OpenCV binaries
+ if(CUDA_FOUND)
+ set(OpenCV_CUDA ON)
+ endif()
+endif()
+
+if(MSVC)
+ if(CMAKE_CL_64)
+ set(OpenCV_ARCH x64)
+ set(OpenCV_TBB_ARCH intel64)
+ else()
+ set(OpenCV_ARCH x86)
+ set(OpenCV_TBB_ARCH ia32)
+ endif()
+ if(MSVC_VERSION EQUAL 1400)
+ set(OpenCV_RUNTIME vc8)
+ elseif(MSVC_VERSION EQUAL 1500)
+ set(OpenCV_RUNTIME vc9)
+ elseif(MSVC_VERSION EQUAL 1600)
+ set(OpenCV_RUNTIME vc10)
+ elseif(MSVC_VERSION EQUAL 1700)
+ set(OpenCV_RUNTIME vc11)
+ endif()
+elseif(MINGW)
+ set(OpenCV_RUNTIME mingw)
+
+ execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpmachine
+ OUTPUT_VARIABLE OPENCV_GCC_TARGET_MACHINE
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+  if(OPENCV_GCC_TARGET_MACHINE MATCHES "64")
+ set(MINGW64 1)
+ set(OpenCV_ARCH x64)
+ else()
+ set(OpenCV_ARCH x86)
+ endif()
+endif()
+
+if(CMAKE_VERSION VERSION_GREATER 2.6.2)
+ unset(OpenCV_CONFIG_PATH CACHE)
+endif()
+
+get_filename_component(OpenCV_CONFIG_PATH "${CMAKE_CURRENT_LIST_FILE}" PATH CACHE)
+if(OpenCV_RUNTIME AND OpenCV_ARCH)
+ if(OpenCV_STATIC AND EXISTS "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}/staticlib/OpenCVConfig.cmake")
+ if(OpenCV_CUDA AND EXISTS "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}/staticlib/OpenCVConfig.cmake")
+ set(OpenCV_LIB_PATH "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}/staticlib")
+ else()
+ set(OpenCV_LIB_PATH "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}/staticlib")
+ endif()
+ elseif(EXISTS "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}/lib/OpenCVConfig.cmake")
+ if(OpenCV_CUDA AND EXISTS "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}/lib/OpenCVConfig.cmake")
+ set(OpenCV_LIB_PATH "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}/lib")
+ else()
+ set(OpenCV_LIB_PATH "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}/lib")
+ endif()
+ endif()
+endif()
+
+if(OpenCV_LIB_PATH AND EXISTS "${OpenCV_LIB_PATH}/OpenCVConfig.cmake")
+ set(OpenCV_LIB_DIR_OPT "${OpenCV_LIB_PATH}" CACHE PATH "Path where release OpenCV libraries are located" FORCE)
+ set(OpenCV_LIB_DIR_DBG "${OpenCV_LIB_PATH}" CACHE PATH "Path where debug OpenCV libraries are located" FORCE)
+  set(OpenCV_3RDPARTY_LIB_DIR_OPT "${OpenCV_LIB_PATH}" CACHE PATH "Path where release 3rdparty OpenCV dependencies are located" FORCE)
+  set(OpenCV_3RDPARTY_LIB_DIR_DBG "${OpenCV_LIB_PATH}" CACHE PATH "Path where debug 3rdparty OpenCV dependencies are located" FORCE)
+
+ include("${OpenCV_LIB_PATH}/OpenCVConfig.cmake")
+
+ if(OpenCV_CUDA)
+ set(_OpenCV_LIBS "")
+ foreach(_lib ${OpenCV_LIBS})
+ string(REPLACE "${OpenCV_CONFIG_PATH}/gpu/${OpenCV_ARCH}/${OpenCV_RUNTIME}" "${OpenCV_CONFIG_PATH}/${OpenCV_ARCH}/${OpenCV_RUNTIME}" _lib2 "${_lib}")
+ if(NOT EXISTS "${_lib}" AND EXISTS "${_lib2}")
+ list(APPEND _OpenCV_LIBS "${_lib2}")
+ else()
+ list(APPEND _OpenCV_LIBS "${_lib}")
+ endif()
+ endforeach()
+ set(OpenCV_LIBS ${_OpenCV_LIBS})
+ endif()
+ set(OpenCV_FOUND TRUE CACHE BOOL "" FORCE)
+ set(OPENCV_FOUND TRUE CACHE BOOL "" FORCE)
+
+ if(NOT OpenCV_FIND_QUIETLY)
+ message(STATUS "Found OpenCV ${OpenCV_VERSION} in ${OpenCV_LIB_PATH}")
+ if(NOT OpenCV_LIB_PATH MATCHES "/staticlib")
+ get_filename_component(_OpenCV_LIB_PATH "${OpenCV_LIB_PATH}/../bin" ABSOLUTE)
+ file(TO_NATIVE_PATH "${_OpenCV_LIB_PATH}" _OpenCV_LIB_PATH)
+ message(STATUS "You might need to add ${_OpenCV_LIB_PATH} to your PATH to be able to run your applications.")
+ if(OpenCV_LIB_PATH MATCHES "/gpu/")
+ string(REPLACE "\\gpu" "" _OpenCV_LIB_PATH2 "${_OpenCV_LIB_PATH}")
+ message(STATUS "GPU support is enabled so you might also need ${_OpenCV_LIB_PATH2} in your PATH (it must go after the ${_OpenCV_LIB_PATH}).")
+ endif()
+ endif()
+ endif()
+else()
+ if(NOT OpenCV_FIND_QUIETLY)
+    message(WARNING "Found OpenCV 2.4.0 Windows Super Pack but it has no binaries compatible with your configuration.
+    You should manually point the CMake variable OpenCV_DIR to your build of the OpenCV library.")
+ endif()
+ set(OpenCV_FOUND FALSE CACHE BOOL "" FORCE)
+ set(OPENCV_FOUND FALSE CACHE BOOL "" FORCE)
+endif()
+
add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/tbb")
include_directories(SYSTEM ${TBB_INCLUDE_DIRS})
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} tbb)
- add_definitions(-DTBB_USE_GCC_BUILTINS=1 -D__TBB_GCC_BUILTIN_ATOMICS_PRESENT=1 -D__TBB_USE_GENERIC_DWORD_LOAD_STORE=1)
+ add_definitions(-DTBB_USE_GCC_BUILTINS=1 -D__TBB_GCC_BUILTIN_ATOMICS_PRESENT=1)
+ if(tbb_need_GENERIC_DWORD_LOAD_STORE)
+ add_definitions(-D__TBB_USE_GENERIC_DWORD_LOAD_STORE=1)
+ endif()
set(HAVE_TBB 1)
elseif(UNIX AND NOT APPLE)
PKG_CHECK_MODULES(TBB tbb)
list(APPEND ${__depsvar} "${d}")
endif()
endforeach()
+ unset(__depsvar)
+
+ ocv_list_unique(OPENCV_MODULE_${full_modname}_REQ_DEPS)
+ ocv_list_unique(OPENCV_MODULE_${full_modname}_OPT_DEPS)
- if(OPENCV_MODULE_${full_modname}_REQ_DEPS)
- list(REMOVE_DUPLICATES OPENCV_MODULE_${full_modname}_REQ_DEPS)
- endif()
- if(OPENCV_MODULE_${full_modname}_OPT_DEPS)
- list(REMOVE_DUPLICATES OPENCV_MODULE_${full_modname}_OPT_DEPS)
- endif()
set(OPENCV_MODULE_${full_modname}_REQ_DEPS ${OPENCV_MODULE_${full_modname}_REQ_DEPS} CACHE INTERNAL "Required dependencies of ${full_modname} module")
set(OPENCV_MODULE_${full_modname}_OPT_DEPS ${OPENCV_MODULE_${full_modname}_OPT_DEPS} CACHE INTERNAL "Optional dependencies of ${full_modname} module")
-
- unset(__depsvar)
endmacro()
# declare new OpenCV module in current folder
")
endif()
- #remember module details
if(NOT DEFINED the_description)
set(the_description "The ${name} OpenCV module")
endif()
- set(OPENCV_MODULE_${the_module}_DESCRIPTION "${the_description}" CACHE INTERNAL "Brief description of ${the_module} module")
- set(OPENCV_MODULE_${the_module}_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}" CACHE INTERNAL "Location of ${the_module} module sources")
- #create option to enable/disable this module
if(NOT DEFINED BUILD_${the_module}_INIT)
set(BUILD_${the_module}_INIT ON)
endif()
+
+ # create option to enable/disable this module
option(BUILD_${the_module} "Include ${the_module} module into the OpenCV build" ${BUILD_${the_module}_INIT})
+ # remember the module details
+ set(OPENCV_MODULE_${the_module}_DESCRIPTION "${the_description}" CACHE INTERNAL "Brief description of ${the_module} module")
+ set(OPENCV_MODULE_${the_module}_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}" CACHE INTERNAL "Location of ${the_module} module sources")
+
+ # parse list of dependencies
if("${ARGV1}" STREQUAL "INTERNAL" OR "${ARGV1}" STREQUAL "BINDINGS")
    set(OPENCV_MODULE_${the_module}_CLASS "${ARGV1}" CACHE INTERNAL "The category of the module")
set(__ocv_argn__ ${ARGN})
set(OPENCV_MODULES_DISABLED_USER ${OPENCV_MODULES_DISABLED_USER} "${the_module}" CACHE INTERNAL "List of OpenCV modules explicitly disabled by user")
endif()
- #TODO: add submodules if any
+ # TODO: add submodules if any
- #stop processing of current file
+ # stop processing of current file
return()
else(OPENCV_INITIAL_PASS)
if(NOT BUILD_${the_module})
- #extra protection from redefinition
- return()
+ return() # extra protection from redefinition
endif()
project(${the_module})
endif(OPENCV_INITIAL_PASS)
endmacro()
-# Internal macro; disables OpenCV module
-# ocv_module_turn_off(<module name>)
-macro(__ocv_module_turn_off the_module)
- list(APPEND OPENCV_MODULES_DISABLED_AUTO "${the_module}")
- list(REMOVE_ITEM OPENCV_MODULES_BUILD "${the_module}")
- list(REMOVE_ITEM OPENCV_MODULES_PUBLIC "${the_module}")
- set(HAVE_${the_module} OFF CACHE INTERNAL "Module ${the_module} can not be built in current configuration")
-endmacro()
-
+# excludes a module from the current configuration
macro(ocv_module_disable module)
set(__modname ${module})
if(NOT __modname MATCHES "^opencv_")
set(OPENCV_MODULE_${__modname}_LOCATION "${CMAKE_CURRENT_SOURCE_DIR}" CACHE INTERNAL "Location of ${__modname} module sources")
  set(OPENCV_MODULES_DISABLED_FORCE "${OPENCV_MODULES_DISABLED_FORCE}" CACHE INTERNAL "List of OpenCV modules which cannot be built in the current configuration")
unset(__modname)
- return()#leave the current folder
+ return() # leave the current folder
endmacro()
+# Internal macro; partly disables OpenCV module
+macro(__ocv_module_turn_off the_module)
+ list(APPEND OPENCV_MODULES_DISABLED_AUTO "${the_module}")
+ list(REMOVE_ITEM OPENCV_MODULES_BUILD "${the_module}")
+ list(REMOVE_ITEM OPENCV_MODULES_PUBLIC "${the_module}")
+  set(HAVE_${the_module} OFF CACHE INTERNAL "Module ${the_module} cannot be built in the current configuration")
+endmacro()
+
+# Internal macro for dependency tracking
macro(__ocv_flatten_module_required_dependencies the_module)
set(__flattened_deps "")
set(__resolved_deps "")
set(__req_depends ${OPENCV_MODULE_${the_module}_REQ_DEPS})
while(__req_depends)
- list(GET __req_depends 0 __dep)
- list(REMOVE_AT __req_depends 0)
+ ocv_list_pop_front(__req_depends __dep)
if(__dep STREQUAL the_module)
- #TODO: think how to deal with cyclic dependency
- __ocv_module_turn_off(${the_module})
+ __ocv_module_turn_off(${the_module}) # TODO: think how to deal with cyclic dependency
break()
- elseif("${OPENCV_MODULES_DISABLED_USER};${OPENCV_MODULES_DISABLED_AUTO}" MATCHES "(^|;)${__dep}(;|$)")
- #depends on disabled module
- __ocv_module_turn_off(${the_module})
+ elseif(";${OPENCV_MODULES_DISABLED_USER};${OPENCV_MODULES_DISABLED_AUTO};" MATCHES ";${__dep};")
+ __ocv_module_turn_off(${the_module}) # depends on disabled module
break()
- elseif("${OPENCV_MODULES_BUILD}" MATCHES "(^|;)${__dep}(;|$)")
- if(__resolved_deps MATCHES "(^|;)${__dep}(;|$)")
- #all dependencies of this module are already resolved
- list(APPEND __flattened_deps "${__dep}")
+ elseif(";${OPENCV_MODULES_BUILD};" MATCHES ";${__dep};")
+ if(";${__resolved_deps};" MATCHES ";${__dep};")
+ list(APPEND __flattened_deps "${__dep}") # all dependencies of this module are already resolved
else()
- #put all required subdependencies before this dependency and mark it as resolved
+ # put all required subdependencies before this dependency and mark it as resolved
list(APPEND __resolved_deps "${__dep}")
list(INSERT __req_depends 0 ${OPENCV_MODULE_${__dep}_REQ_DEPS} ${__dep})
endif()
elseif(__dep MATCHES "^opencv_")
- #depends on missing module
- __ocv_module_turn_off(${the_module})
+ __ocv_module_turn_off(${the_module}) # depends on missing module
+      message(WARNING "Unknown module \"${__dep}\" is listed in the dependencies of the \"${the_module}\" module")
break()
else()
- #skip non-modules
+ # skip non-modules
endif()
endwhile()
set(OPENCV_MODULE_${the_module}_DEPS "")
endif()
- unset(__resolved_deps)
- unset(__flattened_deps)
- unset(__req_depends)
- unset(__dep)
+ ocv_clear_vars(__resolved_deps __flattened_deps __req_depends __dep)
endmacro()
+# Internal macro for dependency tracking
macro(__ocv_flatten_module_optional_dependencies the_module)
- set(__flattened_deps ${OPENCV_MODULE_${the_module}_DEPS})
- set(__resolved_deps ${OPENCV_MODULE_${the_module}_DEPS})
- set(__opt_depends ${OPENCV_MODULE_${the_module}_OPT_DEPS})
+ set(__flattened_deps "")
+ set(__resolved_deps "")
+ set(__opt_depends ${OPENCV_MODULE_${the_module}_REQ_DEPS} ${OPENCV_MODULE_${the_module}_OPT_DEPS})
while(__opt_depends)
- list(GET __opt_depends 0 __dep)
- list(REMOVE_AT __opt_depends 0)
+ ocv_list_pop_front(__opt_depends __dep)
if(__dep STREQUAL the_module)
- #TODO: think how to deal with cyclic dependency
- __ocv_module_turn_off(${the_module})
+ __ocv_module_turn_off(${the_module}) # TODO: think how to deal with cyclic dependency
break()
- elseif("${OPENCV_MODULES_BUILD}" MATCHES "(^|;)${__dep}(;|$)")
- if(__resolved_deps MATCHES "(^|;)${__dep}(;|$)")
- #all dependencies of this module are already resolved
- list(APPEND __flattened_deps "${__dep}")
+ elseif(";${OPENCV_MODULES_BUILD};" MATCHES ";${__dep};")
+ if(";${__resolved_deps};" MATCHES ";${__dep};")
+ list(APPEND __flattened_deps "${__dep}") # all dependencies of this module are already resolved
else()
- #put all subdependencies before this dependency and mark it as resolved
+ # put all subdependencies before this dependency and mark it as resolved
list(APPEND __resolved_deps "${__dep}")
list(INSERT __opt_depends 0 ${OPENCV_MODULE_${__dep}_REQ_DEPS} ${OPENCV_MODULE_${__dep}_OPT_DEPS} ${__dep})
endif()
else()
- #skip non-modules or missing modules
+ # skip non-modules or missing modules
endif()
endwhile()
+
if(__flattened_deps)
list(REMOVE_DUPLICATES __flattened_deps)
    set(OPENCV_MODULE_${the_module}_DEPS ${__flattened_deps})
  else()
    set(OPENCV_MODULE_${the_module}_DEPS "")
endif()
- unset(__resolved_deps)
- unset(__flattened_deps)
- unset(__opt_depends)
- unset(__dep)
+ ocv_clear_vars(__resolved_deps __flattened_deps __opt_depends __dep)
endmacro()
macro(__ocv_flatten_module_dependencies)
set(HAVE_${m} OFF CACHE INTERNAL "Module ${m} will not be built in current configuration")
endforeach()
foreach(m ${OPENCV_MODULES_BUILD})
- set(HAVE_${m} ON CACHE INTERNAL "Module ${m} will not be built in current configuration")
+ set(HAVE_${m} ON CACHE INTERNAL "Module ${m} will be built in current configuration")
__ocv_flatten_module_required_dependencies(${m})
endforeach()
foreach(m ${OPENCV_MODULES_BUILD})
__ocv_flatten_module_optional_dependencies(${m})
- #dependencies from other modules
+ # save dependencies from other modules
set(OPENCV_MODULE_${m}_DEPS ${OPENCV_MODULE_${m}_DEPS} CACHE INTERNAL "Flattened dependencies of ${m} module")
- #extra dependencies
+ # save extra dependencies
set(OPENCV_MODULE_${m}_DEPS_EXT ${OPENCV_MODULE_${m}_REQ_DEPS} ${OPENCV_MODULE_${m}_OPT_DEPS})
if(OPENCV_MODULE_${m}_DEPS_EXT AND OPENCV_MODULE_${m}_DEPS)
list(REMOVE_ITEM OPENCV_MODULE_${m}_DEPS_EXT ${OPENCV_MODULE_${m}_DEPS})
set(OPENCV_MODULE_${m}_DEPS_EXT ${OPENCV_MODULE_${m}_DEPS_EXT} CACHE INTERNAL "Extra dependencies of ${m} module")
endforeach()
+ # order modules by dependencies
+ set(OPENCV_MODULES_BUILD_ "")
+ foreach(m ${OPENCV_MODULES_BUILD})
+ list(APPEND OPENCV_MODULES_BUILD_ ${OPENCV_MODULE_${m}_DEPS} ${m})
+ endforeach()
+ ocv_list_unique(OPENCV_MODULES_BUILD_)
+
set(OPENCV_MODULES_PUBLIC ${OPENCV_MODULES_PUBLIC} CACHE INTERNAL "List of OpenCV modules marked for export")
- set(OPENCV_MODULES_BUILD ${OPENCV_MODULES_BUILD} CACHE INTERNAL "List of OpenCV modules included into the build")
+ set(OPENCV_MODULES_BUILD ${OPENCV_MODULES_BUILD_} CACHE INTERNAL "List of OpenCV modules included into the build")
set(OPENCV_MODULES_DISABLED_AUTO ${OPENCV_MODULES_DISABLED_AUTO} CACHE INTERNAL "List of OpenCV modules implicitly disabled due to dependencies")
endmacro()
endif()
set(__directories_observed "")
- #collect modules
+ # collect modules
set(OPENCV_INITIAL_PASS ON)
foreach(__path ${ARGN})
ocv_get_real_path(__path "${__path}")
+
list(FIND __directories_observed "${__path}" __pathIdx)
if(__pathIdx GREATER -1)
      message(FATAL_ERROR "The directory ${__path} is observed for OpenCV modules a second time.")
foreach(mod ${__ocvmodules})
ocv_get_real_path(__modpath "${__path}/${mod}")
if(EXISTS "${__modpath}/CMakeLists.txt")
+
list(FIND __directories_observed "${__modpath}" __pathIdx)
if(__pathIdx GREATER -1)
message(FATAL_ERROR "The module from ${__modpath} is already loaded.")
endif()
list(APPEND __directories_observed "${__modpath}")
- add_subdirectory("${__modpath}" "${CMAKE_CURRENT_BINARY_DIR}/${mod}/.${mod}")
+ if(OCV_MODULE_RELOCATE_ON_INITIAL_PASS)
+ file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${mod}/.${mod}")
+ file(COPY "${__modpath}/CMakeLists.txt" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/${mod}/.${mod}")
+ add_subdirectory("${CMAKE_CURRENT_BINARY_DIR}/${mod}/.${mod}" "${CMAKE_CURRENT_BINARY_DIR}/${mod}/.${mod}")
+ if("${OPENCV_MODULE_opencv_${mod}_LOCATION}" STREQUAL "${CMAKE_CURRENT_BINARY_DIR}/${mod}/.${mod}")
+ set(OPENCV_MODULE_opencv_${mod}_LOCATION "${__modpath}" CACHE PATH "" FORCE)
+ endif()
+ else()
+ add_subdirectory("${__modpath}" "${CMAKE_CURRENT_BINARY_DIR}/${mod}/.${mod}")
+ endif()
endif()
endforeach()
endif()
endforeach()
- unset(__ocvmodules)
- unset(__directories_observed)
- unset(__path)
- unset(__modpath)
- unset(__pathIdx)
+ ocv_clear_vars(__ocvmodules __directories_observed __path __modpath __pathIdx)
- #resolve dependencies
+ # resolve dependencies
__ocv_flatten_module_dependencies()
- #order modules by dependencies
- set(OPENCV_MODULES_BUILD_ "")
- foreach(m ${OPENCV_MODULES_BUILD})
- list(APPEND OPENCV_MODULES_BUILD_ ${OPENCV_MODULE_${m}_DEPS} ${m})
- endforeach()
- ocv_list_unique(OPENCV_MODULES_BUILD_)
-
- #create modules
+ # create modules
set(OPENCV_INITIAL_PASS OFF PARENT_SCOPE)
set(OPENCV_INITIAL_PASS OFF)
- foreach(m ${OPENCV_MODULES_BUILD_})
+ foreach(m ${OPENCV_MODULES_BUILD})
if(m MATCHES "^opencv_")
string(REGEX REPLACE "^opencv_" "" __shortname "${m}")
add_subdirectory("${OPENCV_MODULE_${m}_LOCATION}" "${CMAKE_CURRENT_BINARY_DIR}/${__shortname}")
macro(ocv_module_include_directories)
ocv_include_directories("${OPENCV_MODULE_${the_module}_LOCATION}/include"
"${OPENCV_MODULE_${the_module}_LOCATION}/src"
- "${CMAKE_CURRENT_BINARY_DIR}"#for precompiled headers
+ "${CMAKE_CURRENT_BINARY_DIR}" # for precompiled headers
)
ocv_include_modules(${OPENCV_MODULE_${the_module}_DEPS} ${ARGN})
endmacro()
endif()
# use full paths for module to be independent from the module location
- ocv_to_full_paths(OPENCV_MODULE_${the_module}_HEADERS)
+ ocv_convert_to_full_paths(OPENCV_MODULE_${the_module}_HEADERS)
set(OPENCV_MODULE_${the_module}_HEADERS ${OPENCV_MODULE_${the_module}_HEADERS} CACHE INTERNAL "List of header files for ${the_module}")
set(OPENCV_MODULE_${the_module}_SOURCES ${OPENCV_MODULE_${the_module}_SOURCES} CACHE INTERNAL "List of source files for ${the_module}")
# ocv_create_module(SKIP_LINK)
macro(ocv_create_module)
add_library(${the_module} ${OPENCV_MODULE_TYPE} ${OPENCV_MODULE_${the_module}_HEADERS} ${OPENCV_MODULE_${the_module}_SOURCES})
+
if(NOT "${ARGN}" STREQUAL "SKIP_LINK")
target_link_libraries(${the_module} ${OPENCV_MODULE_${the_module}_DEPS} ${OPENCV_MODULE_${the_module}_DEPS_EXT} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${ARGN})
endif()
+
add_dependencies(opencv_modules ${the_module})
if(ENABLE_SOLUTION_FOLDERS)
if(CMAKE_CROSSCOMPILING)
set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:secchk")
endif()
- set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:libc /DEBUG")
+ set_target_properties(${the_module} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:libc /DEBUG")
endif()
install(TARGETS ${the_module}
)
# only "public" headers need to be installed
- if(OPENCV_MODULE_${the_module}_HEADERS AND OPENCV_MODULES_PUBLIC MATCHES "(^|;)${the_module}(;|$)")
+ if(OPENCV_MODULE_${the_module}_HEADERS AND ";${OPENCV_MODULES_PUBLIC};" MATCHES ";${the_module};")
foreach(hdr ${OPENCV_MODULE_${the_module}_HEADERS})
string(REGEX REPLACE "^.*opencv2/" "opencv2/" hdr2 "${hdr}")
if(hdr2 MATCHES "^(opencv2/.*)/[^/]+.h(..)?$")
# Usage:
# ocv_add_precompiled_headers(${the_module})
macro(ocv_add_precompiled_headers the_target)
- if("${the_target}" MATCHES "^opencv_test_.*$")
- SET(pch_path "test/test_")
+ if("${the_target}" MATCHES "^opencv_test_.*$")
+ SET(pch_path "test/test_")
elseif("${the_target}" MATCHES "opencv_perf_gpu_cpu")
- SET(pch_path "perf_cpu/perf_cpu_")
- elseif("${the_target}" MATCHES "^opencv_perf_.*$")
- SET(pch_path "perf/perf_")
- else()
- SET(pch_path "src/")
- endif()
- set(pch_header "${CMAKE_CURRENT_SOURCE_DIR}/${pch_path}precomp.hpp")
-
- if(PCHSupport_FOUND AND ENABLE_PRECOMPILED_HEADERS AND EXISTS "${pch_header}")
- if(CMAKE_GENERATOR MATCHES Visual)
- set(${the_target}_pch "${CMAKE_CURRENT_SOURCE_DIR}/${pch_path}precomp.cpp")
- add_native_precompiled_header(${the_target} ${pch_header})
- elseif(CMAKE_GENERATOR MATCHES Xcode)
- add_native_precompiled_header(${the_target} ${pch_header})
- elseif(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_GENERATOR MATCHES "Makefiles|Ninja")
- add_precompiled_header(${the_target} ${pch_header})
- endif()
- endif()
- unset(pch_header)
- unset(pch_path)
- unset(${the_target}_pch)
+ SET(pch_path "perf_cpu/perf_cpu_")
+ elseif("${the_target}" MATCHES "^opencv_perf_.*$")
+ SET(pch_path "perf/perf_")
+ else()
+ SET(pch_path "src/")
+ endif()
+ ocv_add_precompiled_header_to_target(${the_target} "${CMAKE_CURRENT_SOURCE_DIR}/${pch_path}precomp.hpp")
+ unset(pch_path)
endmacro()
# short command for adding simple OpenCV module
endforeach()
endmacro()
-#auxiliary macro to parse arguments of ocv_add_accuracy_tests and ocv_add_perf_tests commands
+# auxiliary macro to parse arguments of ocv_add_accuracy_tests and ocv_add_perf_tests commands
macro(__ocv_parse_test_sources tests_type)
set(OPENCV_${tests_type}_${the_module}_SOURCES "")
set(OPENCV_${tests_type}_${the_module}_DEPS "")
# this is a command for adding OpenCV performance tests to the module
# ocv_add_perf_tests(<extra_dependencies>)
-macro(ocv_add_perf_tests)
+function(ocv_add_perf_tests)
set(perf_path "${CMAKE_CURRENT_SOURCE_DIR}/perf")
if(BUILD_PERF_TESTS AND EXISTS "${perf_path}")
__ocv_parse_test_sources(PERF ${ARGN})
if(OCV_DEPENDENCIES_FOUND)
set(the_target "opencv_perf_${name}")
- #project(${the_target})
+ # project(${the_target})
ocv_module_include_directories(${perf_deps} "${perf_path}")
add_dependencies(perf ${the_target})
endif()
else(OCV_DEPENDENCIES_FOUND)
- #TODO: warn about unsatisfied dependencies
+ # TODO: warn about unsatisfied dependencies
endif(OCV_DEPENDENCIES_FOUND)
endif()
-endmacro()
+endfunction()
# this is a command for adding OpenCV accuracy/regression tests to the module
# ocv_add_accuracy_tests([FILES <source group name> <list of sources>] [DEPENDS_ON] <list of extra dependencies>)
-macro(ocv_add_accuracy_tests)
+function(ocv_add_accuracy_tests)
set(test_path "${CMAKE_CURRENT_SOURCE_DIR}/test")
ocv_check_dependencies(${test_deps})
if(BUILD_TESTS AND EXISTS "${test_path}")
if(OCV_DEPENDENCIES_FOUND)
set(the_target "opencv_test_${name}")
- #project(${the_target})
+ # project(${the_target})
ocv_module_include_directories(${test_deps} "${test_path}")
ocv_add_precompiled_headers(${the_target})
else(OCV_DEPENDENCIES_FOUND)
- #TODO: warn about unsatisfied dependencies
+ # TODO: warn about unsatisfied dependencies
endif(OCV_DEPENDENCIES_FOUND)
endif()
-endmacro()
+endfunction()
-# internal macro; finds all link dependencies of module
+# internal macro; finds all link dependencies of the module
# should be used at the end of CMake processing
macro(__ocv_track_module_link_dependencies the_module optkind)
set(${the_module}_MODULE_DEPS_${optkind} "")
#ocv_list_reverse(${the_module}_EXTRA_DEPS_${optkind})
if(__has_cycle)
- #not sure if it can work
+ # not sure if it can work
list(APPEND ${the_module}_MODULE_DEPS_${optkind} "${the_module}")
endif()
  endif() # STATIC_LIBRARY
unset(__module_type)
-#message("${the_module}_MODULE_DEPS_${optkind}")
-#message(" ${${the_module}_MODULE_DEPS_${optkind}}")
-#message(" ${OPENCV_MODULE_${the_module}_DEPS}")
-#message("")
-#message("${the_module}_EXTRA_DEPS_${optkind}")
-#message(" ${${the_module}_EXTRA_DEPS_${optkind}}")
-#message("")
+ #message("${the_module}_MODULE_DEPS_${optkind}")
+ #message(" ${${the_module}_MODULE_DEPS_${optkind}}")
+ #message(" ${OPENCV_MODULE_${the_module}_DEPS}")
+ #message("")
+ #message("${the_module}_EXTRA_DEPS_${optkind}")
+ #message(" ${${the_module}_EXTRA_DEPS_${optkind}}")
+ #message("")
endmacro()
# creates lists of build dependencies needed for external projects
-# taken from http://www.vtk.org/Bug/view.php?id=1260 and slightly adjusted
+# taken from http://public.kitware.com/Bug/view.php?id=1260 and slightly adjusted
# - Try to find precompiled headers support for GCC 3.4 and 4.x
# Once done this will define:
SET(_PCH_include_prefix "-I")
SET(_PCH_isystem_prefix "-isystem")
-ELSEIF(WIN32)
- SET(PCHSupport_FOUND TRUE) # for experimental msvc support
+ELSEIF(CMAKE_GENERATOR MATCHES "^Visual.*$")
+ SET(PCHSupport_FOUND TRUE)
SET(_PCH_include_prefix "/I")
SET(_PCH_isystem_prefix "/I")
ELSE()
endif()
ENDMACRO(ADD_NATIVE_PRECOMPILED_HEADER)
+
+macro(ocv_add_precompiled_header_to_target the_target pch_header)
+ if(PCHSupport_FOUND AND ENABLE_PRECOMPILED_HEADERS AND EXISTS "${pch_header}")
+ if(CMAKE_GENERATOR MATCHES Visual)
+ string(REGEX REPLACE "hpp$" "cpp" ${the_target}_pch "${pch_header}")
+ add_native_precompiled_header(${the_target} ${pch_header})
+ unset(${the_target}_pch)
+ elseif(CMAKE_GENERATOR MATCHES Xcode)
+ add_native_precompiled_header(${the_target} ${pch_header})
+ elseif(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_GENERATOR MATCHES "Makefiles|Ninja")
+ add_precompiled_header(${the_target} ${pch_header})
+ endif()
+ endif()
+endmacro()
endmacro()
+# gets and removes the first element from the list
+macro(ocv_list_pop_front LST VAR)
+ if(${LST})
+ list(GET ${LST} 0 ${VAR})
+ list(REMOVE_AT ${LST} 0)
+ else()
+ set(${VAR} "")
+ endif()
+endmacro()
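+
+# Illustrative usage sketch (the variable names below are hypothetical):
+#   set(__lst "a;b;c")
+#   ocv_list_pop_front(__lst __first)   # __first becomes "a", __lst becomes "b;c"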
+
+
# simple regex escaping routine (does not cover all cases!!!)
macro(ocv_regex_escape var regex)
string(REGEX REPLACE "([+.*^$])" "\\\\1" ${var} "${regex}")
# convert list of paths to full paths
-macro(ocv_to_full_paths VAR)
+macro(ocv_convert_to_full_paths VAR)
if(${VAR})
set(__tmp "")
foreach(path ${${VAR}})
#. **Eclipse IDE**
Check the `Android SDK System Requirements <http://developer.android.com/sdk/requirements.html>`_ document for a list of Eclipse versions that are compatible with the Android SDK.
- For OpenCV 2.4.0 we recommend Eclipse 3.7 (Indigo) or later versions. They work well for OpenCV under both Windows and Linux.
+ For OpenCV 2.4.x we recommend Eclipse 3.7 (Indigo) or later versions. They work well for OpenCV under both Windows and Linux.
   If you do not have Eclipse installed, you can get it from the `download page <http://www.eclipse.org/downloads/>`_.
.. code-block:: bash
- tar -jxvf ~/Downloads/OpenCV-2.4.0-android-bin.tar.bz2
+ tar -jxvf ~/Downloads/OpenCV-2.4.1-android-bin2.tar.bz2
For this tutorial I have unpacked OpenCV to the :file:`C:\\Work\\android-opencv\\` directory.
-.. |opencv_android_bin_pack| replace:: OpenCV-2.4.0-android-bin.tar.bz2
-.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.0/OpenCV-2.4.0-android-bin.tar.bz2/download
+.. |opencv_android_bin_pack| replace:: OpenCV-2.4.1-android-bin2.tar.bz2
+.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.1/OpenCV-2.4.1-android-bin2.tar.bz2/download
.. |opencv_android_bin_pack_url| replace:: |opencv_android_bin_pack|
.. |seven_zip| replace:: 7-Zip
.. _seven_zip: http://www.7-zip.org/
:align: center
* Click :guilabel:`OK` to close preferences dialog.
-
+
#. Import OpenCV and samples into workspace.
OpenCV library is packed as a ready-for-use `Android Library Project
#include <iostream>
#if defined(HAVE_EIGEN) && EIGEN_WORLD_VERSION == 3
-#include <Eigen/Core>
-#include <unsupported/Eigen/MatrixFunctions>
-
-#include <Eigen/Dense>
+# include <Eigen/Core>
+# ifdef ANDROID
+ template <typename Scalar> Scalar log2(Scalar v) { using std::log; return log(v)/log(Scalar(2)); }
+# endif
+# include <unsupported/Eigen/MatrixFunctions>
+# include <Eigen/Dense>
#endif
#include <limits>
const double fy = levelCameraMatrix.at<double>(1,1);
const double determinantThreshold = 1e-6;
- Mat corresps( levelImage0.size(), levelImage0.type(), CV_32SC1 );
+ Mat corresps( levelImage0.size(), levelImage0.type() );
// Run transformation search on current level iteratively.
for( int iter = 0; iter < (*iterCountsPtr)[level]; iter ++ )
template<typename _Tp, int m, int n> class CV_EXPORTS Matx;
typedef std::string String;
-typedef std::basic_string<wchar_t> WString;
class Mat;
class SparseMat;
template<typename _Tp> class CV_EXPORTS MatConstIterator_;
template<typename _Tp> class CV_EXPORTS MatCommaInitializer_;
+#if !defined(ANDROID) || (defined(_GLIBCXX_USE_WCHAR_T) && _GLIBCXX_USE_WCHAR_T)
+typedef std::basic_string<wchar_t> WString;
+
CV_EXPORTS string fromUtf16(const WString& str);
CV_EXPORTS WString toUtf16(const string& str);
+#endif
CV_EXPORTS string format( const char* fmt, ... );
CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0));
namespace cv
{
-#if !defined(ANDROID) || defined(_GLIBCXX_USE_WCHAR_T)
+#if !defined(ANDROID) || (defined(_GLIBCXX_USE_WCHAR_T) && _GLIBCXX_USE_WCHAR_T)
string fromUtf16(const WString& str)
{
cv::AutoBuffer<char> _buf(str.size()*4 + 1);
--- /dev/null
+#include "perf_precomp.hpp"\r
+\r
+#ifdef HAVE_CUDA\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Merge\r
+\r
+GPU_PERF_TEST(Merge, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int channels = GET_PARAM(3);\r
+\r
+ std::vector<cv::gpu::GpuMat> src(channels);\r
+ for (int i = 0; i < channels; ++i)\r
+ src[i] = cv::gpu::GpuMat(size, depth, cv::Scalar::all(i));\r
+\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::merge(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::merge(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Merge, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F),\r
+ testing::Values<Channels>(2, 3, 4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Split\r
+\r
+GPU_PERF_TEST(Split, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int channels = GET_PARAM(3);\r
+\r
+ cv::gpu::GpuMat src(size, CV_MAKE_TYPE(depth, channels), cv::Scalar(1, 2, 3, 4));\r
+\r
+ std::vector<cv::gpu::GpuMat> dst;\r
+\r
+ cv::gpu::split(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::split(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Split, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F),\r
+ testing::Values<Channels>(2, 3, 4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Add_Mat\r
+\r
+GPU_PERF_TEST(Add_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::add(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::add(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Add_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Add_Scalar\r
+\r
+GPU_PERF_TEST(Add_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s(1, 2, 3, 4);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::add(src, s, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::add(src, s, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Add_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Subtract_Mat\r
+\r
+GPU_PERF_TEST(Subtract_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::subtract(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::subtract(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Subtract_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Subtract_Scalar\r
+\r
+GPU_PERF_TEST(Subtract_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s(1, 2, 3, 4);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::subtract(src, s, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::subtract(src, s, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Subtract_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Multiply_Mat\r
+\r
+GPU_PERF_TEST(Multiply_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::multiply(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::multiply(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Multiply_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Multiply_Scalar\r
+\r
+GPU_PERF_TEST(Multiply_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s(1, 2, 3, 4);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::multiply(src, s, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::multiply(src, s, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Multiply_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Divide_Mat\r
+\r
+GPU_PERF_TEST(Divide_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::divide(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::divide(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Divide_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Divide_Scalar\r
+\r
+GPU_PERF_TEST(Divide_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s(1, 2, 3, 4);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::divide(src, s, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::divide(src, s, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Divide_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Divide_Scalar_Inv\r
+\r
+GPU_PERF_TEST(Divide_Scalar_Inv, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ double scale = 100.0;\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::divide(scale, src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::divide(scale, src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Divide_Scalar_Inv, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// AbsDiff_Mat\r
+\r
+GPU_PERF_TEST(AbsDiff_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::absdiff(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::absdiff(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, AbsDiff_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// AbsDiff_Scalar\r
+\r
+GPU_PERF_TEST(AbsDiff_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s(1, 2, 3, 4);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::absdiff(src, s, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::absdiff(src, s, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, AbsDiff_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Abs\r
+\r
+GPU_PERF_TEST(Abs, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::abs(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::abs(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Abs, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_16S, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Sqr\r
+\r
+GPU_PERF_TEST(Sqr, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::sqr(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::sqr(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Sqr, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16S, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Sqrt\r
+\r
+GPU_PERF_TEST(Sqrt, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::sqrt(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::sqrt(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Sqrt, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16S, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Log\r
+\r
+GPU_PERF_TEST(Log, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 1.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::log(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::log(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Log, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16S, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Exp\r
+\r
+GPU_PERF_TEST(Exp, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 1.0, 10.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::exp(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::exp(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Exp, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16S, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Pow\r
+\r
+GPU_PERF_TEST(Pow, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 1.0, 10.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::pow(src, 2.3, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::pow(src, 2.3, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Pow, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16S, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Compare_Mat\r
+\r
+CV_ENUM(CmpCode, cv::CMP_EQ, cv::CMP_GT, cv::CMP_GE, cv::CMP_LT, cv::CMP_LE, cv::CMP_NE)\r
+#define ALL_CMP_CODES testing::Values(CmpCode(cv::CMP_EQ), CmpCode(cv::CMP_NE), CmpCode(cv::CMP_GT), CmpCode(cv::CMP_GE), CmpCode(cv::CMP_LT), CmpCode(cv::CMP_LE))\r
+\r
+GPU_PERF_TEST(Compare_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth, CmpCode)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int cmp_code = GET_PARAM(3);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::compare(src1, src2, dst, cmp_code);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::compare(src1, src2, dst, cmp_code);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Compare_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F),\r
+ ALL_CMP_CODES));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Compare_Scalar\r
+\r
+GPU_PERF_TEST(Compare_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth, CmpCode)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int cmp_code = GET_PARAM(3);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s = cv::Scalar::all(50);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::compare(src, s, dst, cmp_code);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::compare(src, s, dst, cmp_code);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Compare_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F),\r
+ ALL_CMP_CODES));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Bitwise_Not\r
+\r
+GPU_PERF_TEST(Bitwise_Not, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::bitwise_not(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::bitwise_not(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Bitwise_Not, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Bitwise_And_Mat\r
+\r
+GPU_PERF_TEST(Bitwise_And_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::bitwise_and(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::bitwise_and(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Bitwise_And_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Bitwise_And_Scalar\r
+\r
+GPU_PERF_TEST(Bitwise_And_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int channels = GET_PARAM(3);\r
+\r
+ int type = CV_MAKE_TYPE(depth, channels);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s = cv::Scalar(50, 50, 50, 50);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::bitwise_and(src, s, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::bitwise_and(src, s, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Bitwise_And_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S),\r
+ testing::Values<Channels>(1, 3, 4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Bitwise_Or_Mat\r
+\r
+GPU_PERF_TEST(Bitwise_Or_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::bitwise_or(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::bitwise_or(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Bitwise_Or_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Bitwise_Or_Scalar\r
+\r
+GPU_PERF_TEST(Bitwise_Or_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int channels = GET_PARAM(3);\r
+\r
+ int type = CV_MAKE_TYPE(depth, channels);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s = cv::Scalar(50, 50, 50, 50);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::bitwise_or(src, s, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::bitwise_or(src, s, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Bitwise_Or_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S),\r
+ testing::Values<Channels>(1, 3, 4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Bitwise_Xor_Mat\r
+\r
+GPU_PERF_TEST(Bitwise_Xor_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::bitwise_xor(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::bitwise_xor(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Bitwise_Xor_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Bitwise_Xor_Scalar\r
+\r
+GPU_PERF_TEST(Bitwise_Xor_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int channels = GET_PARAM(3);\r
+\r
+ int type = CV_MAKE_TYPE(depth, channels);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar s = cv::Scalar(50, 50, 50, 50);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::bitwise_xor(src, s, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::bitwise_xor(src, s, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Bitwise_Xor_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S),\r
+ testing::Values<Channels>(1, 3, 4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// RShift\r
+\r
+GPU_PERF_TEST(RShift, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int channels = GET_PARAM(3);\r
+\r
+ int type = CV_MAKE_TYPE(depth, channels);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar_<int> val = cv::Scalar_<int>::all(4);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::rshift(src, val, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::rshift(src, val, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, RShift, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S),\r
+ testing::Values<Channels>(1, 3, 4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// LShift\r
+\r
+GPU_PERF_TEST(LShift, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int channels = GET_PARAM(3);\r
+\r
+ int type = CV_MAKE_TYPE(depth, channels);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar_<int> val = cv::Scalar_<int>::all(4);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::lshift(src, val, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::lshift(src, val, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, LShift, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S),\r
+ testing::Values<Channels>(1, 3, 4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Min_Mat\r
+\r
+GPU_PERF_TEST(Min_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0, 255.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0, 255.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::min(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::min(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Min_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Min_Scalar\r
+\r
+GPU_PERF_TEST(Min_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ double val = 50.0;\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::min(src, val, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::min(src, val, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Min_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Max_Mat\r
+\r
+GPU_PERF_TEST(Max_Mat, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, depth);\r
+ fill(src1_host, 0, 255.0);\r
+\r
+ cv::Mat src2_host(size, depth);\r
+ fill(src2_host, 0, 255.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::max(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::max(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Max_Mat, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Max_Scalar\r
+\r
+GPU_PERF_TEST(Max_Scalar, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ double val = 50.0;\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::max(src, val, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::max(src, val, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Max_Scalar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// AddWeighted\r
+\r
+GPU_PERF_TEST(AddWeighted, cv::gpu::DeviceInfo, cv::Size, MatDepth, MatDepth, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth1 = GET_PARAM(2);\r
+ int depth2 = GET_PARAM(3);\r
+ int dst_depth = GET_PARAM(4);\r
+\r
+ cv::Mat src1_host(size, depth1);\r
+ fill(src1_host, 0, 100.0);\r
+\r
+ cv::Mat src2_host(size, depth2);\r
+ fill(src2_host, 0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::addWeighted(src1, 0.5, src2, 0.5, 10.0, dst, dst_depth);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::addWeighted(src1, 0.5, src2, 0.5, 10.0, dst, dst_depth);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, AddWeighted, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F),\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F),\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// GEMM\r
+#ifdef HAVE_CUBLAS\r
+\r
+CV_FLAGS(GemmFlags, 0, cv::GEMM_1_T, cv::GEMM_2_T, cv::GEMM_3_T)\r
+#define ALL_GEMM_FLAGS testing::Values(GemmFlags(0), GemmFlags(cv::GEMM_1_T), GemmFlags(cv::GEMM_2_T), GemmFlags(cv::GEMM_3_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_3_T), GemmFlags(cv::GEMM_1_T | cv::GEMM_2_T | cv::GEMM_3_T))\r
+\r
+GPU_PERF_TEST(GEMM, cv::gpu::DeviceInfo, cv::Size, MatType, GemmFlags)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int type = GET_PARAM(2);\r
+ int flags = GET_PARAM(3);\r
+\r
+ cv::Mat src1_host(size, type);\r
+ fill(src1_host, 0.0, 10.0);\r
+\r
+ cv::Mat src2_host(size, type);\r
+ fill(src2_host, 0.0, 10.0);\r
+\r
+ cv::Mat src3_host(size, type);\r
+ fill(src3_host, 0.0, 10.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat src3(src3_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::gemm(src1, src2, 1.0, src3, 1.0, dst, flags);\r
+\r
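+ // GEMM on 512x512 and 1024x1024 inputs is comparatively slow, so give the perf\r
+ // framework a larger time budget for this test\r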
+ declare.time(5.0);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::gemm(src1, src2, 1.0, src3, 1.0, dst, flags);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, GEMM, testing::Combine(\r
+ ALL_DEVICES,\r
+ testing::Values(cv::Size(512, 512), cv::Size(1024, 1024)),\r
+ testing::Values<MatType>(CV_32FC1, CV_32FC2, CV_64FC1, CV_64FC2),\r
+ ALL_GEMM_FLAGS));\r
+\r
+#endif\r
+//////////////////////////////////////////////////////////////////////\r
+// Transpose\r
+\r
+GPU_PERF_TEST(Transpose, cv::gpu::DeviceInfo, cv::Size, MatType)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int type = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::transpose(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::transpose(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Transpose, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatType>(CV_8UC1, CV_8UC4, CV_16UC2, CV_16SC2, CV_32SC1, CV_32SC2, CV_64FC1)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Flip\r
+\r
+enum {FLIP_BOTH = 0, FLIP_X = 1, FLIP_Y = -1};\r
+CV_ENUM(FlipCode, FLIP_BOTH, FLIP_X, FLIP_Y)\r
+#define ALL_FLIP_CODES testing::Values(FlipCode(FLIP_BOTH), FlipCode(FLIP_X), FlipCode(FLIP_Y))\r
+\r
+GPU_PERF_TEST(Flip, cv::gpu::DeviceInfo, cv::Size, MatType, FlipCode)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int type = GET_PARAM(2);\r
+ int flipCode = GET_PARAM(3);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::flip(src, dst, flipCode);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::flip(src, dst, flipCode);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Flip, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatType>(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32FC1, CV_32FC3, CV_32FC4),\r
+ ALL_FLIP_CODES));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// LUT_OneChannel\r
+\r
+GPU_PERF_TEST(LUT_OneChannel, cv::gpu::DeviceInfo, cv::Size, MatType)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int type = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::Mat lut(1, 256, CV_8UC1);\r
+ fill(lut, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::LUT(src, lut, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::LUT(src, lut, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, LUT_OneChannel, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatType>(CV_8UC1, CV_8UC3)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// LUT_MultiChannel\r
+\r
+GPU_PERF_TEST(LUT_MultiChannel, cv::gpu::DeviceInfo, cv::Size, MatType)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int type = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0.0, 100.0);\r
+\r
+ cv::Mat lut(1, 256, CV_MAKE_TYPE(CV_8U, src_host.channels()));\r
+ fill(lut, 0.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::LUT(src, lut, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::LUT(src, lut, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, LUT_MultiChannel, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatType>(CV_8UC3)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Magnitude_Complex\r
+\r
+GPU_PERF_TEST(Magnitude_Complex, cv::gpu::DeviceInfo, cv::Size)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+\r
+ cv::Mat src_host(size, CV_32FC2);\r
+ fill(src_host, -100.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::magnitude(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::magnitude(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Magnitude_Complex, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Magnitude_Sqr_Complex\r
+\r
+GPU_PERF_TEST(Magnitude_Sqr_Complex, cv::gpu::DeviceInfo, cv::Size)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+\r
+ cv::Mat src_host(size, CV_32FC2);\r
+ fill(src_host, -100.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::magnitudeSqr(src, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::magnitudeSqr(src, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Magnitude_Sqr_Complex, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Magnitude\r
+\r
+GPU_PERF_TEST(Magnitude, cv::gpu::DeviceInfo, cv::Size)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+\r
+ cv::Mat src1_host(size, CV_32FC1);\r
+ fill(src1_host, -100.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, CV_32FC1);\r
+ fill(src2_host, -100.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::magnitude(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::magnitude(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Magnitude, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Magnitude_Sqr\r
+\r
+GPU_PERF_TEST(Magnitude_Sqr, cv::gpu::DeviceInfo, cv::Size)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+\r
+ cv::Mat src1_host(size, CV_32FC1);\r
+ fill(src1_host, -100.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, CV_32FC1);\r
+ fill(src2_host, -100.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::magnitudeSqr(src1, src2, dst);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::magnitudeSqr(src1, src2, dst);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Magnitude_Sqr, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Phase\r
+\r
+IMPLEMENT_PARAM_CLASS(AngleInDegrees, bool)\r
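+// IMPLEMENT_PARAM_CLASS wraps the raw bool in a named type so the parameter\r
+// prints with its name in the generated test names\r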
+\r
+GPU_PERF_TEST(Phase, cv::gpu::DeviceInfo, cv::Size, AngleInDegrees)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ bool angleInDegrees = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, CV_32FC1);\r
+ fill(src1_host, -100.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, CV_32FC1);\r
+ fill(src2_host, -100.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::phase(src1, src2, dst, angleInDegrees);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::phase(src1, src2, dst, angleInDegrees);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Phase, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<AngleInDegrees>(false, true)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// CartToPolar\r
+\r
+GPU_PERF_TEST(CartToPolar, cv::gpu::DeviceInfo, cv::Size, AngleInDegrees)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ bool angleInDegrees = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, CV_32FC1);\r
+ fill(src1_host, -100.0, 100.0);\r
+\r
+ cv::Mat src2_host(size, CV_32FC1);\r
+ fill(src2_host, -100.0, 100.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ cv::gpu::GpuMat magnitude;\r
+ cv::gpu::GpuMat angle;\r
+\r
+ cv::gpu::cartToPolar(src1, src2, magnitude, angle, angleInDegrees);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::cartToPolar(src1, src2, magnitude, angle, angleInDegrees);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, CartToPolar, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<AngleInDegrees>(false, true)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// PolarToCart\r
+\r
+GPU_PERF_TEST(PolarToCart, cv::gpu::DeviceInfo, cv::Size, AngleInDegrees)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ bool angleInDegrees = GET_PARAM(2);\r
+\r
+ cv::Mat magnitude_host(size, CV_32FC1);\r
+ fill(magnitude_host, 0.0, 100.0);\r
+\r
+ cv::Mat angle_host(size, CV_32FC1);\r
+ fill(angle_host, 0.0, angleInDegrees ? 360.0 : 2 * CV_PI);\r
+\r
+ cv::gpu::GpuMat magnitude(magnitude_host);\r
+ cv::gpu::GpuMat angle(angle_host);\r
+ cv::gpu::GpuMat x;\r
+ cv::gpu::GpuMat y;\r
+\r
+ cv::gpu::polarToCart(magnitude, angle, x, y, angleInDegrees);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::polarToCart(magnitude, angle, x, y, angleInDegrees);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, PolarToCart, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<AngleInDegrees>(false, true)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// MeanStdDev\r
+\r
+GPU_PERF_TEST(MeanStdDev, cv::gpu::DeviceInfo, cv::Size)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+\r
+ cv::Mat src_host(size, CV_8UC1);\r
+ fill(src_host, 0.0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar mean;\r
+ cv::Scalar stddev;\r
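+ // buf is the reusable work buffer taken by the buffered gpu reductions; keeping\r
+ // it outside TEST_CYCLE avoids reallocating the intermediate storage on every\r
+ // timed iteration (the Norm/Sum/MinMax tests below do the same)\r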
+ cv::gpu::GpuMat buf;\r
+\r
+ cv::gpu::meanStdDev(src, mean, stddev, buf);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::meanStdDev(src, mean, stddev, buf);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, MeanStdDev, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Norm\r
+\r
+GPU_PERF_TEST(Norm, cv::gpu::DeviceInfo, cv::Size, MatDepth, NormType)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int normType = GET_PARAM(3);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ double dst;\r
+ cv::gpu::GpuMat buf;\r
+\r
+ dst = cv::gpu::norm(src, normType, buf);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ dst = cv::gpu::norm(src, normType, buf);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Norm, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32S, CV_32F),\r
+ testing::Values(NormType(cv::NORM_INF), NormType(cv::NORM_L1), NormType(cv::NORM_L2))));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// NormDiff\r
+\r
+GPU_PERF_TEST(NormDiff, cv::gpu::DeviceInfo, cv::Size, NormType)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int normType = GET_PARAM(2);\r
+\r
+ cv::Mat src1_host(size, CV_8UC1);\r
+ fill(src1_host, 0.0, 255.0);\r
+\r
+ cv::Mat src2_host(size, CV_8UC1);\r
+ fill(src2_host, 0.0, 255.0);\r
+\r
+ cv::gpu::GpuMat src1(src1_host);\r
+ cv::gpu::GpuMat src2(src2_host);\r
+ double dst;\r
+\r
+ dst = cv::gpu::norm(src1, src2, normType);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ dst = cv::gpu::norm(src1, src2, normType);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, NormDiff, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values(NormType(cv::NORM_INF), NormType(cv::NORM_L1), NormType(cv::NORM_L2))));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Sum\r
+\r
+GPU_PERF_TEST(Sum, cv::gpu::DeviceInfo, cv::Size, MatType)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int type = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0.0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar dst;\r
+ cv::gpu::GpuMat buf;\r
+\r
+ dst = cv::gpu::sum(src, buf);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ dst = cv::gpu::sum(src, buf);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Sum, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatType>(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32FC1, CV_32FC3, CV_32FC4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Sum_Abs\r
+\r
+GPU_PERF_TEST(Sum_Abs, cv::gpu::DeviceInfo, cv::Size, MatType)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int type = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0.0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar dst;\r
+ cv::gpu::GpuMat buf;\r
+\r
+ dst = cv::gpu::absSum(src, buf);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ dst = cv::gpu::absSum(src, buf);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Sum_Abs, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatType>(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32FC1, CV_32FC3, CV_32FC4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Sum_Sqr\r
+\r
+GPU_PERF_TEST(Sum_Sqr, cv::gpu::DeviceInfo, cv::Size, MatType)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int type = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0.0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::Scalar dst;\r
+ cv::gpu::GpuMat buf;\r
+\r
+ dst = cv::gpu::sqrSum(src, buf);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ dst = cv::gpu::sqrSum(src, buf);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Sum_Sqr, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatType>(CV_8UC1, CV_8UC3, CV_8UC4, CV_16UC1, CV_16UC3, CV_16UC4, CV_32FC1, CV_32FC3, CV_32FC4)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// MinMax\r
+\r
+GPU_PERF_TEST(MinMax, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ double minVal, maxVal;\r
+ cv::gpu::GpuMat buf;\r
+\r
+ cv::gpu::minMax(src, &minVal, &maxVal, cv::gpu::GpuMat(), buf);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::minMax(src, &minVal, &maxVal, cv::gpu::GpuMat(), buf);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, MinMax, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// MinMaxLoc\r
+\r
+GPU_PERF_TEST(MinMaxLoc, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
+ fill(src_host, 0.0, 255.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ double minVal, maxVal;\r
+ cv::Point minLoc, maxLoc;\r
+ cv::gpu::GpuMat valbuf, locbuf;\r
+\r
+ cv::gpu::minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc, cv::gpu::GpuMat(), valbuf, locbuf);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc, cv::gpu::GpuMat(), valbuf, locbuf);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, MinMaxLoc, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// CountNonZero\r
+\r
+GPU_PERF_TEST(CountNonZero, cv::gpu::DeviceInfo, cv::Size, MatDepth)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+\r
+ cv::Mat src_host(size, depth);\r
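+ // the narrow [0, 1.5] fill range gives the integer depths a mix of zero and\r
+ // non-zero elements to count\r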
+ fill(src_host, 0.0, 1.5);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ int dst;\r
+ cv::gpu::GpuMat buf;\r
+\r
+ dst = cv::gpu::countNonZero(src, buf);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ dst = cv::gpu::countNonZero(src, buf);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, CountNonZero, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_32F, CV_64F)));\r
+\r
+//////////////////////////////////////////////////////////////////////\r
+// Reduce\r
+\r
+CV_ENUM(ReduceCode, CV_REDUCE_SUM, CV_REDUCE_AVG, CV_REDUCE_MAX, CV_REDUCE_MIN)\r
+#define ALL_REDUCE_CODES testing::Values<ReduceCode>(CV_REDUCE_SUM, CV_REDUCE_AVG, CV_REDUCE_MAX, CV_REDUCE_MIN)\r
+\r
+enum {Rows = 0, Cols = 1};\r
+CV_ENUM(ReduceDim, Rows, Cols)\r
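+// cv::reduce convention: dim 0 collapses the matrix to a single row, dim 1 to a\r
+// single column\r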
+\r
+GPU_PERF_TEST(Reduce, cv::gpu::DeviceInfo, cv::Size, MatDepth, Channels, ReduceCode, ReduceDim)\r
+{\r
+ cv::gpu::DeviceInfo devInfo = GET_PARAM(0);\r
+ cv::gpu::setDevice(devInfo.deviceID());\r
+\r
+ cv::Size size = GET_PARAM(1);\r
+ int depth = GET_PARAM(2);\r
+ int channels = GET_PARAM(3);\r
+ int reduceOp = GET_PARAM(4);\r
+ int dim = GET_PARAM(5);\r
+\r
+ int type = CV_MAKE_TYPE(depth, channels);\r
+\r
+ cv::Mat src_host(size, type);\r
+ fill(src_host, 0.0, 10.0);\r
+\r
+ cv::gpu::GpuMat src(src_host);\r
+ cv::gpu::GpuMat dst;\r
+\r
+ cv::gpu::reduce(src, dst, dim, reduceOp);\r
+\r
+ TEST_CYCLE()\r
+ {\r
+ cv::gpu::reduce(src, dst, dim, reduceOp);\r
+ }\r
+}\r
+\r
+INSTANTIATE_TEST_CASE_P(Core, Reduce, testing::Combine(\r
+ ALL_DEVICES,\r
+ GPU_TYPICAL_MAT_SIZES,\r
+ testing::Values<MatDepth>(CV_8U, CV_16U, CV_16S, CV_32F),\r
+ testing::Values<Channels>(1, 2, 3, 4),\r
+ ALL_REDUCE_CODES,\r
+ testing::Values(ReduceDim(Rows), ReduceDim(Cols))));\r
+\r
+#endif\r
\r
for (int i = 0; i < nMatches; ++i, ++trainIdx_ptr, ++imgIdx_ptr, ++distance_ptr)\r
{\r
- int trainIdx = *trainIdx_ptr;\r
- int imgIdx = *imgIdx_ptr;\r
- float distance = *distance_ptr;\r
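+ // local copies use an underscore prefix, presumably to avoid shadowing the\r
+ // trainIdx/imgIdx/distance buffers in the enclosing scope\r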
+ int _trainIdx = *trainIdx_ptr;\r
+ int _imgIdx = *imgIdx_ptr;\r
+ float _distance = *distance_ptr;\r
\r
- DMatch m(queryIdx, trainIdx, imgIdx, distance);\r
+ DMatch m(queryIdx, _trainIdx, _imgIdx, _distance);\r
\r
curMatches.push_back(m);\r
}\r
#include "opencv2/gpu/device/vec_distance.hpp"\r
#include "opencv2/gpu/device/datamov_utils.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace bf_knnmatch \r
+ namespace bf_knnmatch\r
{\r
///////////////////////////////////////////////////////////////////////////////\r
// Reduction\r
\r
- template <int BLOCK_SIZE> \r
- __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, \r
- int& bestTrainIdx1, int& bestTrainIdx2, \r
+ template <int BLOCK_SIZE>\r
+ __device__ void findBestMatch(float& bestDistance1, float& bestDistance2,\r
+ int& bestTrainIdx1, int& bestTrainIdx2,\r
float* s_distance, int* s_trainIdx)\r
{\r
- float myBestDistance1 = numeric_limits<float>::max(); \r
+ float myBestDistance1 = numeric_limits<float>::max();\r
float myBestDistance2 = numeric_limits<float>::max();\r
int myBestTrainIdx1 = -1;\r
int myBestTrainIdx2 = -1;\r
bestTrainIdx2 = myBestTrainIdx2;\r
}\r
\r
- template <int BLOCK_SIZE> \r
- __device__ void findBestMatch(float& bestDistance1, float& bestDistance2, \r
- int& bestTrainIdx1, int& bestTrainIdx2, \r
- int& bestImgIdx1, int& bestImgIdx2, \r
+ template <int BLOCK_SIZE>\r
+ __device__ void findBestMatch(float& bestDistance1, float& bestDistance2,\r
+ int& bestTrainIdx1, int& bestTrainIdx2,\r
+ int& bestImgIdx1, int& bestImgIdx2,\r
float* s_distance, int* s_trainIdx, int* s_imgIdx)\r
{\r
- float myBestDistance1 = numeric_limits<float>::max(); \r
+ float myBestDistance1 = numeric_limits<float>::max();\r
float myBestDistance2 = numeric_limits<float>::max();\r
int myBestTrainIdx1 = -1;\r
int myBestTrainIdx2 = -1;\r
///////////////////////////////////////////////////////////////////////////////\r
// Match Unrolled Cached\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>\r
__device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query)\r
{\r
#pragma unroll\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- __device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
- typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
- float& bestDistance1, float& bestDistance2, \r
- int& bestTrainIdx1, int& bestTrainIdx2, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ __device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,\r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train,\r
+ float& bestDistance1, float& bestDistance2,\r
+ int& bestTrainIdx1, int& bestTrainIdx2,\r
int& bestImgIdx1, int& bestImgIdx2)\r
{\r
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
__global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
__global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
- const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,\r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
///////////////////////////////////////////////////////////////////////////////\r
// Match Unrolled\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- __device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
- typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
- float& bestDistance1, float& bestDistance2, \r
- int& bestTrainIdx1, int& bestTrainIdx2, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ __device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,\r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train,\r
+ float& bestDistance1, float& bestDistance2,\r
+ int& bestTrainIdx1, int& bestTrainIdx2,\r
int& bestImgIdx1, int& bestImgIdx2)\r
{\r
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
__global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
__global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
- const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,\r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
///////////////////////////////////////////////////////////////////////////////\r
// Match\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
- __device__ void loop(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask, \r
- typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
- float& bestDistance1, float& bestDistance2, \r
- int& bestTrainIdx1, int& bestTrainIdx2, \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ __device__ void loop(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,\r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train,\r
+ float& bestDistance1, float& bestDistance2,\r
+ int& bestTrainIdx1, int& bestTrainIdx2,\r
int& bestImgIdx1, int& bestImgIdx2)\r
{\r
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
}\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
__global__ void match(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
- void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance, \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<float2>& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
__global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
- void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
- const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance, \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,\r
+ const DevMem2D_<int2>& trainIdx, const DevMem2D_<int2>& imgIdx, const DevMem2D_<float2>& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
///////////////////////////////////////////////////////////////////////////////\r
// knnMatch 2 dispatcher\r
\r
- template <typename Dist, typename T, typename Mask> \r
- void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2Db& trainIdx, const DevMem2Db& distance, \r
+ template <typename Dist, typename T, typename Mask>\r
+ void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2Db& trainIdx, const DevMem2Db& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (query.cols <= 64)\r
matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
else if (query.cols <= 512)\r
- { \r
+ {\r
matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
else if (query.cols <= 1024)\r
- { \r
+ {\r
matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}*/\r
else\r
}\r
}\r
\r
- template <typename Dist, typename T, typename Mask> \r
- void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
- const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, \r
+ template <typename Dist, typename T, typename Mask>\r
+ void match2Dispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,\r
+ const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (query.cols <= 64)\r
matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
else if (query.cols <= 512)\r
- { \r
+ {\r
matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}\r
else if (query.cols <= 1024)\r
- { \r
+ {\r
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
}*/\r
else\r
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
}\r
else\r
- { \r
+ {\r
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
}\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
void calcDistanceUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];\r
}\r
else\r
- { \r
+ {\r
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;\r
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;\r
}\r
}\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
void calcDistance(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, const DevMem2Df& allDist, cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
///////////////////////////////////////////////////////////////////////////////\r
// Calc Distance dispatcher\r
\r
- template <typename Dist, typename T, typename Mask> \r
- void calcDistanceDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2Df& allDist, \r
+ template <typename Dist, typename T, typename Mask>\r
+ void calcDistanceDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2Df& allDist,\r
int cc, cudaStream_t stream)\r
{\r
if (query.cols <= 64)\r
calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream);\r
}\r
else if (query.cols <= 512)\r
- { \r
+ {\r
calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream);\r
}\r
else if (query.cols <= 1024)\r
- { \r
+ {\r
calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream);\r
}*/\r
else\r
///////////////////////////////////////////////////////////////////////////////\r
// find knn match kernel\r
\r
- template <int BLOCK_SIZE> \r
+ template <int BLOCK_SIZE>\r
__global__ void findBestMatch(DevMem2Df allDist, int i, PtrStepi trainIdx, PtrStepf distance)\r
{\r
const int SMEM_SIZE = BLOCK_SIZE > 64 ? BLOCK_SIZE : 64;\r
\r
float dist = numeric_limits<float>::max();\r
int bestIdx = -1;\r
- \r
+\r
for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE)\r
{\r
float reg = allDistRow[i];\r
}\r
}\r
\r
- template <int BLOCK_SIZE> \r
+ template <int BLOCK_SIZE>\r
void findKnnMatch(int k, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2Df& allDist, cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, 1, 1);\r
// knn match Dispatcher\r
\r
template <typename Dist, typename T, typename Mask>\r
- void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, int k, const Mask& mask, \r
- const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, \r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, int k, const Mask& mask,\r
+ const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist,\r
int cc, cudaStream_t stream)\r
{\r
if (k == 2)\r
calcDistanceDispatcher<Dist>(query, train, mask, allDist, cc, stream);\r
findKnnMatchDispatcher(k, trainIdx, distance, allDist, cc, stream);\r
}\r
- } \r
+ }\r
\r
///////////////////////////////////////////////////////////////////////////////\r
// knn match caller\r
\r
- template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask, \r
- const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, \r
+ template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask,\r
+ const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist,\r
int cc, cudaStream_t stream)\r
{\r
if (mask.data)\r
template void matchL1_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
template void matchL1_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask, \r
+ template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask,\r
const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist,\r
int cc, cudaStream_t stream)\r
{\r
template void matchL2_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
\r
template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, int k, const DevMem2Db& mask,\r
- const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, \r
+ const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist,\r
int cc, cudaStream_t stream)\r
{\r
if (mask.data)\r
//template void matchHamming_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
template void matchHamming_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, int k, const DevMem2Db& mask, const DevMem2Db& trainIdx, const DevMem2Db& distance, const DevMem2Df& allDist, int cc, cudaStream_t stream);\r
\r
- template <typename T> void match2L1_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, \r
- const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, \r
+ template <typename T> void match2L1_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,\r
+ const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (masks.data)\r
template void match2L1_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream);\r
template void match2L1_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void match2L2_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, \r
- const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, \r
+ template <typename T> void match2L2_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,\r
+ const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (masks.data)\r
//template void match2L2_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Di& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream);\r
template void match2L2_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void match2Hamming_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, \r
- const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance, \r
+ template <typename T> void match2Hamming_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,\r
+ const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (masks.data)\r
#include "opencv2/gpu/device/vec_distance.hpp"\r
#include "opencv2/gpu/device/datamov_utils.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace bf_match \r
+ namespace bf_match\r
{\r
///////////////////////////////////////////////////////////////////////////////\r
// Reduction\r
\r
- template <int BLOCK_SIZE> \r
+ template <int BLOCK_SIZE>\r
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, float* s_distance, int* s_trainIdx)\r
{\r
s_distance += threadIdx.y * BLOCK_SIZE;\r
reducePredVal<BLOCK_SIZE>(s_distance, bestDistance, s_trainIdx, bestTrainIdx, threadIdx.x, less<volatile float>());\r
}\r
\r
- template <int BLOCK_SIZE> \r
+ template <int BLOCK_SIZE>\r
__device__ void findBestMatch(float& bestDistance, int& bestTrainIdx, int& bestImgIdx, float* s_distance, int* s_trainIdx, int* s_imgIdx)\r
{\r
s_distance += threadIdx.y * BLOCK_SIZE;\r
///////////////////////////////////////////////////////////////////////////////\r
// Match Unrolled Cached\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>\r
__device__ void loadQueryToSmem(int queryIdx, const DevMem2D_<T>& query, U* s_query)\r
{\r
#pragma unroll\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
__device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query,volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,\r
- typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train,\r
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)\r
{\r
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
__global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T> train, const Mask mask, int* bestTrainIdx, float* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ __global__ void matchUnrolledCached(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,\r
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolledCached(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
///////////////////////////////////////////////////////////////////////////////\r
// Match Unrolled\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
__device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query,volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,\r
- typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train,\r
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)\r
{\r
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
\r
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
- \r
+\r
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);\r
\r
__syncthreads();\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
}\r
\r
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
- __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, \r
+ __global__ void matchUnrolled(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,\r
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)\r
{\r
extern __shared__ int smem[];\r
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
\r
Mask m = mask;\r
- \r
+\r
for (int imgIdx = 0; imgIdx < n; ++imgIdx)\r
{\r
const DevMem2D_<T> train = trains[imgIdx];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
///////////////////////////////////////////////////////////////////////////////\r
// Match\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
__device__ void loop(int queryIdx, const DevMem2D_<T>& query, volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,\r
- typename Dist::value_type* s_query, typename Dist::value_type* s_train, \r
+ typename Dist::value_type* s_query, typename Dist::value_type* s_train,\r
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)\r
{\r
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)\r
\r
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);\r
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);\r
- \r
+\r
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance, myBestTrainIdx, myBestTrainIdx);\r
\r
__syncthreads();\r
}\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
- void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
}\r
\r
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
- __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask, \r
+ __global__ void match(const DevMem2D_<T> query, const DevMem2D_<T>* trains, int n, const Mask mask,\r
int* bestTrainIdx, int* bestImgIdx, float* bestDistance)\r
{\r
extern __shared__ int smem[];\r
}\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
- void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
///////////////////////////////////////////////////////////////////////////////\r
// Match dispatcher\r
\r
- template <typename Dist, typename T, typename Mask> \r
- void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ template <typename Dist, typename T, typename Mask>\r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (query.cols <= 64)\r
matchUnrolled<16, 256, Dist>(query, train, mask, trainIdx, distance, stream);\r
}\r
else if (query.cols <= 512)\r
- { \r
+ {\r
matchUnrolled<16, 512, Dist>(query, train, mask, trainIdx, distance, stream);\r
}\r
else if (query.cols <= 1024)\r
- { \r
+ {\r
matchUnrolled<16, 1024, Dist>(query, train, mask, trainIdx, distance, stream);\r
}*/\r
else\r
}\r
}\r
\r
- template <typename Dist, typename T, typename Mask> \r
- void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ template <typename Dist, typename T, typename Mask>\r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (query.cols <= 64)\r
matchUnrolled<16, 256, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
}\r
else if (query.cols <= 512)\r
- { \r
+ {\r
matchUnrolled<16, 512, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
}\r
else if (query.cols <= 1024)\r
- { \r
+ {\r
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
}*/\r
else\r
///////////////////////////////////////////////////////////////////////////////\r
// Match caller\r
\r
- template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask, \r
+ template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,\r
const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (mask.data)\r
{\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask), \r
- trainIdx, distance, \r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),\r
+ trainIdx, distance,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(), \r
- trainIdx, distance, \r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),\r
+ trainIdx, distance,\r
cc, stream);\r
}\r
}\r
template void matchL1_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
template void matchL1_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (mask.data)\r
{\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask), \r
- trainIdx, distance, \r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),\r
+ trainIdx, distance,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(), \r
- trainIdx, distance, \r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),\r
+ trainIdx, distance,\r
cc, stream);\r
}\r
}\r
//template void matchL2_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
template void matchL2_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, \r
+ template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, const DevMem2Db& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (mask.data)\r
{\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask), \r
- trainIdx, distance, \r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), SingleMask(mask),\r
+ trainIdx, distance,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(), \r
- trainIdx, distance, \r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), WithOutMask(),\r
+ trainIdx, distance,\r
cc, stream);\r
}\r
}\r
//template void matchHamming_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
template void matchHamming_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (masks.data)\r
{\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), \r
- trainIdx, imgIdx, distance, \r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),\r
+ trainIdx, imgIdx, distance,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), \r
- trainIdx, imgIdx, distance, \r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),\r
+ trainIdx, imgIdx, distance,\r
cc, stream);\r
}\r
}\r
template void matchL1_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
template void matchL1_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (masks.data)\r
{\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), \r
- trainIdx, imgIdx, distance, \r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),\r
+ trainIdx, imgIdx, distance,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), \r
- trainIdx, imgIdx, distance, \r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),\r
+ trainIdx, imgIdx, distance,\r
cc, stream);\r
}\r
}\r
//template void matchL2_gpu<int >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
template void matchL2_gpu<float >(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& maskCollection, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, \r
+ template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& trains, const DevMem2D_<PtrStepb>& masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
if (masks.data)\r
{\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), \r
- trainIdx, imgIdx, distance, \r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data),\r
+ trainIdx, imgIdx, distance,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(), \r
- trainIdx, imgIdx, distance, \r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains.ptr(), trains.cols, WithOutMask(),\r
+ trainIdx, imgIdx, distance,\r
cc, stream);\r
}\r
}\r
#include "opencv2/gpu/device/vec_distance.hpp"\r
#include "opencv2/gpu/device/datamov_utils.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace bf_radius_match \r
+ namespace bf_radius_match\r
{\r
///////////////////////////////////////////////////////////////////////////////\r
// Match Unrolled\r
#endif\r
}\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask> \r
- void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>\r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask,\r
const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
\r
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, false, Dist><<<grid, block, smemSize, stream>>>(query, 0, train, maxDistance, mask, \r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, false, Dist><<<grid, block, smemSize, stream>>>(query, 0, train, maxDistance, mask,\r
trainIdx, PtrStepi(), distance, nMatches.data, trainIdx.cols);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
- } \r
+ }\r
\r
- template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T> \r
- void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2Db* masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T>\r
+ void matchUnrolled(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2Db* masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
\r
if (masks != 0 && masks[i].data)\r
{\r
- matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, SingleMask(masks[i]), \r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, SingleMask(masks[i]),\r
trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);\r
}\r
else\r
{\r
- matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, WithOutMask(), \r
+ matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, WithOutMask(),\r
trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);\r
}\r
cudaSafeCall( cudaGetLastError() );\r
#endif\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T, typename Mask> \r
- void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>\r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
\r
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);\r
\r
- match<BLOCK_SIZE, false, Dist><<<grid, block, smemSize, stream>>>(query, 0, train, maxDistance, mask, \r
+ match<BLOCK_SIZE, false, Dist><<<grid, block, smemSize, stream>>>(query, 0, train, maxDistance, mask,\r
trainIdx, PtrStepi(), distance, nMatches.data, trainIdx.cols);\r
cudaSafeCall( cudaGetLastError() );\r
\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template <int BLOCK_SIZE, typename Dist, typename T> \r
- void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2Db* masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <int BLOCK_SIZE, typename Dist, typename T>\r
+ void match(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2Db* masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
cudaStream_t stream)\r
{\r
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);\r
\r
if (masks != 0 && masks[i].data)\r
{\r
- match<BLOCK_SIZE, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, SingleMask(masks[i]), \r
+ match<BLOCK_SIZE, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, SingleMask(masks[i]),\r
trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);\r
}\r
else\r
{\r
- match<BLOCK_SIZE, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, WithOutMask(), \r
+ match<BLOCK_SIZE, true, Dist><<<grid, block, smemSize, stream>>>(query, i, train, maxDistance, WithOutMask(),\r
trainIdx, imgIdx, distance, nMatches.data, trainIdx.cols);\r
}\r
cudaSafeCall( cudaGetLastError() );\r
///////////////////////////////////////////////////////////////////////////////\r
// Match dispatcher\r
\r
- template <typename Dist, typename T, typename Mask> \r
- void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <typename Dist, typename T, typename Mask>\r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>& train, float maxDistance, const Mask& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
if (query.cols <= 64)\r
matchUnrolled<16, 256, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
}\r
else if (query.cols <= 512)\r
- { \r
+ {\r
matchUnrolled<16, 512, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
}\r
else if (query.cols <= 1024)\r
- { \r
+ {\r
matchUnrolled<16, 1024, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
}*/\r
else\r
}\r
}\r
\r
- template <typename Dist, typename T> \r
- void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2Db* masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <typename Dist, typename T>\r
+ void matchDispatcher(const DevMem2D_<T>& query, const DevMem2D_<T>* trains, int n, float maxDistance, const DevMem2Db* masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
if (query.cols <= 64)\r
matchUnrolled<16, 256, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
}\r
else if (query.cols <= 512)\r
- { \r
+ {\r
matchUnrolled<16, 512, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
}\r
else if (query.cols <= 1024)\r
- { \r
+ {\r
matchUnrolled<16, 1024, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
}*/\r
else\r
{\r
match<16, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
}\r
- } \r
+ }\r
\r
///////////////////////////////////////////////////////////////////////////////\r
// Radius Match caller\r
\r
- template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, float maxDistance, const DevMem2Db& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db& train, float maxDistance, const DevMem2Db& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
if (mask.data)\r
{\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), \r
- trainIdx, distance, nMatches, \r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask),\r
+ trainIdx, distance, nMatches,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(), \r
- trainIdx, distance, nMatches, \r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(),\r
+ trainIdx, distance, nMatches,\r
cc, stream);\r
}\r
}\r
template void matchL1_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, float maxDistance, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
template void matchL1_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, float maxDistance, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, float maxDistance, const DevMem2Db& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db& train, float maxDistance, const DevMem2Db& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
if (mask.data)\r
{\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), \r
- trainIdx, distance, nMatches, \r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask),\r
+ trainIdx, distance, nMatches,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(), \r
- trainIdx, distance, nMatches, \r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(),\r
+ trainIdx, distance, nMatches,\r
cc, stream);\r
}\r
}\r
//template void matchL2_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, float maxDistance, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
template void matchL2_gpu<float >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, float maxDistance, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, float maxDistance, const DevMem2Db& mask, \r
- const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db& train, float maxDistance, const DevMem2Db& mask,\r
+ const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
if (mask.data)\r
{\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask), \r
- trainIdx, distance, nMatches, \r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, SingleMask(mask),\r
+ trainIdx, distance, nMatches,\r
cc, stream);\r
}\r
else\r
{\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(), \r
- trainIdx, distance, nMatches, \r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), static_cast< DevMem2D_<T> >(train), maxDistance, WithOutMask(),\r
+ trainIdx, distance, nMatches,\r
cc, stream);\r
}\r
}\r
//template void matchHamming_gpu<short >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, float maxDistance, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
template void matchHamming_gpu<int >(const DevMem2Db& queryDescs, const DevMem2Db& trainDescs, float maxDistance, const DevMem2Db& mask, const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <typename T> void matchL1_gpu(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
- matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks, \r
- trainIdx, imgIdx, distance, nMatches, \r
+ matchDispatcher< L1Dist<T> >(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks,\r
+ trainIdx, imgIdx, distance, nMatches,\r
cc, stream);\r
}\r
\r
template void matchL1_gpu<int >(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
template void matchL1_gpu<float >(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <typename T> void matchL2_gpu(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
- matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks, \r
- trainIdx, imgIdx, distance, nMatches, \r
+ matchDispatcher<L2Dist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks,\r
+ trainIdx, imgIdx, distance, nMatches,\r
cc, stream);\r
}\r
\r
//template void matchL2_gpu<int >(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
template void matchL2_gpu<float >(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks, const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, int cc, cudaStream_t stream);\r
\r
- template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks, \r
- const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches, \r
+ template <typename T> void matchHamming_gpu(const DevMem2Db& query, const DevMem2Db* trains, int n, float maxDistance, const DevMem2Db* masks,\r
+ const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
- matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks, \r
- trainIdx, imgIdx, distance, nMatches, \r
+ matchDispatcher<HammingDist>(static_cast< DevMem2D_<T> >(query), (const DevMem2D_<T>*)trains, n, maxDistance, masks,\r
+ trainIdx, imgIdx, distance, nMatches,\r
cc, stream);\r
}\r
\r
#include "internal_shared.hpp"\r
#include "opencv2/gpu/device/limits.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace bilateral_filter \r
+ namespace bilateral_filter\r
{\r
__constant__ float* ctable_color;\r
__constant__ float* ctable_space;\r
dp[3] = *(disp + (y+1) * disp_step + x + 0);\r
dp[4] = *(disp + (y ) * disp_step + x + 1);\r
\r
- if(::abs(dp[1] - dp[0]) >= cedge_disc || ::abs(dp[2] - dp[0]) >= cedge_disc || ::abs(dp[3] - dp[0]) >= cedge_disc || ::abs(dp[4] - dp[0]) >= cedge_disc) \r
+ if(::abs(dp[1] - dp[0]) >= cedge_disc || ::abs(dp[2] - dp[0]) >= cedge_disc || ::abs(dp[3] - dp[0]) >= cedge_disc || ::abs(dp[4] - dp[0]) >= cedge_disc)\r
{\r
const int ymin = ::max(0, y - cradius);\r
const int xmin = ::max(0, x - cradius);\r
}\r
}\r
\r
- template <typename T> \r
+ template <typename T>\r
void bilateral_filter_caller(DevMem2D_<T> disp, DevMem2Db img, int channels, int iters, cudaStream_t stream)\r
{\r
dim3 threads(32, 8, 1);\r
\r
#include "internal_shared.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace blend \r
+ namespace blend\r
{\r
template <typename T>\r
__global__ void blendLinearKernel(int rows, int cols, int cn, const PtrStep<T> img1, const PtrStep<T> img2,\r
T p2 = img2.ptr(y)[x];\r
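// Weighted average of the two source pixels; the 1e-5f epsilon keeps the
// division well-defined where both weights are zero.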
result.ptr(y)[x] = (p1 * w1 + p2 * w2) / (w1 + w2 + 1e-5f);\r
}\r
- } \r
+ }\r
\r
template <typename T>\r
void blendLinearCaller(int rows, int cols, int cn, PtrStep<T> img1, PtrStep<T> img2, PtrStepf weights1, PtrStepf weights2, PtrStep<T> result, cudaStream_t stream)\r
{\r
dim3 threads(16, 16);\r
dim3 grid(divUp(cols * cn, threads.x), divUp(rows, threads.y));\r
- \r
+\r
blendLinearKernel<<<grid, threads, 0, stream>>>(rows, cols * cn, cn, img1, img2, weights1, weights2, result);\r
cudaSafeCall( cudaGetLastError() );\r
\r
{\r
dim3 threads(16, 16);\r
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));\r
- \r
+\r
blendLinearKernel8UC4<<<grid, threads, 0, stream>>>(rows, cols, img1, img2, weights1, weights2, result);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall(cudaDeviceSynchronize());\r
}\r
- } // namespace blend \r
+ } // namespace blend\r
}}} // namespace cv { namespace gpu { namespace device\r
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/functional.hpp"
-namespace cv { namespace gpu { namespace device
+namespace cv { namespace gpu { namespace device
{
#define SOLVE_PNP_RANSAC_MAX_NUM_ITERS 200
#include <algorithm>\r
#include "internal_shared.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace canny \r
+ namespace canny\r
{\r
__global__ void calcSobelRowPass(const PtrStepb src, PtrStepi dx_buf, PtrStepi dy_buf, int rows, int cols)\r
{\r
}\r
};\r
\r
- template <typename Norm> __global__ void calcMagnitude(const PtrStepi dx_buf, const PtrStepi dy_buf, \r
+ template <typename Norm> __global__ void calcMagnitude(const PtrStepi dx_buf, const PtrStepi dy_buf,\r
PtrStepi dx, PtrStepi dy, PtrStepf mag, int rows, int cols)\r
{\r
__shared__ int sdx[18][16];\r
}\r
\r
//////////////////////////////////////////////////////////////////////////////////////////\r
- \r
+\r
#define CANNY_SHIFT 15\r
#define TG22 (int)(0.4142135623730950488016887242097*(1<<CANNY_SHIFT) + 0.5)\r
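// 0.41421356... is tan(22.5 degrees); TG22 stores it in fixed point (scaled by
// 2^CANNY_SHIFT) so gradient directions can be bucketed into 45-degree sectors for
// non-maximum suppression using integer comparisons.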
\r
edge_type = 1 + (int)(m > high_thresh);\r
}\r
}\r
- \r
+\r
map.ptr(i + 1)[j + 1] = edge_type;\r
}\r
}\r
\r
const int tid = threadIdx.y * 16 + threadIdx.x;\r
const int lx = tid % 18;\r
- const int ly = tid / 18; \r
+ const int ly = tid / 18;\r
\r
if (ly < 14)\r
smem[ly][lx] = map.ptr(blockIdx.y * 16 + ly)[blockIdx.x * 16 + lx];\r
n += smem[threadIdx.y ][threadIdx.x ] == 2;\r
n += smem[threadIdx.y ][threadIdx.x + 1] == 2;\r
n += smem[threadIdx.y ][threadIdx.x + 2] == 2;\r
- \r
+\r
n += smem[threadIdx.y + 1][threadIdx.x ] == 2;\r
n += smem[threadIdx.y + 1][threadIdx.x + 2] == 2;\r
- \r
+\r
n += smem[threadIdx.y + 2][threadIdx.x ] == 2;\r
n += smem[threadIdx.y + 2][threadIdx.x + 1] == 2;\r
n += smem[threadIdx.y + 2][threadIdx.x + 2] == 2;\r
n += smem[threadIdx.y ][threadIdx.x ] == 1;\r
n += smem[threadIdx.y ][threadIdx.x + 1] == 1;\r
n += smem[threadIdx.y ][threadIdx.x + 2] == 1;\r
- \r
+\r
n += smem[threadIdx.y + 1][threadIdx.x ] == 1;\r
n += smem[threadIdx.y + 1][threadIdx.x + 2] == 1;\r
- \r
+\r
n += smem[threadIdx.y + 2][threadIdx.x ] == 1;\r
n += smem[threadIdx.y + 2][threadIdx.x + 1] == 1;\r
n += smem[threadIdx.y + 2][threadIdx.x + 2] == 1;\r
#if __CUDA_ARCH__ >= 120\r
\r
const int stack_size = 512;\r
- \r
+\r
__shared__ unsigned int s_counter;\r
__shared__ unsigned int s_ind;\r
__shared__ ushort2 s_st[stack_size];\r
if (subTaskIdx < portion)\r
pos = s_st[s_counter - 1 - subTaskIdx];\r
__syncthreads();\r
- \r
+\r
if (threadIdx.x == 0)\r
s_counter -= portion;\r
__syncthreads();\r
- \r
+\r
if (pos.x > 0 && pos.x <= cols && pos.y > 0 && pos.y <= rows)\r
{\r
pos.x += c_dx[threadIdx.x & 7];\r
{\r
void* counter_ptr;\r
cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, counter) );\r
- \r
+\r
unsigned int count;\r
cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) );\r
\r
#include <opencv2/gpu/device/color.hpp>\r
#include <cvt_colot_internal.h>\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_rgba_traits<uchar>::functor_type)\r
{\r
{\r
enum { smart_block_dim_y = 8 };\r
enum { smart_shift = 4 };\r
- }; \r
+ };\r
\r
OPENCV_GPU_TRANSFORM_FUNCTOR_TRAITS(bgra_to_xyz4_traits<uchar>::functor_type)\r
{\r
#include "opencv2/gpu/device/border_interpolate.hpp"\r
#include "opencv2/gpu/device/static_check.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace column_filter \r
+ namespace column_filter\r
{\r
#define MAX_KERNEL_SIZE 32\r
\r
\r
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);\r
const dim3 grid(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y * PATCH_PER_BLOCK));\r
- \r
+\r
B<T> brd(src.rows);\r
\r
linearColumnFilter<KSIZE, T, D><<<grid, block, 0, stream>>>(src, dst, anchor, brd);\r
{\r
typedef void (*caller_t)(DevMem2D_<T> src, DevMem2D_<D> dst, int anchor, int cc, cudaStream_t stream);\r
\r
- static const caller_t callers[5][33] = \r
+ static const caller_t callers[5][33] =\r
{\r
{\r
0,\r
linearColumnFilter_caller<30, T, D, BrdColWrap>,\r
linearColumnFilter_caller<31, T, D, BrdColWrap>,\r
linearColumnFilter_caller<32, T, D, BrdColWrap>\r
- } \r
+ }\r
};\r
- \r
+\r
loadKernel(kernel, ksize);\r
\r
callers[brd_type][ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor, cc, stream);\r
#include "internal_shared.hpp"\r
#include "opencv2/gpu/device/border_interpolate.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace imgproc \r
+ namespace imgproc\r
{\r
template <typename Ptr2D, typename T> __global__ void copyMakeBorder(const Ptr2D src, DevMem2D_<T> dst, int top, int left)\r
{\r
\r
template <template <typename> class B, typename T> struct CopyMakeBorderDispatcher\r
{\r
- static void call(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, int top, int left, \r
+ static void call(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, int top, int left,\r
const typename VecTraits<T>::elem_type* borderValue, cudaStream_t stream)\r
- { \r
+ {\r
dim3 block(32, 8);\r
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));\r
\r
}\r
};\r
\r
- template <typename T, int cn> void copyMakeBorder_gpu(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode, \r
+ template <typename T, int cn> void copyMakeBorder_gpu(const DevMem2Db& src, const DevMem2Db& dst, int top, int left, int borderMode,\r
const T* borderValue, cudaStream_t stream)\r
{\r
typedef typename TypeVec<T, cn>::vec_type vec_type;\r
\r
typedef void (*caller_t)(const DevMem2D_<vec_type>& src, const DevMem2D_<vec_type>& dst, int top, int left, const T* borderValue, cudaStream_t stream);\r
\r
- static const caller_t callers[5] = \r
+ static const caller_t callers[5] =\r
{\r
- CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call, \r
- CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call, \r
- CopyMakeBorderDispatcher<BrdConstant, vec_type>::call, \r
- CopyMakeBorderDispatcher<BrdReflect, vec_type>::call, \r
- CopyMakeBorderDispatcher<BrdWrap, vec_type>::call \r
+ CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call,\r
+ CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call,\r
+ CopyMakeBorderDispatcher<BrdConstant, vec_type>::call,\r
+ CopyMakeBorderDispatcher<BrdReflect, vec_type>::call,\r
+ CopyMakeBorderDispatcher<BrdWrap, vec_type>::call\r
};\r
\r
callers[borderMode](DevMem2D_<vec_type>(src), DevMem2D_<vec_type>(dst), top, left, borderValue, stream);\r
//\r
// Copyright (c) 2010, Paul Furgale, Chi Hay Tong\r
//\r
-// The original code was written by Paul Furgale and Chi Hay Tong \r
+// The original code was written by Paul Furgale and Chi Hay Tong\r
// and later optimized and prepared for integration into OpenCV by Itseez.\r
//\r
//M*/\r
#include "opencv2/gpu/device/common.hpp"\r
#include "opencv2/gpu/device/utility.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace fast \r
+ namespace fast\r
{\r
__device__ unsigned int g_counter = 0;\r
\r
\r
\r
\r
- d1 = diffType(v, C[0] & 0xff, th); \r
+ d1 = diffType(v, C[0] & 0xff, th);\r
d2 = diffType(v, C[2] & 0xff, th);\r
\r
if ((d1 | d2) == 0)\r
return;\r
\r
mask1 |= (d1 & 1) << 0;\r
- mask2 |= ((d1 & 2) >> 1) << 0; \r
+ mask2 |= ((d1 & 2) >> 1) << 0;\r
\r
mask1 |= (d2 & 1) << 8;\r
mask2 |= ((d2 & 2) >> 1) << 8;\r
return;*/\r
\r
mask1 |= (d1 & 1) << 1;\r
- mask2 |= ((d1 & 2) >> 1) << 1; \r
+ mask2 |= ((d1 & 2) >> 1) << 1;\r
\r
mask1 |= (d2 & 1) << 9;\r
mask2 |= ((d2 & 2) >> 1) << 9;\r
return;*/\r
\r
mask1 |= (d1 & 1) << 5;\r
- mask2 |= ((d1 & 2) >> 1) << 5; \r
+ mask2 |= ((d1 & 2) >> 1) << 5;\r
\r
mask1 |= (d2 & 1) << 13;\r
mask2 |= ((d2 & 2) >> 1) << 13;\r
// 0 -> not a keypoint\r
__device__ __forceinline__ bool isKeyPoint(int mask1, int mask2)\r
{\r
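// mask1/mask2 hold the "brighter"/"darker" flags for the 16 circle pixels;
// __popc(...) > 8 requires at least 9 such pixels, and the c_table lookup is
// understood to check that they form a contiguous arc, i.e. the FAST corner test.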
- return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) || \r
+ return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||\r
(__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));\r
}\r
\r
calcMask(C, v, mid, mask1, mask2);\r
\r
int isKp = static_cast<int>(isKeyPoint(mask1, mask2));\r
- \r
+\r
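// Branchless binary search over the threshold: if the pixel still passes the
// corner test at `mid`, raise the lower bound, otherwise lower the upper bound.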
min = isKp * (mid + 1) + (isKp ^ 1) * min;\r
max = (isKp ^ 1) * (mid - 1) + isKp * max;\r
}\r
\r
return min - 1;\r
}\r
- \r
+\r
template <bool calcScore, class Mask>\r
__global__ void calcKeypoints(const DevMem2Db img, const Mask mask, short2* kpLoc, const unsigned int maxKeypoints, PtrStepi score, const int threshold)\r
{\r
C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8);\r
C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8;\r
\r
- C[3] |= static_cast<uint>(img(i, j - 3)); \r
+ C[3] |= static_cast<uint>(img(i, j - 3));\r
v = static_cast<int>(img(i, j));\r
C[1] |= static_cast<uint>(img(i, j + 3));\r
\r
cudaSafeCall( cudaGetLastError() );\r
\r
cudaSafeCall( cudaDeviceSynchronize() );\r
- \r
+\r
unsigned int count;\r
cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) );\r
\r
\r
int score = scoreMat(loc.y, loc.x);\r
\r
- bool ismax = \r
+ bool ismax =\r
score > scoreMat(loc.y - 1, loc.x - 1) &&\r
score > scoreMat(loc.y - 1, loc.x ) &&\r
score > scoreMat(loc.y - 1, loc.x + 1) &&\r
\r
score > scoreMat(loc.y , loc.x - 1) &&\r
score > scoreMat(loc.y , loc.x + 1) &&\r
- \r
+\r
score > scoreMat(loc.y + 1, loc.x - 1) &&\r
score > scoreMat(loc.y + 1, loc.x ) &&\r
score > scoreMat(loc.y + 1, loc.x + 1);\r
cudaSafeCall( cudaGetLastError() );\r
\r
cudaSafeCall( cudaDeviceSynchronize() );\r
- \r
+\r
unsigned int new_count;\r
cudaSafeCall( cudaMemcpy(&new_count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) );\r
\r
//\r
// Copyright (c) 2010, Paul Furgale, Chi Hay Tong\r
//\r
-// The original code was written by Paul Furgale and Chi Hay Tong \r
+// The original code was written by Paul Furgale and Chi Hay Tong\r
// and later optimized and prepared for integration into OpenCV by Itseez.\r
//\r
//M*/\r
#include "opencv2/gpu/device/common.hpp"\r
#include "opencv2/gpu/device/utility.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace gfft \r
+ namespace gfft\r
{\r
texture<float, cudaTextureType2D, cudaReadModeElementType> eigTex(0, cudaFilterModePoint, cudaAddressModeClamp);\r
\r
cudaSafeCall( cudaGetLastError() );\r
\r
cudaSafeCall( cudaDeviceSynchronize() );\r
- \r
+\r
uint count;\r
cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(uint), cudaMemcpyDeviceToHost) );\r
\r
\r
class EigGreater\r
{\r
- public: \r
- __device__ __forceinline__ bool operator()(float2 a, float2 b) const \r
- { \r
+ public:\r
+ __device__ __forceinline__ bool operator()(float2 a, float2 b) const\r
+ {\r
return tex2D(eigTex, a.x, a.y) > tex2D(eigTex, b.x, b.y);\r
}\r
};\r
#include "opencv2/gpu/device/utility.hpp"\r
#include "opencv2/gpu/device/saturate_cast.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
#define UINT_BITS 32U\r
\r
\r
#define USE_SMEM_ATOMICS (__CUDA_ARCH__ >= 120)\r
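// Hardware shared-memory atomics need compute capability 1.2+; older devices take the
// emulated path guarded by #if (!USE_SMEM_ATOMICS) below.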
\r
- namespace hist \r
+ namespace hist\r
{\r
#if (!USE_SMEM_ATOMICS)\r
\r
{\r
histogram256<<<PARTIAL_HISTOGRAM256_COUNT, HISTOGRAM256_THREADBLOCK_SIZE, 0, stream>>>(\r
DevMem2D_<uint>(src),\r
- buf, \r
+ buf,\r
static_cast<uint>(src.rows * src.step / sizeof(uint)),\r
src.cols);\r
\r
\r
#include "internal_shared.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
// Other values are not supported\r
#define CELL_WIDTH 8\r
#define CELLS_PER_BLOCK_X 2\r
#define CELLS_PER_BLOCK_Y 2\r
\r
- namespace hog \r
+ namespace hog\r
{\r
__constant__ int cnbins;\r
__constant__ int cblock_stride_x;\r
__constant__ int cdescr_width;\r
\r
\r
- /* Returns the nearest upper power of two, works only for \r
+ /* Returns the nearest upper power of two, works only for\r
the typical GPU thread count (per block) values */
int power_2up(unsigned int n)\r
{\r
}\r
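
// A minimal sketch of what power_2up is expected to do (hypothetical, not the
// library's actual implementation): round n up to the next power of two, e.g.
//
//     unsigned int p = 1;
//     while (p < n) p <<= 1;
//     return static_cast<int>(p);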
\r
\r
- void set_up_constants(int nbins, int block_stride_x, int block_stride_y, \r
+ void set_up_constants(int nbins, int block_stride_x, int block_stride_y,\r
int nblocks_win_x, int nblocks_win_y)\r
{\r
- cudaSafeCall( cudaMemcpyToSymbol(cnbins, &nbins, sizeof(nbins)) ); \r
- cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_x, &block_stride_x, sizeof(block_stride_x)) ); \r
- cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_y, &block_stride_y, sizeof(block_stride_y)) ); \r
- cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_x, &nblocks_win_x, sizeof(nblocks_win_x)) ); \r
- cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_y, &nblocks_win_y, sizeof(nblocks_win_y)) ); \r
+ cudaSafeCall( cudaMemcpyToSymbol(cnbins, &nbins, sizeof(nbins)) );\r
+ cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_x, &block_stride_x, sizeof(block_stride_x)) );\r
+ cudaSafeCall( cudaMemcpyToSymbol(cblock_stride_y, &block_stride_y, sizeof(block_stride_y)) );\r
+ cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_x, &nblocks_win_x, sizeof(nblocks_win_x)) );\r
+ cudaSafeCall( cudaMemcpyToSymbol(cnblocks_win_y, &nblocks_win_y, sizeof(nblocks_win_y)) );\r
\r
- int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y; \r
- cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size, &block_hist_size, sizeof(block_hist_size)) ); \r
+ int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;\r
+ cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size, &block_hist_size, sizeof(block_hist_size)) );\r
\r
- int block_hist_size_2up = power_2up(block_hist_size); \r
+ int block_hist_size_2up = power_2up(block_hist_size);\r
cudaSafeCall( cudaMemcpyToSymbol(cblock_hist_size_2up, &block_hist_size_2up, sizeof(block_hist_size_2up)) );\r
\r
int descr_width = nblocks_win_x * block_hist_size;\r
\r
\r
template <int nblocks> // Number of histogram blocks processed by single GPU thread block\r
- __global__ void compute_hists_kernel_many_blocks(const int img_block_width, const PtrElemStepf grad, \r
+ __global__ void compute_hists_kernel_many_blocks(const int img_block_width, const PtrElemStepf grad,\r
const PtrElemStep qangle, float scale, float* block_hists)\r
{\r
const int block_x = threadIdx.z;\r
float* hists = smem;\r
float* final_hist = smem + cnbins * 48 * nblocks;\r
\r
- const int offset_x = (blockIdx.x * blockDim.z + block_x) * cblock_stride_x + \r
+ const int offset_x = (blockIdx.x * blockDim.z + block_x) * cblock_stride_x +\r
4 * cell_x + cell_thread_x;\r
const int offset_y = blockIdx.y * cblock_stride_y + 4 * cell_y;\r
\r
// 12 means that 12 pixels affect the block's cell (in one row)
if (cell_thread_x < 12)\r
{\r
- float* hist = hists + 12 * (cell_y * blockDim.z * CELLS_PER_BLOCK_Y + \r
- cell_x + block_x * CELLS_PER_BLOCK_X) + \r
+ float* hist = hists + 12 * (cell_y * blockDim.z * CELLS_PER_BLOCK_Y +\r
+ cell_x + block_x * CELLS_PER_BLOCK_X) +\r
cell_thread_x;\r
for (int bin_id = 0; bin_id < cnbins; ++bin_id)\r
hist[bin_id * 48 * nblocks] = 0.f;\r
int dist_center_y = dist_y - 4 * (1 - 2 * cell_y);\r
int dist_center_x = dist_x - 4 * (1 - 2 * cell_x);\r
\r
- float gaussian = ::expf(-(dist_center_y * dist_center_y + \r
+ float gaussian = ::expf(-(dist_center_y * dist_center_y +\r
dist_center_x * dist_center_x) * scale);\r
- float interp_weight = (8.f - ::fabs(dist_y + 0.5f)) * \r
+ float interp_weight = (8.f - ::fabs(dist_y + 0.5f)) *\r
(8.f - ::fabs(dist_x + 0.5f)) / 64.f;\r
\r
hist[bin.x * 48 * nblocks] += gaussian * interp_weight * vote.x;\r
{\r
if (cell_thread_x < 6) hist_[0] += hist_[6];\r
if (cell_thread_x < 3) hist_[0] += hist_[3];\r
- if (cell_thread_x == 0) \r
- final_hist[((cell_x + block_x * 2) * 2 + cell_y) * cnbins + bin_id] \r
+ if (cell_thread_x == 0)\r
+ final_hist[((cell_x + block_x * 2) * 2 + cell_y) * cnbins + bin_id]\r
= hist_[0] + hist_[1] + hist_[2];\r
}\r
}\r
\r
__syncthreads();\r
\r
- float* block_hist = block_hists + (blockIdx.y * img_block_width + \r
- blockIdx.x * blockDim.z + block_x) * \r
- cblock_hist_size; \r
+ float* block_hist = block_hists + (blockIdx.y * img_block_width +\r
+ blockIdx.x * blockDim.z + block_x) *\r
+ cblock_hist_size;\r
\r
int tid = (cell_y * CELLS_PER_BLOCK_Y + cell_x) * 16 + cell_thread_x;\r
if (tid < cblock_hist_size)\r
- block_hist[tid] = final_hist[block_x * cblock_hist_size + tid]; \r
+ block_hist[tid] = final_hist[block_x * cblock_hist_size + tid];\r
}\r
\r
\r
- void compute_hists(int nbins, int block_stride_x, int block_stride_y, \r
- int height, int width, const DevMem2Df& grad, \r
- const DevMem2Db& qangle, float sigma, float* block_hists) \r
+ void compute_hists(int nbins, int block_stride_x, int block_stride_y,\r
+ int height, int width, const DevMem2Df& grad,\r
+ const DevMem2Db& qangle, float sigma, float* block_hists)\r
{\r
const int nblocks = 1;\r
\r
- int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / \r
+ int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) /\r
block_stride_x;\r
- int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) / \r
+ int img_block_height = (height - CELLS_PER_BLOCK_Y * CELL_HEIGHT + block_stride_y) /\r
block_stride_y;\r
\r
dim3 grid(divUp(img_block_width, nblocks), img_block_height);\r
dim3 threads(32, 2, nblocks);\r
\r
- cudaSafeCall(cudaFuncSetCacheConfig(compute_hists_kernel_many_blocks<nblocks>, \r
+ cudaSafeCall(cudaFuncSetCacheConfig(compute_hists_kernel_many_blocks<nblocks>,\r
cudaFuncCachePreferL1));\r
- \r
+\r
// Precompute gaussian spatial window parameter\r
float scale = 1.f / (2.f * sigma * sigma);\r
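// scale is the 1/(2*sigma^2) factor of the Gaussian window
// exp(-(dx^2 + dy^2) / (2*sigma^2)) evaluated per pixel in
// compute_hists_kernel_many_blocks.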
\r
//\r
\r
\r
- template<int size> \r
+ template<int size>\r
__device__ float reduce_smem(volatile float* smem)\r
- { \r
+ {\r
unsigned int tid = threadIdx.x;\r
float sum = smem[tid];\r
\r
if (size >= 512) { if (tid < 256) smem[tid] = sum = sum + smem[tid + 256]; __syncthreads(); }\r
if (size >= 256) { if (tid < 128) smem[tid] = sum = sum + smem[tid + 128]; __syncthreads(); }\r
if (size >= 128) { if (tid < 64) smem[tid] = sum = sum + smem[tid + 64]; __syncthreads(); }\r
- \r
+\r
if (tid < 32)\r
- { \r
+ {\r
if (size >= 64) smem[tid] = sum = sum + smem[tid + 32];\r
if (size >= 32) smem[tid] = sum = sum + smem[tid + 16];\r
if (size >= 16) smem[tid] = sum = sum + smem[tid + 8];\r
\r
__syncthreads();\r
sum = smem[0];\r
- \r
+\r
return sum;\r
}\r
\r
\r
- template <int nthreads, // Number of threads which process one block historgam \r
+ template <int nthreads, // Number of threads which process one block histogram
int nblocks> // Number of block histograms processed by one GPU thread block
__global__ void normalize_hists_kernel_many_blocks(const int block_hist_size,\r
- const int img_block_width, \r
+ const int img_block_width,\r
float* block_hists, float threshold)\r
{\r
if (blockIdx.x * blockDim.z + threadIdx.z >= img_block_width)\r
return;\r
\r
- float* hist = block_hists + (blockIdx.y * img_block_width + \r
- blockIdx.x * blockDim.z + threadIdx.z) * \r
+ float* hist = block_hists + (blockIdx.y * img_block_width +\r
+ blockIdx.x * blockDim.z + threadIdx.z) *\r
block_hist_size + threadIdx.x;\r
- \r
+\r
__shared__ float sh_squares[nthreads * nblocks];\r
float* squares = sh_squares + threadIdx.z * nthreads;\r
- \r
+\r
float elem = 0.f;\r
if (threadIdx.x < block_hist_size)\r
elem = hist[0];\r
- \r
- squares[threadIdx.x] = elem * elem; \r
+\r
+ squares[threadIdx.x] = elem * elem;\r
\r
__syncthreads();\r
float sum = reduce_smem<nthreads>(squares);\r
- \r
- float scale = 1.0f / (::sqrtf(sum) + 0.1f * block_hist_size); \r
+\r
+ float scale = 1.0f / (::sqrtf(sum) + 0.1f * block_hist_size);\r
elem = ::min(elem * scale, threshold);\r
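
// L2-Hys: the block histogram has been L2-normalized and clipped at `threshold`;
// the second reduction and scale below renormalize the clipped values.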
- \r
+\r
__syncthreads();\r
squares[threadIdx.x] = elem * elem;\r
\r
__syncthreads();\r
sum = reduce_smem<nthreads>(squares);\r
scale = 1.0f / (::sqrtf(sum) + 1e-3f);\r
- \r
+\r
if (threadIdx.x < block_hist_size)\r
hist[0] = elem * scale;\r
}\r
\r
\r
- void normalize_hists(int nbins, int block_stride_x, int block_stride_y, \r
+ void normalize_hists(int nbins, int block_stride_x, int block_stride_y,\r
int height, int width, float* block_hists, float threshold)\r
- { \r
+ {\r
const int nblocks = 1;\r
\r
int block_hist_size = nbins * CELLS_PER_BLOCK_X * CELLS_PER_BLOCK_Y;\r
//\r
\r
\r
- template <int nthreads, // Number of threads per one histogram block \r
+ template <int nthreads, // Number of threads per one histogram block\r
int nblocks> // Number of histogram blocks processed by a single GPU thread block
- __global__ void classify_hists_kernel_many_blocks(const int img_win_width, const int img_block_width, \r
+ __global__ void classify_hists_kernel_many_blocks(const int img_win_width, const int img_block_width,\r
const int win_block_stride_x, const int win_block_stride_y,\r
const float* block_hists, const float* coefs,\r
float free_coef, float threshold, unsigned char* labels)\r
- { \r
+ {\r
const int win_x = threadIdx.z;\r
if (blockIdx.x * blockDim.z + win_x >= img_win_width)\r
return;\r
\r
- const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + \r
- blockIdx.x * win_block_stride_x * blockDim.z + win_x) * \r
+ const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +\r
+ blockIdx.x * win_block_stride_x * blockDim.z + win_x) *\r
cblock_hist_size;\r
\r
float product = 0.f;\r
\r
__syncthreads();\r
\r
- if (nthreads >= 512) \r
- { \r
+ if (nthreads >= 512)\r
+ {\r
if (threadIdx.x < 256) products[tid] = product = product + products[tid + 256];\r
- __syncthreads(); \r
+ __syncthreads();\r
}\r
- if (nthreads >= 256) \r
- { \r
- if (threadIdx.x < 128) products[tid] = product = product + products[tid + 128]; \r
- __syncthreads(); \r
+ if (nthreads >= 256)\r
+ {\r
+ if (threadIdx.x < 128) products[tid] = product = product + products[tid + 128];\r
+ __syncthreads();\r
}\r
- if (nthreads >= 128) \r
- { \r
- if (threadIdx.x < 64) products[tid] = product = product + products[tid + 64]; \r
- __syncthreads(); \r
+ if (nthreads >= 128)\r
+ {\r
+ if (threadIdx.x < 64) products[tid] = product = product + products[tid + 64];\r
+ __syncthreads();\r
}\r
- \r
+\r
if (threadIdx.x < 32)\r
- { \r
+ {\r
volatile float* smem = products;\r
if (nthreads >= 64) smem[tid] = product = product + smem[tid + 32];\r
if (nthreads >= 32) smem[tid] = product = product + smem[tid + 16];\r
}\r
\r
\r
- void classify_hists(int win_height, int win_width, int block_stride_y, int block_stride_x, \r
- int win_stride_y, int win_stride_x, int height, int width, float* block_hists, \r
+ void classify_hists(int win_height, int win_width, int block_stride_y, int block_stride_x,\r
+ int win_stride_y, int win_stride_x, int height, int width, float* block_hists,\r
float* coefs, float free_coef, float threshold, unsigned char* labels)\r
- { \r
+ {\r
const int nthreads = 256;\r
const int nblocks = 1;\r
\r
\r
int img_block_width = (width - CELLS_PER_BLOCK_X * CELL_WIDTH + block_stride_x) / block_stride_x;\r
classify_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>(\r
- img_win_width, img_block_width, win_block_stride_x, win_block_stride_y, \r
+ img_win_width, img_block_width, win_block_stride_x, win_block_stride_y,\r
block_hists, coefs, free_coef, threshold, labels);\r
cudaSafeCall( cudaGetLastError() );\r
\r
\r
\r
template <int nthreads>\r
- __global__ void extract_descrs_by_rows_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, \r
+ __global__ void extract_descrs_by_rows_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y,\r
const float* block_hists, PtrElemStepf descriptors)\r
{\r
// Get left top corner of the window in src\r
- const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + \r
+ const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +\r
blockIdx.x * win_block_stride_x) * cblock_hist_size;\r
\r
// Get left top corner of the window in dst\r
}\r
\r
\r
- void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, \r
+ void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x,\r
int height, int width, float* block_hists, DevMem2Df descriptors)\r
{\r
const int nthreads = 256;\r
\r
\r
template <int nthreads>\r
- __global__ void extract_descrs_by_cols_kernel(const int img_block_width, const int win_block_stride_x, \r
- const int win_block_stride_y, const float* block_hists, \r
+ __global__ void extract_descrs_by_cols_kernel(const int img_block_width, const int win_block_stride_x,\r
+ const int win_block_stride_y, const float* block_hists,\r
PtrElemStepf descriptors)\r
{\r
// Get left top corner of the window in src\r
- const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + \r
+ const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width +\r
blockIdx.x * win_block_stride_x) * cblock_hist_size;\r
\r
// Get left top corner of the window in dst\r
int y = block_idx / cnblocks_win_x;\r
int x = block_idx - y * cnblocks_win_x;\r
\r
- descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block] \r
+ descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block]\r
= hist[(y * img_block_width + x) * cblock_hist_size + idx_in_block];\r
}\r
}\r
\r
\r
- void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x, \r
- int win_stride_y, int win_stride_x, int height, int width, float* block_hists, \r
+ void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x,\r
+ int win_stride_y, int win_stride_x, int height, int width, float* block_hists,\r
DevMem2Df descriptors)\r
{\r
const int nthreads = 256;\r
\r
\r
template <int nthreads, int correct_gamma>\r
- __global__ void compute_gradients_8UC4_kernel(int height, int width, const PtrElemStep img, \r
+ __global__ void compute_gradients_8UC4_kernel(int height, int width, const PtrElemStep img,\r
float angle_scale, PtrElemStepf grad, PtrElemStep qangle)\r
{\r
const int x = blockIdx.x * blockDim.x + threadIdx.x;\r
__shared__ float sh_row[(nthreads + 2) * 3];\r
\r
uchar4 val;\r
- if (x < width) \r
- val = row[x]; \r
- else \r
+ if (x < width)\r
+ val = row[x];\r
+ else\r
val = row[width - 2];\r
\r
sh_row[threadIdx.x + 1] = val.x;\r
\r
float3 dx;\r
if (correct_gamma)\r
- dx = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z)); \r
+ dx = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z));\r
else\r
- dx = make_float3(b.x - a.x, b.y - a.y, b.z - a.z); \r
+ dx = make_float3(b.x - a.x, b.y - a.y, b.z - a.z);\r
\r
float3 dy = make_float3(0.f, 0.f, 0.f);\r
\r
\r
float mag0 = dx.x * dx.x + dy.x * dy.x;\r
float mag1 = dx.y * dx.y + dy.y * dy.y;\r
- if (mag0 < mag1) \r
+ if (mag0 < mag1)\r
{\r
best_dx = dx.y;\r
best_dy = dy.y;\r
}\r
\r
\r
- void compute_gradients_8UC4(int nbins, int height, int width, const DevMem2Db& img, \r
+ void compute_gradients_8UC4(int nbins, int height, int width, const DevMem2Db& img,\r
float angle_scale, DevMem2Df grad, DevMem2Db qangle, bool correct_gamma)\r
{\r
const int nthreads = 256;\r
}\r
\r
template <int nthreads, int correct_gamma>\r
- __global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrElemStep img, \r
+ __global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrElemStep img,\r
float angle_scale, PtrElemStepf grad, PtrElemStep qangle)\r
{\r
const int x = blockIdx.x * blockDim.x + threadIdx.x;\r
\r
__shared__ float sh_row[nthreads + 2];\r
\r
- if (x < width) \r
- sh_row[threadIdx.x + 1] = row[x]; \r
- else \r
+ if (x < width)\r
+ sh_row[threadIdx.x + 1] = row[x];\r
+ else\r
sh_row[threadIdx.x + 1] = row[width - 2];\r
\r
if (threadIdx.x == 0)\r
}\r
\r
\r
- void compute_gradients_8UC1(int nbins, int height, int width, const DevMem2Db& img, \r
+ void compute_gradients_8UC1(int nbins, int height, int width, const DevMem2Db& img,\r
float angle_scale, DevMem2Df grad, DevMem2Db qangle, bool correct_gamma)\r
{\r
const int nthreads = 256;\r
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;\r
\r
if (x < dst.cols && y < dst.rows)\r
- { \r
+ {\r
float4 val = tex2D(resize8UC4_tex, x * sx + colOfs, y * sy);\r
dst.ptr(y)[x] = make_uchar4(val.x * 255, val.y * 255, val.z * 255, val.w * 255);\r
}\r
}\r
\r
- template<class T, class TEX> \r
+ template<class T, class TEX>\r
static void resize_for_hog(const DevMem2Db& src, DevMem2Db dst, TEX& tex)\r
{\r
tex.filterMode = cudaFilterModeLinear;\r
size_t texOfs = 0;\r
int colOfs = 0;\r
\r
- cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>(); \r
+ cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>();\r
cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );\r
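// Added note (not in the original source): cudaBindTexture2D returns in texOfs the byte
// offset the runtime applied to meet the texture alignment requirement; when it is
// non-zero, the block below converts it to an element offset (colOfs) that the kernel
// adds to the x texture coordinate (see the tex2D call above).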
\r
- if (texOfs != 0) \r
+ if (texOfs != 0)\r
{\r
colOfs = static_cast<int>( texOfs/sizeof(T) );\r
cudaSafeCall( cudaUnbindTexture(tex) );\r
cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) );\r
- } \r
+ }\r
\r
dim3 threads(32, 8);\r
dim3 grid(divUp(dst.cols, threads.x), divUp(dst.rows, threads.y));\r
- \r
+\r
float sx = static_cast<float>(src.cols) / dst.cols;\r
float sy = static_cast<float>(src.rows) / dst.rows;\r
\r
\r
void resize_8UC1(const DevMem2Db& src, DevMem2Db dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); }\r
void resize_8UC4(const DevMem2Db& src, DevMem2Db dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); }\r
- } // namespace hog \r
+ } // namespace hog\r
}}} // namespace cv { namespace gpu { namespace device\r
#undef IMPLEMENT_FILTER2D_TEX_READER\r
\r
template <typename T, typename D>\r
- void filter2D_gpu(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst, \r
- int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, \r
+ void filter2D_gpu(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst,\r
+ int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel,\r
int borderMode, const float* borderValue, cudaStream_t stream)\r
{\r
typedef void (*func_t)(const DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2D_<D> dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream);\r
- static const func_t funcs[] = \r
+ static const func_t funcs[] =\r
{\r
Filter2DCaller<T, D, BrdReflect101>::call,\r
Filter2DCaller<T, D, BrdReplicate>::call,\r
#include "safe_call.hpp"\r
#include "opencv2/gpu/device/common.hpp"\r
\r
-namespace cv { namespace gpu \r
+namespace cv { namespace gpu\r
{\r
- enum \r
+ enum\r
{\r
BORDER_REFLECT101_GPU = 0,\r
BORDER_REPLICATE_GPU,\r
BORDER_REFLECT_GPU,\r
BORDER_WRAP_GPU\r
};\r
- \r
+\r
// Converts CPU border extrapolation mode into GPU internal analogue.\r
// Returns true if the GPU analogue exists, false otherwise.\r
bool tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType);\r
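// Illustrative sketch (not the actual OpenCV implementation, which lives in the host
// sources): a possible mapping from the usual cv::BORDER_* constants to the GPU enum
// declared above; only the four modes with a GPU analogue succeed.
bool tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType)
{
    switch (cpuBorderType)
    {
    case cv::BORDER_REFLECT101: gpuBorderType = BORDER_REFLECT101_GPU; return true;
    case cv::BORDER_REPLICATE:  gpuBorderType = BORDER_REPLICATE_GPU;  return true;
    case cv::BORDER_REFLECT:    gpuBorderType = BORDER_REFLECT_GPU;    return true;
    case cv::BORDER_WRAP:       gpuBorderType = BORDER_WRAP_GPU;       return true;
    default:                    return false;
    }
}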
#include "internal_shared.hpp"\r
#include "opencv2/gpu/device/vec_math.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace match_template \r
+ namespace match_template\r
{\r
__device__ __forceinline__ float sum(float v) { return v; }\r
__device__ __forceinline__ float sum(float2 v) { return v.x + v.y; }\r
//////////////////////////////////////////////////////////////////////\r
// Naive_CCORR\r
\r
- template <typename T, int cn> \r
+ template <typename T, int cn>\r
__global__ void matchTemplateNaiveKernel_CCORR(int w, int h, const PtrStepb image, const PtrStepb templ, DevMem2Df result)\r
{\r
typedef typename TypeVec<T, cn>::vec_type Type;\r
{\r
typedef void (*caller_t)(const DevMem2Db image, const DevMem2Db templ, DevMem2Df result, cudaStream_t stream);\r
\r
- static const caller_t callers[] = \r
+ static const caller_t callers[] =\r
{\r
0, matchTemplateNaive_CCORR<float, 1>, matchTemplateNaive_CCORR<float, 2>, matchTemplateNaive_CCORR<float, 3>, matchTemplateNaive_CCORR<float, 4>\r
};\r
{\r
typedef void (*caller_t)(const DevMem2Db image, const DevMem2Db templ, DevMem2Df result, cudaStream_t stream);\r
\r
- static const caller_t callers[] = \r
+ static const caller_t callers[] =\r
{\r
0, matchTemplateNaive_CCORR<uchar, 1>, matchTemplateNaive_CCORR<uchar, 2>, matchTemplateNaive_CCORR<uchar, 3>, matchTemplateNaive_CCORR<uchar, 4>\r
};\r
{\r
typedef void (*caller_t)(const DevMem2Db image, const DevMem2Db templ, DevMem2Df result, cudaStream_t stream);\r
\r
- static const caller_t callers[] = \r
+ static const caller_t callers[] =\r
{\r
0, matchTemplateNaive_SQDIFF<float, 1>, matchTemplateNaive_SQDIFF<float, 2>, matchTemplateNaive_SQDIFF<float, 3>, matchTemplateNaive_SQDIFF<float, 4>\r
};\r
{\r
typedef void (*caller_t)(const DevMem2Db image, const DevMem2Db templ, DevMem2Df result, cudaStream_t stream);\r
\r
- static const caller_t callers[] = \r
+ static const caller_t callers[] =\r
{\r
0, matchTemplateNaive_SQDIFF<uchar, 1>, matchTemplateNaive_SQDIFF<uchar, 2>, matchTemplateNaive_SQDIFF<uchar, 3>, matchTemplateNaive_SQDIFF<uchar, 4>\r
};\r
{\r
typedef void (*caller_t)(int w, int h, const DevMem2D_<unsigned long long> image_sqsum, unsigned long long templ_sqsum, DevMem2Df result, cudaStream_t stream);\r
\r
- static const caller_t callers[] = \r
+ static const caller_t callers[] =\r
{\r
0, matchTemplatePrepared_SQDIFF_8U<1>, matchTemplatePrepared_SQDIFF_8U<2>, matchTemplatePrepared_SQDIFF_8U<3>, matchTemplatePrepared_SQDIFF_8U<4>\r
};\r
DevMem2Df result, int cn, cudaStream_t stream)\r
{\r
typedef void (*caller_t)(int w, int h, const DevMem2D_<unsigned long long> image_sqsum, unsigned long long templ_sqsum, DevMem2Df result, cudaStream_t stream);\r
- static const caller_t callers[] = \r
+ static const caller_t callers[] =\r
{\r
0, matchTemplatePrepared_SQDIFF_NORMED_8U<1>, matchTemplatePrepared_SQDIFF_NORMED_8U<2>, matchTemplatePrepared_SQDIFF_NORMED_8U<3>, matchTemplatePrepared_SQDIFF_NORMED_8U<4>\r
};\r
(image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) -\r
(image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x]));\r
float ccorr = result.ptr(y)[x];\r
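// Added note (not in the original source): per channel c this applies the identity
// sum_window (T_c - mean(T_c)) * I_c = CCORR_c - mean(T_c) * sum_window I_c, where
// templ_sum_scale_c = sum(T_c) / (w * h) and image_sum_c_ is the window sum of I_c
// read from the integral image above.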
- result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r \r
+ result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r\r
- image_sum_g_ * templ_sum_scale_g;\r
}\r
}\r
\r
void matchTemplatePrepared_CCOFF_8UC2(\r
- int w, int h, \r
- const DevMem2D_<unsigned int> image_sum_r, \r
+ int w, int h,\r
+ const DevMem2D_<unsigned int> image_sum_r,\r
const DevMem2D_<unsigned int> image_sum_g,\r
- unsigned int templ_sum_r, unsigned int templ_sum_g, \r
+ unsigned int templ_sum_r, unsigned int templ_sum_g,\r
DevMem2Df result, cudaStream_t stream)\r
{\r
dim3 threads(32, 8);\r
\r
\r
__global__ void matchTemplatePreparedKernel_CCOFF_8UC3(\r
- int w, int h, \r
+ int w, int h,\r
float templ_sum_scale_r,\r
float templ_sum_scale_g,\r
float templ_sum_scale_b,\r
}\r
\r
void matchTemplatePrepared_CCOFF_8UC3(\r
- int w, int h, \r
- const DevMem2D_<unsigned int> image_sum_r, \r
+ int w, int h,\r
+ const DevMem2D_<unsigned int> image_sum_r,\r
const DevMem2D_<unsigned int> image_sum_g,\r
const DevMem2D_<unsigned int> image_sum_b,\r
- unsigned int templ_sum_r, \r
- unsigned int templ_sum_g, \r
- unsigned int templ_sum_b, \r
+ unsigned int templ_sum_r,\r
+ unsigned int templ_sum_g,\r
+ unsigned int templ_sum_b,\r
DevMem2Df result, cudaStream_t stream)\r
{\r
dim3 threads(32, 8);\r
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));\r
\r
matchTemplatePreparedKernel_CCOFF_8UC3<<<grid, threads, 0, stream>>>(\r
- w, h, \r
+ w, h,\r
(float)templ_sum_r / (w * h),\r
(float)templ_sum_g / (w * h),\r
(float)templ_sum_b / (w * h),\r
\r
\r
__global__ void matchTemplatePreparedKernel_CCOFF_8UC4(\r
- int w, int h, \r
- float templ_sum_scale_r, \r
+ int w, int h,\r
+ float templ_sum_scale_r,\r
float templ_sum_scale_g,\r
float templ_sum_scale_b,\r
float templ_sum_scale_a,\r
(image_sum_a.ptr(y + h)[x + w] - image_sum_a.ptr(y)[x + w]) -\r
(image_sum_a.ptr(y + h)[x] - image_sum_a.ptr(y)[x]));\r
float ccorr = result.ptr(y)[x];\r
- result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r \r
+ result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r\r
- image_sum_g_ * templ_sum_scale_g\r
- image_sum_b_ * templ_sum_scale_b\r
- image_sum_a_ * templ_sum_scale_a;\r
}\r
\r
void matchTemplatePrepared_CCOFF_8UC4(\r
- int w, int h, \r
- const DevMem2D_<unsigned int> image_sum_r, \r
+ int w, int h,\r
+ const DevMem2D_<unsigned int> image_sum_r,\r
const DevMem2D_<unsigned int> image_sum_g,\r
const DevMem2D_<unsigned int> image_sum_b,\r
const DevMem2D_<unsigned int> image_sum_a,\r
- unsigned int templ_sum_r, \r
- unsigned int templ_sum_g, \r
- unsigned int templ_sum_b, \r
- unsigned int templ_sum_a, \r
+ unsigned int templ_sum_r,\r
+ unsigned int templ_sum_g,\r
+ unsigned int templ_sum_b,\r
+ unsigned int templ_sum_a,\r
DevMem2Df result, cudaStream_t stream)\r
{\r
dim3 threads(32, 8);\r
dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y));\r
\r
matchTemplatePreparedKernel_CCOFF_8UC4<<<grid, threads, 0, stream>>>(\r
- w, h, \r
- (float)templ_sum_r / (w * h), \r
- (float)templ_sum_g / (w * h), \r
+ w, h,\r
+ (float)templ_sum_r / (w * h),\r
+ (float)templ_sum_g / (w * h),\r
(float)templ_sum_b / (w * h),\r
(float)templ_sum_a / (w * h),\r
image_sum_r, image_sum_g, image_sum_b, image_sum_a,\r
// Prepared_CCOFF_NORMED\r
\r
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8U(\r
- int w, int h, float weight, \r
+ int w, int h, float weight,\r
float templ_sum_scale, float templ_sqsum_scale,\r
- const PtrStep<unsigned int> image_sum, \r
+ const PtrStep<unsigned int> image_sum,\r
const PtrStep<unsigned long long> image_sqsum,\r
DevMem2Df result)\r
{\r
}\r
\r
void matchTemplatePrepared_CCOFF_NORMED_8U(\r
- int w, int h, const DevMem2D_<unsigned int> image_sum, \r
+ int w, int h, const DevMem2D_<unsigned int> image_sum,\r
const DevMem2D_<unsigned long long> image_sqsum,\r
unsigned int templ_sum, unsigned long long templ_sqsum,\r
DevMem2Df result, cudaStream_t stream)\r
float templ_sqsum_scale = templ_sqsum - weight * templ_sum * templ_sum;\r
\r
matchTemplatePreparedKernel_CCOFF_NORMED_8U<<<grid, threads, 0, stream>>>(\r
- w, h, weight, templ_sum_scale, templ_sqsum_scale, \r
+ w, h, weight, templ_sum_scale, templ_sqsum_scale,\r
image_sum, image_sqsum, result);\r
cudaSafeCall( cudaGetLastError() );\r
\r
\r
\r
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC2(\r
- int w, int h, float weight, \r
- float templ_sum_scale_r, float templ_sum_scale_g, \r
+ int w, int h, float weight,\r
+ float templ_sum_scale_r, float templ_sum_scale_g,\r
float templ_sqsum_scale,\r
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,\r
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,\r
}\r
\r
void matchTemplatePrepared_CCOFF_NORMED_8UC2(\r
- int w, int h, \r
+ int w, int h,\r
const DevMem2D_<unsigned int> image_sum_r, const DevMem2D_<unsigned long long> image_sqsum_r,\r
const DevMem2D_<unsigned int> image_sum_g, const DevMem2D_<unsigned long long> image_sqsum_g,\r
unsigned int templ_sum_r, unsigned long long templ_sqsum_r,\r
float weight = 1.f / (w * h);\r
float templ_sum_scale_r = templ_sum_r * weight;\r
float templ_sum_scale_g = templ_sum_g * weight;\r
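// Added note (not in the original source): with weight = 1 / (w * h), the expression
// below equals sum(T_c^2) - (sum T_c)^2 / (w * h) accumulated over both channels,
// i.e. the template's sum of squared deviations sum((T_c - mean T_c)^2), which is the
// normalization term used for CCOEFF_NORMED.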
- float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r \r
+ float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r\r
+ templ_sqsum_g - weight * templ_sum_g * templ_sum_g;\r
\r
matchTemplatePreparedKernel_CCOFF_NORMED_8UC2<<<grid, threads, 0, stream>>>(\r
- w, h, weight, \r
+ w, h, weight,\r
templ_sum_scale_r, templ_sum_scale_g,\r
templ_sqsum_scale,\r
- image_sum_r, image_sqsum_r, \r
- image_sum_g, image_sqsum_g, \r
+ image_sum_r, image_sqsum_r,\r
+ image_sum_g, image_sqsum_g,\r
result);\r
cudaSafeCall( cudaGetLastError() );\r
\r
\r
\r
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC3(\r
- int w, int h, float weight, \r
- float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b, \r
+ int w, int h, float weight,\r
+ float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b,\r
float templ_sqsum_scale,\r
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,\r
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,\r
}\r
\r
void matchTemplatePrepared_CCOFF_NORMED_8UC3(\r
- int w, int h, \r
+ int w, int h,\r
const DevMem2D_<unsigned int> image_sum_r, const DevMem2D_<unsigned long long> image_sqsum_r,\r
const DevMem2D_<unsigned int> image_sum_g, const DevMem2D_<unsigned long long> image_sqsum_g,\r
const DevMem2D_<unsigned int> image_sum_b, const DevMem2D_<unsigned long long> image_sqsum_b,\r
float templ_sum_scale_r = templ_sum_r * weight;\r
float templ_sum_scale_g = templ_sum_g * weight;\r
float templ_sum_scale_b = templ_sum_b * weight;\r
- float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r \r
+ float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r\r
+ templ_sqsum_g - weight * templ_sum_g * templ_sum_g\r
+ templ_sqsum_b - weight * templ_sum_b * templ_sum_b;\r
\r
matchTemplatePreparedKernel_CCOFF_NORMED_8UC3<<<grid, threads, 0, stream>>>(\r
- w, h, weight, \r
- templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b, \r
- templ_sqsum_scale, \r
- image_sum_r, image_sqsum_r, \r
- image_sum_g, image_sqsum_g, \r
- image_sum_b, image_sqsum_b, \r
+ w, h, weight,\r
+ templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b,\r
+ templ_sqsum_scale,\r
+ image_sum_r, image_sqsum_r,\r
+ image_sum_g, image_sqsum_g,\r
+ image_sum_b, image_sqsum_b,\r
result);\r
cudaSafeCall( cudaGetLastError() );\r
\r
\r
\r
__global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC4(\r
- int w, int h, float weight, \r
- float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b, \r
+ int w, int h, float weight,\r
+ float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b,\r
float templ_sum_scale_a, float templ_sqsum_scale,\r
const PtrStep<unsigned int> image_sum_r, const PtrStep<unsigned long long> image_sqsum_r,\r
const PtrStep<unsigned int> image_sum_g, const PtrStep<unsigned long long> image_sqsum_g,\r
}\r
\r
void matchTemplatePrepared_CCOFF_NORMED_8UC4(\r
- int w, int h, \r
+ int w, int h,\r
const DevMem2D_<unsigned int> image_sum_r, const DevMem2D_<unsigned long long> image_sqsum_r,\r
const DevMem2D_<unsigned int> image_sum_g, const DevMem2D_<unsigned long long> image_sqsum_g,\r
const DevMem2D_<unsigned int> image_sum_b, const DevMem2D_<unsigned long long> image_sqsum_b,\r
+ templ_sqsum_a - weight * templ_sum_a * templ_sum_a;\r
\r
matchTemplatePreparedKernel_CCOFF_NORMED_8UC4<<<grid, threads, 0, stream>>>(\r
- w, h, weight, \r
- templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b, templ_sum_scale_a, \r
- templ_sqsum_scale, \r
- image_sum_r, image_sqsum_r, \r
- image_sum_g, image_sqsum_g, \r
- image_sum_b, image_sqsum_b, \r
- image_sum_a, image_sqsum_a, \r
+ w, h, weight,\r
+ templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b, templ_sum_scale_a,\r
+ templ_sqsum_scale,\r
+ image_sum_r, image_sqsum_r,\r
+ image_sum_g, image_sqsum_g,\r
+ image_sum_b, image_sqsum_b,\r
+ image_sum_a, image_sqsum_a,\r
result);\r
cudaSafeCall( cudaGetLastError() );\r
\r
\r
template <int cn>\r
__global__ void normalizeKernel_8U(\r
- int w, int h, const PtrStep<unsigned long long> image_sqsum, \r
+ int w, int h, const PtrStep<unsigned long long> image_sqsum,\r
unsigned long long templ_sqsum, DevMem2Df result)\r
{\r
const int x = blockIdx.x * blockDim.x + threadIdx.x;\r
}\r
}\r
\r
- void normalize_8U(int w, int h, const DevMem2D_<unsigned long long> image_sqsum, \r
+ void normalize_8U(int w, int h, const DevMem2D_<unsigned long long> image_sqsum,\r
unsigned long long templ_sqsum, DevMem2Df result, int cn, cudaStream_t stream)\r
{\r
dim3 threads(32, 8);\r
\r
#include "internal_shared.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace mathfunc \r
+ namespace mathfunc\r
{\r
//////////////////////////////////////////////////////////////////////////////////////\r
// Cart <-> Polar\r
}\r
};\r
template <typename Mag, typename Angle>\r
- __global__ void cartToPolar(const float* xptr, size_t x_step, const float* yptr, size_t y_step, \r
+ __global__ void cartToPolar(const float* xptr, size_t x_step, const float* yptr, size_t y_step,\r
float* mag, size_t mag_step, float* angle, size_t angle_step, float scale, int width, int height)\r
{\r
const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
\r
grid.x = divUp(x.cols, threads.x);\r
grid.y = divUp(x.rows, threads.y);\r
- \r
+\r
const float scale = angleInDegrees ? (float)(180.0f / CV_PI) : 1.f;\r
\r
cartToPolar<Mag, Angle><<<grid, threads, 0, stream>>>(\r
- x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(), \r
+ x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(),\r
mag.data, mag.step/mag.elemSize(), angle.data, angle.step/angle.elemSize(), scale, x.cols, x.rows);\r
cudaSafeCall( cudaGetLastError() );\r
\r
void cartToPolar_gpu(DevMem2Df x, DevMem2Df y, DevMem2Df mag, bool magSqr, DevMem2Df angle, bool angleInDegrees, cudaStream_t stream)\r
{\r
typedef void (*caller_t)(DevMem2Df x, DevMem2Df y, DevMem2Df mag, DevMem2Df angle, bool angleInDegrees, cudaStream_t stream);\r
- static const caller_t callers[2][2][2] = \r
+ static const caller_t callers[2][2][2] =\r
{\r
{\r
{\r
\r
grid.x = divUp(mag.cols, threads.x);\r
grid.y = divUp(mag.rows, threads.y);\r
- \r
+\r
const float scale = angleInDegrees ? (float)(CV_PI / 180.0f) : 1.0f;\r
\r
- polarToCart<Mag><<<grid, threads, 0, stream>>>(mag.data, mag.step/mag.elemSize(), \r
+ polarToCart<Mag><<<grid, threads, 0, stream>>>(mag.data, mag.step/mag.elemSize(),\r
angle.data, angle.step/angle.elemSize(), scale, x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(), mag.cols, mag.rows);\r
cudaSafeCall( cudaGetLastError() );\r
\r
void polarToCart_gpu(DevMem2Df mag, DevMem2Df angle, DevMem2Df x, DevMem2Df y, bool angleInDegrees, cudaStream_t stream)\r
{\r
typedef void (*caller_t)(DevMem2Df mag, DevMem2Df angle, DevMem2Df x, DevMem2Df y, bool angleInDegrees, cudaStream_t stream);\r
- static const caller_t callers[2] = \r
+ static const caller_t callers[2] =\r
{\r
polarToCart_caller<NonEmptyMag>,\r
polarToCart_caller<EmptyMag>\r
#include "opencv2/gpu/device/saturate_cast.hpp"\r
#include "opencv2/gpu/device/vec_math.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace matrix_reductions \r
+ namespace matrix_reductions\r
{\r
// Performs reduction in shared memory\r
template <int size, typename T>\r
{\r
explicit Mask8U(PtrStepb mask): mask(mask) {}\r
\r
- __device__ __forceinline__ bool operator()(int y, int x) const \r
- { \r
- return mask.ptr(y)[x]; \r
+ __device__ __forceinline__ bool operator()(int y, int x) const\r
+ {\r
+ return mask.ptr(y)[x];\r
}\r
\r
PtrStepb mask;\r
};\r
\r
- struct MaskTrue \r
- { \r
- __device__ __forceinline__ bool operator()(int y, int x) const \r
- { \r
- return true; \r
+ struct MaskTrue\r
+ {\r
+ __device__ __forceinline__ bool operator()(int y, int x) const\r
+ {\r
+ return true;\r
}\r
__device__ __forceinline__ MaskTrue(){}\r
__device__ __forceinline__ MaskTrue(const MaskTrue& mask_){}\r
//////////////////////////////////////////////////////////////////////////////\r
// Min max\r
\r
- // To avoid shared bank conflicts we convert each value into value of \r
+ // To avoid shared bank conflicts we convert each value into value of\r
// appropriate type (32 bits minimum)\r
template <typename T> struct MinMaxTypeTraits {};\r
template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };\r
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };\r
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };\r
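// Added note (not in the original source): for example MinMaxTypeTraits<uchar>::best_type
// is int, so the shared reduction arrays (sminval/smaxval below) hold 32-bit elements and
// consecutive threads hit different shared-memory banks.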
\r
- namespace minmax \r
+ namespace minmax\r
{\r
__constant__ int ctwidth;\r
__constant__ int ctheight;\r
{\r
dim3 threads, grid;\r
estimateThreadCfg(cols, rows, threads, grid);\r
- bufcols = grid.x * grid.y * elem_size; \r
+ bufcols = grid.x * grid.y * elem_size;\r
bufrows = 2;\r
}\r
\r
\r
// Estimates device constants which are used in the kernels using specified thread configuration\r
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)\r
- { \r
+ {\r
int twidth = divUp(divUp(cols, grid.x), threads.x);\r
int theight = divUp(divUp(rows, grid.y), threads.y);\r
- cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth))); \r
- cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight))); \r
- } \r
+ cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));\r
+ }\r
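// Worked example (hypothetical sizes, not from the original source): for cols = 1920,
// grid.x = 32 and threads.x = 32, twidth = divUp(divUp(1920, 32), 32) = divUp(60, 32) = 2,
// i.e. each thread scans two columns of its block's stripe; theight is derived the same
// way from rows.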
\r
\r
// Does min and max in shared memory\r
for (uint x = x0; x < x_end; x += blockDim.x)\r
{\r
T val = src_row[x];\r
- if (mask(y, x)) \r
- { \r
- mymin = ::min(mymin, val); \r
- mymax = ::max(mymax, val); \r
+ if (mask(y, x))\r
+ {\r
+ mymin = ::min(mymin, val);\r
+ mymax = ::max(mymax, val);\r
}\r
}\r
}\r
\r
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];\r
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];\r
\r
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
minval[0] = (T)sminval[0];\r
maxval[0] = (T)smaxval[0];\r
}\r
}\r
#else\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];\r
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];\r
#endif\r
}\r
\r
- \r
+\r
template <typename T>\r
void minMaxMaskCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval, PtrStepb buf)\r
{\r
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );\r
*minval = minval_;\r
*maxval = maxval_;\r
- } \r
+ }\r
\r
template void minMaxMaskCaller<uchar>(const DevMem2Db, const PtrStepb, double*, double*, PtrStepb);\r
template void minMaxMaskCaller<char>(const DevMem2Db, const PtrStepb, double*, double*, PtrStepb);\r
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );\r
*minval = minval_;\r
*maxval = maxval_;\r
- } \r
+ }\r
\r
template void minMaxCaller<uchar>(const DevMem2Db, double*, double*, PtrStepb);\r
template void minMaxCaller<char>(const DevMem2Db, double*, double*, PtrStepb);\r
typedef typename MinMaxTypeTraits<T>::best_type best_type;\r
__shared__ best_type sminval[nthreads];\r
__shared__ best_type smaxval[nthreads];\r
- \r
+\r
uint tid = threadIdx.y * blockDim.x + threadIdx.x;\r
uint idx = ::min(tid, size - 1);\r
\r
\r
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
minval[0] = (T)sminval[0];\r
maxval[0] = (T)smaxval[0];\r
///////////////////////////////////////////////////////////////////////////////\r
// minMaxLoc\r
\r
- namespace minmaxloc \r
+ namespace minmaxloc\r
{\r
__constant__ int ctwidth;\r
__constant__ int ctheight;\r
\r
\r
// Returns required buffer sizes\r
- void getBufSizeRequired(int cols, int rows, int elem_size, int& b1cols, \r
+ void getBufSizeRequired(int cols, int rows, int elem_size, int& b1cols,\r
int& b1rows, int& b2cols, int& b2rows)\r
{\r
dim3 threads, grid;\r
\r
// Estimates device constants which are used in the kernels using specified thread configuration\r
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)\r
- { \r
+ {\r
int twidth = divUp(divUp(cols, grid.x), threads.x);\r
int theight = divUp(divUp(rows, grid.y), threads.y);\r
- cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth))); \r
- cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight))); \r
- } \r
+ cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));\r
+ }\r
\r
\r
template <typename T>\r
- __device__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval, \r
+ __device__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval,\r
volatile uint* minloc, volatile uint* maxloc)\r
{\r
T val = minval[tid + offset];\r
\r
\r
template <int size, typename T>\r
- __device__ void findMinMaxLocInSmem(volatile T* minval, volatile T* maxval, volatile uint* minloc, \r
+ __device__ void findMinMaxLocInSmem(volatile T* minval, volatile T* maxval, volatile uint* minloc,\r
volatile uint* maxloc, const uint tid)\r
{\r
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval, minloc, maxloc); } __syncthreads(); }\r
\r
\r
template <int nthreads, typename T, typename Mask>\r
- __global__ void minMaxLocKernel(const DevMem2Db src, Mask mask, T* minval, T* maxval, \r
+ __global__ void minMaxLocKernel(const DevMem2Db src, Mask mask, T* minval, T* maxval,\r
uint* minloc, uint* maxloc)\r
{\r
typedef typename MinMaxTypeTraits<T>::best_type best_type;\r
uint tid = threadIdx.y * blockDim.x + threadIdx.x;\r
\r
T mymin = numeric_limits<T>::max();\r
- T mymax = numeric_limits<T>::is_signed ? -numeric_limits<T>::max() : numeric_limits<T>::min(); \r
+ T mymax = numeric_limits<T>::is_signed ? -numeric_limits<T>::max() : numeric_limits<T>::min();\r
uint myminloc = 0;\r
uint mymaxloc = 0;\r
uint y_end = ::min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);\r
}\r
}\r
\r
- sminval[tid] = mymin; \r
+ sminval[tid] = mymin;\r
smaxval[tid] = mymax;\r
sminloc[tid] = myminloc;\r
smaxloc[tid] = mymaxloc;\r
\r
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
minval[0] = (T)sminval[0];\r
maxval[0] = (T)smaxval[0];\r
}\r
}\r
#else\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];\r
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];\r
\r
\r
template <typename T>\r
- void minMaxLocMaskCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval, \r
+ void minMaxLocMaskCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval,\r
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)\r
{\r
dim3 threads, grid;\r
uint* minloc_buf = (uint*)locbuf.ptr(0);\r
uint* maxloc_buf = (uint*)locbuf.ptr(1);\r
\r
- minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf, \r
+ minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf,\r
minloc_buf, maxloc_buf);\r
cudaSafeCall( cudaGetLastError() );\r
\r
\r
\r
template <typename T>\r
- void minMaxLocCaller(const DevMem2Db src, double* minval, double* maxval, \r
+ void minMaxLocCaller(const DevMem2Db src, double* minval, double* maxval,\r
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)\r
{\r
dim3 threads, grid;\r
uint* minloc_buf = (uint*)locbuf.ptr(0);\r
uint* maxloc_buf = (uint*)locbuf.ptr(1);\r
\r
- minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf, \r
+ minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf,\r
minloc_buf, maxloc_buf);\r
cudaSafeCall( cudaGetLastError() );\r
\r
\r
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
minval[0] = (T)sminval[0];\r
maxval[0] = (T)smaxval[0];\r
\r
\r
template <typename T>\r
- void minMaxLocMaskMultipassCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval, \r
+ void minMaxLocMaskMultipassCaller(const DevMem2Db src, const PtrStepb mask, double* minval, double* maxval,\r
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)\r
{\r
dim3 threads, grid;\r
uint* minloc_buf = (uint*)locbuf.ptr(0);\r
uint* maxloc_buf = (uint*)locbuf.ptr(1);\r
\r
- minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf, \r
+ minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf,\r
minloc_buf, maxloc_buf);\r
cudaSafeCall( cudaGetLastError() );\r
minMaxLocPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);\r
\r
\r
template <typename T>\r
- void minMaxLocMultipassCaller(const DevMem2Db src, double* minval, double* maxval, \r
+ void minMaxLocMultipassCaller(const DevMem2Db src, double* minval, double* maxval,\r
int minloc[2], int maxloc[2], PtrStepb valbuf, PtrStepb locbuf)\r
{\r
dim3 threads, grid;\r
uint* minloc_buf = (uint*)locbuf.ptr(0);\r
uint* maxloc_buf = (uint*)locbuf.ptr(1);\r
\r
- minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf, \r
+ minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf,\r
minloc_buf, maxloc_buf);\r
cudaSafeCall( cudaGetLastError() );\r
minMaxLocPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);\r
//////////////////////////////////////////////////////////////////////////////////////////////////////////\r
// countNonZero\r
\r
- namespace countnonzero \r
+ namespace countnonzero\r
{\r
__constant__ int ctwidth;\r
__constant__ int ctheight;\r
\r
\r
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)\r
- { \r
+ {\r
int twidth = divUp(divUp(cols, grid.x), threads.x);\r
int theight = divUp(divUp(rows, grid.y), threads.y);\r
- cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth))); \r
- cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(theight))); \r
+ cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(theight)));\r
}\r
\r
\r
\r
sumInSmem<nthreads, uint>(scount, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
count[0] = scount[0];\r
blocks_finished = 0;\r
#endif\r
}\r
\r
- \r
+\r
template <typename T>\r
int countNonZeroCaller(const DevMem2Db src, PtrStepb buf)\r
{\r
\r
uint count;\r
cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(int), cudaMemcpyDeviceToHost));\r
- \r
+\r
return count;\r
- } \r
+ }\r
\r
template int countNonZeroCaller<uchar>(const DevMem2Db, PtrStepb);\r
template int countNonZeroCaller<char>(const DevMem2Db, PtrStepb);\r
\r
sumInSmem<nthreads, uint>(scount, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
count[0] = scount[0];\r
}\r
\r
\r
uint count;\r
cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(int), cudaMemcpyDeviceToHost));\r
- \r
+\r
return count;\r
- } \r
+ }\r
\r
template int countNonZeroMultipassCaller<uchar>(const DevMem2Db, PtrStepb);\r
template int countNonZeroMultipassCaller<char>(const DevMem2Db, PtrStepb);\r
template <> struct SumType<float> { typedef float R; };\r
template <> struct SumType<double> { typedef double R; };\r
\r
- template <typename R> \r
+ template <typename R>\r
struct IdentityOp { static __device__ __forceinline__ R call(R x) { return x; } };\r
\r
- template <typename R> \r
+ template <typename R>\r
struct AbsOp { static __device__ __forceinline__ R call(R x) { return ::abs(x); } };\r
\r
template <>\r
struct AbsOp<uint> { static __device__ __forceinline__ uint call(uint x) { return x; } };\r
\r
- template <typename R> \r
+ template <typename R>\r
struct SqrOp { static __device__ __forceinline__ R call(R x) { return x * x; } };\r
\r
__constant__ int ctwidth;\r
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)\r
{\r
threads = dim3(threads_x, threads_y);\r
- grid = dim3(divUp(cols, threads.x * threads.y), \r
+ grid = dim3(divUp(cols, threads.x * threads.y),\r
divUp(rows, threads.y * threads.x));\r
grid.x = std::min(grid.x, threads.x);\r
grid.y = std::min(grid.y, threads.y);\r
\r
\r
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)\r
- { \r
+ {\r
int twidth = divUp(divUp(cols, grid.x), threads.x);\r
int theight = divUp(divUp(rows, grid.y), threads.y);\r
- cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth))); \r
- cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(theight))); \r
+ cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(theight)));\r
}\r
\r
template <typename T, typename R, typename Op, int nthreads>\r
\r
sumInSmem<nthreads, R>(smem, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
result[0] = smem[0];\r
blocks_finished = 0;\r
\r
sumInSmem<nthreads, R>(smem, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
result[0] = smem[0];\r
}\r
\r
sumInSmem<nthreads, R>(smem, tid);\r
sumInSmem<nthreads, R>(smem + nthreads, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
res.x = smem[0];\r
res.y = smem[nthreads];\r
}\r
}\r
#else\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
DstType res;\r
res.x = smem[0];\r
sumInSmem<nthreads, R>(smem, tid);\r
sumInSmem<nthreads, R>(smem + nthreads, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
res.x = smem[0];\r
res.y = smem[nthreads];\r
sumInSmem<nthreads, R>(smem + nthreads, tid);\r
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
res.x = smem[0];\r
res.y = smem[nthreads];\r
}\r
}\r
#else\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
DstType res;\r
res.x = smem[0];\r
sumInSmem<nthreads, R>(smem + nthreads, tid);\r
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
res.x = smem[0];\r
res.y = smem[nthreads];\r
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)\r
{\r
val = ptr[x0 + x * blockDim.x];\r
- sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y), \r
+ sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y),\r
Op::call(val.z), Op::call(val.w));\r
}\r
}\r
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
res.x = smem[0];\r
res.y = smem[nthreads];\r
}\r
}\r
#else\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
DstType res;\r
res.x = smem[0];\r
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);\r
\r
- if (tid == 0) \r
+ if (tid == 0)\r
{\r
res.x = smem[0];\r
res.y = smem[nthreads];\r
sum[1] = result[1];\r
sum[2] = result[2];\r
sum[3] = result[3];\r
- } \r
+ }\r
\r
template void sumMultipassCaller<uchar>(const DevMem2Db, PtrStepb, double*, int);\r
template void sumMultipassCaller<char>(const DevMem2Db, PtrStepb, double*, int);\r
sum[1] = result[1];\r
sum[2] = result[2];\r
sum[3] = result[3];\r
- } \r
+ }\r
\r
template void sumCaller<uchar>(const DevMem2Db, PtrStepb, double*, int);\r
template void sumCaller<char>(const DevMem2Db, PtrStepb, double*, int);\r
sum[1] = result[1];\r
sum[2] = result[2];\r
sum[3] = result[3];\r
- } \r
+ }\r
\r
template void absSumMultipassCaller<uchar>(const DevMem2Db, PtrStepb, double*, int);\r
template void absSumMultipassCaller<char>(const DevMem2Db, PtrStepb, double*, int);\r
sum[1] = result[1];\r
sum[2] = result[2];\r
sum[3] = result[3];\r
- } \r
+ }\r
\r
template void sqrSumMultipassCaller<uchar>(const DevMem2Db, PtrStepb, double*, int);\r
template void sqrSumMultipassCaller<char>(const DevMem2Db, PtrStepb, double*, int);\r
{\r
for (int y = threadIdx.y; y < src.rows; y += 16)\r
myVal = op(myVal, src.ptr(y)[x]);\r
- } \r
+ }\r
\r
smem[threadIdx.x * 16 + threadIdx.y] = myVal;\r
__syncthreads();\r
{\r
typedef void (*caller_t)(const DevMem2D_<T>& src, DevMem2D_<D> dst, cudaStream_t stream);\r
\r
- static const caller_t callers[] = \r
+ static const caller_t callers[] =\r
{\r
- reduceRows_caller<SumReductor, T, S, D>, \r
- reduceRows_caller<AvgReductor, T, S, D>, \r
- reduceRows_caller<MaxReductor, T, S, D>, \r
+ reduceRows_caller<SumReductor, T, S, D>,\r
+ reduceRows_caller<AvgReductor, T, S, D>,\r
+ reduceRows_caller<MaxReductor, T, S, D>,\r
reduceRows_caller<MinReductor, T, S, D>\r
};\r
\r
\r
template void reduceRows_gpu<uchar, int, uchar>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
template void reduceRows_gpu<uchar, int, int>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
- template void reduceRows_gpu<uchar, int, float>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
+ template void reduceRows_gpu<uchar, int, float>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
\r
template void reduceRows_gpu<ushort, int, ushort>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
template void reduceRows_gpu<ushort, int, int>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
- template void reduceRows_gpu<ushort, int, float>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
+ template void reduceRows_gpu<ushort, int, float>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
\r
template void reduceRows_gpu<short, int, short>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
template void reduceRows_gpu<short, int, int>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
- template void reduceRows_gpu<short, int, float>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
+ template void reduceRows_gpu<short, int, float>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
\r
template void reduceRows_gpu<int, int, int>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
template void reduceRows_gpu<int, int, float>(const DevMem2Db& src, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
{\r
typedef void (*caller_t)(const DevMem2D_<T>& src, DevMem2D_<D> dst, cudaStream_t stream);\r
\r
- static const caller_t callers[4][4] = \r
+ static const caller_t callers[4][4] =\r
{\r
{reduceCols_caller<1, SumReductor, T, S, D>, reduceCols_caller<1, AvgReductor, T, S, D>, reduceCols_caller<1, MaxReductor, T, S, D>, reduceCols_caller<1, MinReductor, T, S, D>},\r
{reduceCols_caller<2, SumReductor, T, S, D>, reduceCols_caller<2, AvgReductor, T, S, D>, reduceCols_caller<2, MaxReductor, T, S, D>, reduceCols_caller<2, MinReductor, T, S, D>},\r
template void reduceCols_gpu<uchar, int, int>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
template void reduceCols_gpu<uchar, int, float>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
\r
- template void reduceCols_gpu<ushort, int, ushort>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
- template void reduceCols_gpu<ushort, int, int>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
+ template void reduceCols_gpu<ushort, int, ushort>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
+ template void reduceCols_gpu<ushort, int, int>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
template void reduceCols_gpu<ushort, int, float>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
\r
- template void reduceCols_gpu<short, int, short>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
- template void reduceCols_gpu<short, int, int>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
- template void reduceCols_gpu<short, int, float>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
+ template void reduceCols_gpu<short, int, short>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
+ template void reduceCols_gpu<short, int, int>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
+ template void reduceCols_gpu<short, int, float>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
\r
- template void reduceCols_gpu<int, int, int>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream); \r
+ template void reduceCols_gpu<int, int, int>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
template void reduceCols_gpu<int, int, float>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
\r
template void reduceCols_gpu<float, float, float>(const DevMem2Db& src, int cn, const DevMem2Db& dst, int reduceOp, cudaStream_t stream);\r
\r
#include "opencv2/gpu/device/common.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
namespace optical_flow\r
{\r
#define NUM_VERTS_PER_ARROW 6\r
\r
__global__ void NeedleMapAverageKernel(const DevMem2Df u, const PtrStepf v, PtrStepf u_avg, PtrStepf v_avg)\r
- { \r
+ {\r
__shared__ float smem[2 * NEEDLE_MAP_SCALE];\r
\r
volatile float* u_col_sum = smem;\r
}\r
\r
if (threadIdx.x < 8)\r
- { \r
+ {\r
// now add the column sums\r
const uint X = threadIdx.x;\r
\r
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 1];\r
}\r
\r
- if (X | 0xfe == 0xfc) // bits 0 & 1 == 0 \r
- { \r
+ if (X | 0xfe == 0xfc) // bits 0 & 1 == 0\r
+ {\r
u_col_sum[threadIdx.x] += u_col_sum[threadIdx.x + 2];\r
v_col_sum[threadIdx.x] += v_col_sum[threadIdx.x + 2];\r
}\r
v_avg(blockIdx.y, blockIdx.x) = v_col_sum[0];\r
}\r
}\r
- \r
+\r
void NeedleMapAverage_gpu(DevMem2Df u, DevMem2Df v, DevMem2Df u_avg, DevMem2Df v_avg)\r
{\r
const dim3 block(NEEDLE_MAP_SCALE);\r
//\r
// Copyright (c) 2010, Paul Furgale, Chi Hay Tong\r
//\r
-// The original code was written by Paul Furgale and Chi Hay Tong \r
+// The original code was written by Paul Furgale and Chi Hay Tong\r
// and later optimized and prepared for integration into OpenCV by Itseez.\r
//\r
//M*/\r
#include "opencv2/gpu/device/utility.hpp"\r
#include "opencv2/gpu/device/functional.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
namespace orb\r
{\r
// cull\r
\r
int cull_gpu(int* loc, float* response, int size, int n_points)\r
- { \r
+ {\r
thrust::device_ptr<int> loc_ptr(loc);\r
thrust::device_ptr<float> response_ptr(response);\r
\r
{\r
const short2 loc = loc_[ptidx];\r
\r
- const int r = blockSize / 2; \r
+ const int r = blockSize / 2;\r
const int x0 = loc.x - r;\r
const int y0 = loc.y - r;\r
- \r
+\r
int a = 0, b = 0, c = 0;\r
\r
for (int ind = threadIdx.x; ind < blockSize * blockSize; ind += blockDim.x)\r
const int i = ind / blockSize;\r
const int j = ind % blockSize;\r
\r
- int Ix = (img(y0 + i, x0 + j + 1) - img(y0 + i, x0 + j - 1)) * 2 + \r
- (img(y0 + i - 1, x0 + j + 1) - img(y0 + i - 1, x0 + j - 1)) + \r
+ int Ix = (img(y0 + i, x0 + j + 1) - img(y0 + i, x0 + j - 1)) * 2 +\r
+ (img(y0 + i - 1, x0 + j + 1) - img(y0 + i - 1, x0 + j - 1)) +\r
(img(y0 + i + 1, x0 + j + 1) - img(y0 + i + 1, x0 + j - 1));\r
\r
- int Iy = (img(y0 + i + 1, x0 + j) - img(y0 + i - 1, x0 + j)) * 2 + \r
- (img(y0 + i + 1, x0 + j - 1) - img(y0 + i - 1, x0 + j - 1)) + \r
+ int Iy = (img(y0 + i + 1, x0 + j) - img(y0 + i - 1, x0 + j)) * 2 +\r
+ (img(y0 + i + 1, x0 + j - 1) - img(y0 + i - 1, x0 + j - 1)) +\r
(img(y0 + i + 1, x0 + j + 1) - img(y0 + i - 1, x0 + j + 1));\r
\r
a += Ix * Ix;\r
int m_01 = 0, m_10 = 0;\r
\r
const short2 loc = loc_[ptidx];\r
- \r
+\r
// Treat the center line differently, v=0\r
for (int u = threadIdx.x - half_k; u <= half_k; u += blockDim.x)\r
m_10 += u * image(loc.y, loc.x + u);\r
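// Added note (not in the original source): the center row contributes only to m_10
// because v = 0 there; the remaining rows are handled in the loop below, where
// c_u_max[v] is the half-width d of the circular patch at vertical offset v and the
// block's threads cooperatively cover u in [-d, d].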
int v_sum = 0;\r
int m_sum = 0;\r
const int d = c_u_max[v];\r
- \r
+\r
for (int u = threadIdx.x - d; u <= d; u += blockDim.x)\r
{\r
int val_plus = image(loc.y + v, loc.x + u);\r
{\r
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)\r
{\r
- pattern_x += 16 * i; \r
+ pattern_x += 16 * i;\r
pattern_y += 16 * i;\r
\r
int t0, t1, val;\r
\r
t0 = GET_VALUE(14); t1 = GET_VALUE(15);\r
val |= (t0 < t1) << 7;\r
- \r
+\r
return val;\r
}\r
};\r
{\r
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)\r
{\r
- pattern_x += 12 * i; \r
+ pattern_x += 12 * i;\r
pattern_y += 12 * i;\r
- \r
+\r
int t0, t1, t2, val;\r
\r
t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2);\r
val = t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0);\r
- \r
+\r
t0 = GET_VALUE(3); t1 = GET_VALUE(4); t2 = GET_VALUE(5);\r
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 2;\r
- \r
+\r
t0 = GET_VALUE(6); t1 = GET_VALUE(7); t2 = GET_VALUE(8);\r
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 4;\r
- \r
+\r
t0 = GET_VALUE(9); t1 = GET_VALUE(10); t2 = GET_VALUE(11);\r
val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 6;\r
- \r
+\r
return val;\r
}\r
};\r
{\r
__device__ static int calc(const PtrStepb& img, short2 loc, const int* pattern_x, const int* pattern_y, float sina, float cosa, int i)\r
{\r
- pattern_x += 16 * i; \r
+ pattern_x += 16 * i;\r
pattern_y += 16 * i;\r
- \r
+\r
int t0, t1, t2, t3, k, val;\r
int a, b;\r
\r
if( t3 > t2 ) t2 = t3, b = 3;\r
k = t0 > t2 ? a : b;\r
val = k;\r
- \r
+\r
t0 = GET_VALUE(4); t1 = GET_VALUE(5);\r
t2 = GET_VALUE(6); t3 = GET_VALUE(7);\r
a = 0, b = 2;\r
if( t3 > t2 ) t2 = t3, b = 3;\r
k = t0 > t2 ? a : b;\r
val |= k << 2;\r
- \r
+\r
t0 = GET_VALUE(8); t1 = GET_VALUE(9);\r
t2 = GET_VALUE(10); t3 = GET_VALUE(11);\r
a = 0, b = 2;\r
if( t3 > t2 ) t2 = t3, b = 3;\r
k = t0 > t2 ? a : b;\r
val |= k << 4;\r
- \r
+\r
t0 = GET_VALUE(12); t1 = GET_VALUE(13);\r
t2 = GET_VALUE(14); t3 = GET_VALUE(15);\r
a = 0, b = 2;\r
if( t3 > t2 ) t2 = t3, b = 3;\r
k = t0 > t2 ? a : b;\r
val |= k << 6;\r
- \r
+\r
return val;\r
}\r
};\r
y[ptidx] = loc.y * scale;\r
}\r
}\r
- \r
+\r
void mergeLocation_gpu(const short2* loc, float* x, float* y, int npoints, float scale, cudaStream_t stream)\r
{\r
dim3 block(256);\r
{\r
static void call(DevMem2D_<T> src, DevMem2Df mapx, DevMem2Df mapy, DevMem2D_<T> dst, const float* borderValue, cudaStream_t stream, int)\r
{\r
- typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type; \r
+ typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;\r
\r
dim3 block(32, 8);\r
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));\r
cudaSafeCall( cudaDeviceSynchronize() ); \\r
} \\r
};\r
- \r
+\r
OPENCV_GPU_IMPLEMENT_REMAP_TEX(uchar)\r
//OPENCV_GPU_IMPLEMENT_REMAP_TEX(uchar2)\r
OPENCV_GPU_IMPLEMENT_REMAP_TEX(uchar4)\r
\r
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcher\r
{\r
- static void call(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2Df mapx, DevMem2Df mapy, \r
+ static void call(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2Df mapx, DevMem2Df mapy,\r
DevMem2D_<T> dst, const float* borderValue, cudaStream_t stream, int cc)\r
{\r
if (stream == 0)\r
}\r
};\r
\r
- template <typename T> void remap_gpu(DevMem2Db src, DevMem2Db srcWhole, int xoff, int yoff, DevMem2Df xmap, DevMem2Df ymap, \r
+ template <typename T> void remap_gpu(DevMem2Db src, DevMem2Db srcWhole, int xoff, int yoff, DevMem2Df xmap, DevMem2Df ymap,\r
DevMem2Db dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, int cc)\r
{\r
- typedef void (*caller_t)(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2Df xmap, DevMem2Df ymap, \r
+ typedef void (*caller_t)(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2Df xmap, DevMem2Df ymap,\r
DevMem2D_<T> dst, const float* borderValue, cudaStream_t stream, int cc);\r
\r
- static const caller_t callers[3][5] = \r
+ static const caller_t callers[3][5] =\r
{\r
{\r
RemapDispatcher<PointFilter, BrdReflect101, T>::call,\r
}\r
};\r
\r
- callers[interpolation][borderMode](static_cast< DevMem2D_<T> >(src), static_cast< DevMem2D_<T> >(srcWhole), xoff, yoff, xmap, ymap, \r
+ callers[interpolation][borderMode](static_cast< DevMem2D_<T> >(src), static_cast< DevMem2D_<T> >(srcWhole), xoff, yoff, xmap, ymap,\r
static_cast< DevMem2D_<T> >(dst), borderValue, stream, cc);\r
}\r
\r
}\r
};\r
\r
- template <typename T> void resize_gpu(DevMem2Db src, DevMem2Db srcWhole, int xoff, int yoff, float fx, float fy, \r
+ template <typename T> void resize_gpu(DevMem2Db src, DevMem2Db srcWhole, int xoff, int yoff, float fx, float fy,\r
DevMem2Db dst, int interpolation, cudaStream_t stream)\r
{\r
typedef void (*caller_t)(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, float fx, float fy, DevMem2D_<T> dst, cudaStream_t stream);\r
if (interpolation == 3 && (fx <= 1.f || fy <= 1.f))\r
interpolation = 1;\r
\r
- callers[interpolation](static_cast< DevMem2D_<T> >(src), static_cast< DevMem2D_<T> >(srcWhole), xoff, yoff, fx, fy, \r
+ callers[interpolation](static_cast< DevMem2D_<T> >(src), static_cast< DevMem2D_<T> >(srcWhole), xoff, yoff, fx, fy,\r
static_cast< DevMem2D_<T> >(dst), stream);\r
}\r
\r
#include "opencv2/gpu/device/border_interpolate.hpp"\r
#include "opencv2/gpu/device/static_check.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace row_filter \r
+ namespace row_filter\r
{\r
#define MAX_KERNEL_SIZE 32\r
\r
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;\r
\r
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];\r
- \r
+\r
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;\r
\r
if (y >= src.rows)\r
{\r
typedef void (*caller_t)(DevMem2D_<T> src, DevMem2D_<D> dst, int anchor, int cc, cudaStream_t stream);\r
\r
- static const caller_t callers[5][33] = \r
+ static const caller_t callers[5][33] =\r
{\r
{\r
0,\r
linearRowFilter_caller<30, T, D, BrdRowWrap>,\r
linearRowFilter_caller<31, T, D, BrdRowWrap>,\r
linearRowFilter_caller<32, T, D, BrdRowWrap>\r
- } \r
+ }\r
};\r
- \r
+\r
loadKernel(kernel, ksize);\r
\r
callers[brd_type][ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor, cc, stream);\r
#define cublasSafeCall(expr) ___cublasSafeCall(expr, __FILE__, __LINE__)\r
#endif\r
\r
-namespace cv { namespace gpu \r
+namespace cv { namespace gpu\r
{\r
void nppError(int err, const char *file, const int line, const char *func = "");\r
void ncvError(int err, const char *file, const int line, const char *func = "");\r
\r
#include "internal_shared.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace split_merge \r
+ namespace split_merge\r
{\r
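// Added note (not in the original source): TypeTraits maps an element type to a
// same-sized integer/double scalar plus its 2/3/4-component vector forms, so a
// multi-channel pixel can be read or written as one native vector element; e.g. for a
// 4-byte channel type, the merged 2-channel pixel produced below is stored as one int2.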
template <typename T, size_t elem_size = sizeof(T)>\r
- struct TypeTraits \r
+ struct TypeTraits\r
{\r
typedef T type;\r
typedef T type2;\r
};\r
\r
template <typename T>\r
- struct TypeTraits<T, 4> \r
+ struct TypeTraits<T, 4>\r
{\r
typedef int type;\r
typedef int2 type2;\r
};\r
\r
template <typename T>\r
- struct TypeTraits<T, 8> \r
+ struct TypeTraits<T, 8>\r
{\r
typedef double type;\r
typedef double2 type2;\r
typedef void (*SplitFunction)(const DevMem2Db& src, DevMem2Db* dst, const cudaStream_t& stream);\r
\r
//------------------------------------------------------------\r
- // Merge \r
+ // Merge\r
\r
template <typename T>\r
- __global__ void mergeC2_(const uchar* src0, size_t src0_step, \r
- const uchar* src1, size_t src1_step, \r
+ __global__ void mergeC2_(const uchar* src0, size_t src0_step,\r
+ const uchar* src1, size_t src1_step,\r
int rows, int cols, uchar* dst, size_t dst_step)\r
{\r
typedef typename TypeTraits<T>::type2 dst_type;\r
const T* src1_y = (const T*)(src1 + y * src1_step);\r
dst_type* dst_y = (dst_type*)(dst + y * dst_step);\r
\r
- if (x < cols && y < rows) \r
- { \r
+ if (x < cols && y < rows)\r
+ {\r
dst_type dst_elem;\r
dst_elem.x = src0_y[x];\r
dst_elem.y = src1_y[x];\r
\r
\r
template <typename T>\r
- __global__ void mergeC3_(const uchar* src0, size_t src0_step, \r
- const uchar* src1, size_t src1_step, \r
- const uchar* src2, size_t src2_step, \r
+ __global__ void mergeC3_(const uchar* src0, size_t src0_step,\r
+ const uchar* src1, size_t src1_step,\r
+ const uchar* src2, size_t src2_step,\r
int rows, int cols, uchar* dst, size_t dst_step)\r
{\r
typedef typename TypeTraits<T>::type3 dst_type;\r
const T* src2_y = (const T*)(src2 + y * src2_step);\r
dst_type* dst_y = (dst_type*)(dst + y * dst_step);\r
\r
- if (x < cols && y < rows) \r
- { \r
+ if (x < cols && y < rows)\r
+ {\r
dst_type dst_elem;\r
dst_elem.x = src0_y[x];\r
dst_elem.y = src1_y[x];\r
\r
\r
template <>\r
- __global__ void mergeC3_<double>(const uchar* src0, size_t src0_step, \r
- const uchar* src1, size_t src1_step, \r
- const uchar* src2, size_t src2_step, \r
+ __global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,\r
+ const uchar* src1, size_t src1_step,\r
+ const uchar* src2, size_t src2_step,\r
int rows, int cols, uchar* dst, size_t dst_step)\r
{\r
const int x = blockIdx.x * blockDim.x + threadIdx.x;\r
const double* src2_y = (const double*)(src2 + y * src2_step);\r
double* dst_y = (double*)(dst + y * dst_step);\r
\r
- if (x < cols && y < rows) \r
- { \r
+ if (x < cols && y < rows)\r
+ {\r
dst_y[3 * x] = src0_y[x];\r
dst_y[3 * x + 1] = src1_y[x];\r
dst_y[3 * x + 2] = src2_y[x];\r
\r
\r
template <typename T>\r
- __global__ void mergeC4_(const uchar* src0, size_t src0_step, \r
- const uchar* src1, size_t src1_step, \r
- const uchar* src2, size_t src2_step, \r
- const uchar* src3, size_t src3_step, \r
+ __global__ void mergeC4_(const uchar* src0, size_t src0_step,\r
+ const uchar* src1, size_t src1_step,\r
+ const uchar* src2, size_t src2_step,\r
+ const uchar* src3, size_t src3_step,\r
int rows, int cols, uchar* dst, size_t dst_step)\r
{\r
typedef typename TypeTraits<T>::type4 dst_type;\r
const T* src3_y = (const T*)(src3 + y * src3_step);\r
dst_type* dst_y = (dst_type*)(dst + y * dst_step);\r
\r
- if (x < cols && y < rows) \r
- { \r
+ if (x < cols && y < rows)\r
+ {\r
dst_type dst_elem;\r
dst_elem.x = src0_y[x];\r
dst_elem.y = src1_y[x];\r
\r
\r
template <>\r
- __global__ void mergeC4_<double>(const uchar* src0, size_t src0_step, \r
- const uchar* src1, size_t src1_step, \r
- const uchar* src2, size_t src2_step, \r
- const uchar* src3, size_t src3_step, \r
+ __global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,\r
+ const uchar* src1, size_t src1_step,\r
+ const uchar* src2, size_t src2_step,\r
+ const uchar* src3, size_t src3_step,\r
int rows, int cols, uchar* dst, size_t dst_step)\r
{\r
const int x = blockIdx.x * blockDim.x + threadIdx.x;\r
const double* src3_y = (const double*)(src3 + y * src3_step);\r
double2* dst_y = (double2*)(dst + y * dst_step);\r
\r
- if (x < cols && y < rows) \r
- { \r
+ if (x < cols && y < rows)\r
+ {\r
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);\r
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);\r
}\r
\r
\r
template <typename T>\r
- __global__ void splitC2_(const uchar* src, size_t src_step, \r
+ __global__ void splitC2_(const uchar* src, size_t src_step,\r
int rows, int cols,\r
uchar* dst0, size_t dst0_step,\r
uchar* dst1, size_t dst1_step)\r
T* dst0_y = (T*)(dst0 + y * dst0_step);\r
T* dst1_y = (T*)(dst1 + y * dst1_step);\r
\r
- if (x < cols && y < rows) \r
+ if (x < cols && y < rows)\r
{\r
src_type src_elem = src_y[x];\r
dst0_y[x] = src_elem.x;\r
\r
\r
template <typename T>\r
- __global__ void splitC3_(const uchar* src, size_t src_step, \r
+ __global__ void splitC3_(const uchar* src, size_t src_step,\r
int rows, int cols,\r
uchar* dst0, size_t dst0_step,\r
uchar* dst1, size_t dst1_step,\r
T* dst1_y = (T*)(dst1 + y * dst1_step);\r
T* dst2_y = (T*)(dst2 + y * dst2_step);\r
\r
- if (x < cols && y < rows) \r
+ if (x < cols && y < rows)\r
{\r
src_type src_elem = src_y[x];\r
dst0_y[x] = src_elem.x;\r
double* dst1_y = (double*)(dst1 + y * dst1_step);\r
double* dst2_y = (double*)(dst2 + y * dst2_step);\r
\r
- if (x < cols && y < rows) \r
+ if (x < cols && y < rows)\r
{\r
dst0_y[x] = src_y[3 * x];\r
dst1_y[x] = src_y[3 * x + 1];\r
T* dst2_y = (T*)(dst2 + y * dst2_step);\r
T* dst3_y = (T*)(dst3 + y * dst3_step);\r
\r
- if (x < cols && y < rows) \r
+ if (x < cols && y < rows)\r
{\r
src_type src_elem = src_y[x];\r
dst0_y[x] = src_elem.x;\r
double* dst2_y = (double*)(dst2 + y * dst2_step);\r
double* dst3_y = (double*)(dst3 + y * dst3_step);\r
\r
- if (x < cols && y < rows) \r
+ if (x < cols && y < rows)\r
{\r
double2 src_elem1 = src_y[2 * x];\r
double2 src_elem2 = src_y[2 * x + 1];\r
\r
#include "internal_shared.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace stereobm \r
+ namespace stereobm\r
{\r
//////////////////////////////////////////////////////////////////////////////////////////////////\r
/////////////////////////////////////// Stereo BM ////////////////////////////////////////////////\r
\r
template<int RADIUS>\r
__device__ unsigned int CalcSSD(volatile unsigned int *col_ssd_cache, volatile unsigned int *col_ssd)\r
- { \r
+ {\r
unsigned int cache = 0;\r
unsigned int cache2 = 0;\r
\r
prefilter_kernel<<<grid, threads, 0, stream>>>(output, prefilterCap);\r
cudaSafeCall( cudaGetLastError() );\r
\r
- if (stream == 0) \r
- cudaSafeCall( cudaDeviceSynchronize() ); \r
+ if (stream == 0)\r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
\r
cudaSafeCall( cudaUnbindTexture (texForSobel ) );\r
}\r
#include "opencv2/gpu/device/saturate_cast.hpp"\r
#include "opencv2/gpu/device/limits.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace stereobp \r
+ namespace stereobp\r
{\r
///////////////////////////////////////////////////////////////\r
/////////////////////// load constants ////////////////////////\r
#include "opencv2/gpu/device/saturate_cast.hpp"\r
#include "opencv2/gpu/device/limits.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
- namespace stereocsbp \r
+ namespace stereocsbp\r
{\r
///////////////////////////////////////////////////////////////\r
/////////////////////// load constants ////////////////////////\r
__constant__ int cth;\r
\r
__constant__ size_t cimg_step;\r
- __constant__ size_t cmsg_step; \r
+ __constant__ size_t cmsg_step;\r
__constant__ size_t cdisp_step1;\r
__constant__ size_t cdisp_step2;\r
\r
get_first_k_initial_local<<<grid, threads, 0, stream>>> (data_cost_selected, disp_selected_pyr, h, w, nr_plane);\r
else\r
get_first_k_initial_global<<<grid, threads, 0, stream>>>(data_cost_selected, disp_selected_pyr, h, w, nr_plane);\r
- \r
+\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) );\r
cudaSafeCall( cudaMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) );\r
cudaSafeCall( cudaMemcpyToSymbol(cmsg_step, &msg_step, sizeof(size_t)) );\r
- \r
+\r
callers[level](disp_selected_pyr, data_cost, rows, cols, h, w, level, nr_plane, channels, stream);\r
cudaSafeCall( cudaGetLastError() );\r
\r
\r
template void compute_data_cost(const float* disp_selected_pyr, float* data_cost, size_t msg_step,\r
int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream);\r
- \r
+\r
\r
///////////////////////////////////////////////////////////////\r
//////////////////////// init message /////////////////////////\r
///////////////////////////////////////////////////////////////\r
\r
- \r
+\r
template <typename T>\r
__device__ void get_first_k_element_increase(T* u_new, T* d_new, T* l_new, T* r_new,\r
const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur,\r
cudaSafeCall( cudaMemcpyToSymbol(cdisp_step1, &disp_step1, sizeof(size_t)) );\r
cudaSafeCall( cudaMemcpyToSymbol(cdisp_step2, &disp_step2, sizeof(size_t)) );\r
cudaSafeCall( cudaMemcpyToSymbol(cmsg_step, &msg_step, sizeof(size_t)) );\r
- \r
+\r
dim3 threads(32, 8, 1);\r
dim3 grid(1, 1, 1);\r
\r
const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur,\r
float* selected_disp_pyr_new, const float* selected_disp_pyr_cur,\r
float* data_cost_selected, const float* data_cost, size_t msg_step,\r
- int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream); \r
+ int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream);\r
\r
///////////////////////////////////////////////////////////////\r
//////////////////// calc all iterations /////////////////////\r
for(int t = 0; t < iters; ++t)\r
{\r
compute_message<<<grid, threads, 0, stream>>>(u, d, l, r, data_cost_selected, selected_disp_pyr_cur, h, w, nr_plane, t & 1);\r
- cudaSafeCall( cudaGetLastError() ); \r
+ cudaSafeCall( cudaGetLastError() );\r
}\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
template void calc_all_iterations(short* u, short* d, short* l, short* r, const short* data_cost_selected, const short* selected_disp_pyr_cur, size_t msg_step,\r
int h, int w, int nr_plane, int iters, cudaStream_t stream);\r
\r
- template void calc_all_iterations(float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step, \r
+ template void calc_all_iterations(float* u, float* d, float* l, float* r, const float* data_cost_selected, const float* selected_disp_pyr_cur, size_t msg_step,\r
int h, int w, int nr_plane, int iters, cudaStream_t stream);\r
\r
\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
- template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step, \r
+ template void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step,\r
const DevMem2D_<short>& disp, int nr_plane, cudaStream_t stream);\r
\r
template void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step,\r
{\r
dim3 block(32, 8);\r
dim3 grid(divUp(xmap.cols, block.x), divUp(xmap.rows, block.y));\r
- \r
+\r
buildWarpMaps<Transform><<<grid, block, 0, stream>>>(xmap, ymap);\r
cudaSafeCall( cudaGetLastError() );\r
\r
{\r
static void call(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2D_<T> dst, const float* borderValue, int)\r
{\r
- typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type; \r
+ typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;\r
\r
dim3 block(32, 8);\r
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));\r
#undef OPENCV_GPU_IMPLEMENT_WARP_TEX\r
\r
template <class Transform, template <typename> class Filter, template <typename> class B, typename T> struct WarpDispatcher\r
- { \r
+ {\r
static void call(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2D_<T> dst, const float* borderValue, cudaStream_t stream, int cc)\r
{\r
if (stream == 0)\r
}\r
};\r
\r
- template <class Transform, typename T> \r
+ template <class Transform, typename T>\r
void warp_caller(DevMem2Db src, DevMem2Db srcWhole, int xoff, int yoff, DevMem2Db dst, int interpolation,\r
int borderMode, const float* borderValue, cudaStream_t stream, int cc)\r
{\r
\r
using namespace ::cv::gpu::device;\r
\r
-cv::gpu::HOGDescriptor::HOGDescriptor(Size win_size, Size block_size, Size block_stride, Size cell_size, \r
- int nbins, double win_sigma, double threshold_L2hys, bool gamma_correction, int nlevels)\r
- : win_size(win_size), \r
- block_size(block_size), \r
- block_stride(block_stride), \r
- cell_size(cell_size),\r
- nbins(nbins), \r
- win_sigma(win_sigma),\r
- threshold_L2hys(threshold_L2hys),\r
- gamma_correction(gamma_correction),\r
- nlevels(nlevels)\r
+cv::gpu::HOGDescriptor::HOGDescriptor(Size win_size_, Size block_size_, Size block_stride_, Size cell_size_,\r
+ int nbins_, double win_sigma_, double threshold_L2hys_, bool gamma_correction_, int nlevels_)\r
+ : win_size(win_size_),\r
+ block_size(block_size_),\r
+ block_stride(block_stride_),\r
+ cell_size(cell_size_),\r
+ nbins(nbins_),\r
+ win_sigma(win_sigma_),\r
+ threshold_L2hys(threshold_L2hys_),\r
+ gamma_correction(gamma_correction_),\r
+ nlevels(nlevels_)\r
{\r
CV_Assert((win_size.width - block_size.width ) % block_stride.width == 0 && \r
(win_size.height - block_size.height) % block_stride.height == 0);\r
return detector_size == 0 || detector_size == descriptor_size || detector_size == descriptor_size + 1;\r
}\r
\r
-void cv::gpu::HOGDescriptor::setSVMDetector(const vector<float>& detector)\r
+void cv::gpu::HOGDescriptor::setSVMDetector(const vector<float>& _detector)\r
{\r
- std::vector<float> detector_reordered(detector.size());\r
+ std::vector<float> detector_reordered(_detector.size());\r
\r
size_t block_hist_size = getBlockHistogramSize();\r
cv::Size blocks_per_img = numPartsWithin(win_size, block_size, block_stride);\r
for (int i = 0; i < blocks_per_img.height; ++i)\r
for (int j = 0; j < blocks_per_img.width; ++j)\r
{\r
- const float* src = &detector[0] + (j * blocks_per_img.height + i) * block_hist_size;\r
+ const float* src = &_detector[0] + (j * blocks_per_img.height + i) * block_hist_size;\r
float* dst = &detector_reordered[0] + (i * blocks_per_img.width + j) * block_hist_size;\r
for (size_t k = 0; k < block_hist_size; ++k)\r
dst[k] = src[k];\r
this->detector.upload(Mat(detector_reordered).reshape(1, 1));\r
\r
size_t descriptor_size = getDescriptorSize(); \r
- free_coef = detector.size() > descriptor_size ? detector[descriptor_size] : 0;\r
+ free_coef = _detector.size() > descriptor_size ? _detector[descriptor_size] : 0;\r
\r
CV_Assert(checkDetectorSize());\r
}\r
}\r
\r
\r
-void cv::gpu::HOGDescriptor::computeGradient(const GpuMat& img, GpuMat& grad, GpuMat& qangle)\r
+void cv::gpu::HOGDescriptor::computeGradient(const GpuMat& img, GpuMat& _grad, GpuMat& _qangle)\r
{\r
CV_Assert(img.type() == CV_8UC1 || img.type() == CV_8UC4);\r
\r
// grad.create(img.size(), CV_32FC2);\r
- grad = getBuffer(img.size(), CV_32FC2, grad_buf); \r
+ _grad = getBuffer(img.size(), CV_32FC2, grad_buf);\r
\r
// qangle.create(img.size(), CV_8UC2);\r
- qangle = getBuffer(img.size(), CV_8UC2, qangle_buf); \r
+ _qangle = getBuffer(img.size(), CV_8UC2, qangle_buf);\r
\r
float angleScale = (float)(nbins / CV_PI);\r
switch (img.type()) \r
{\r
case CV_8UC1:\r
- hog::compute_gradients_8UC1(nbins, img.rows, img.cols, img, angleScale, grad, qangle, gamma_correction);\r
+ hog::compute_gradients_8UC1(nbins, img.rows, img.cols, img, angleScale, _grad, _qangle, gamma_correction);\r
break;\r
case CV_8UC4:\r
- hog::compute_gradients_8UC4(nbins, img.rows, img.cols, img, angleScale, grad, qangle, gamma_correction);\r
+ hog::compute_gradients_8UC4(nbins, img.rows, img.cols, img, angleScale, _grad, _qangle, gamma_correction);\r
break;\r
}\r
}\r
\r
for (size_t i = 0; i < level_scale.size(); i++)\r
{\r
- double scale = level_scale[i];\r
- Size sz(cvRound(img.cols / scale), cvRound(img.rows / scale));\r
+ double _scale = level_scale[i];\r
+ Size sz(cvRound(img.cols / _scale), cvRound(img.rows / _scale));\r
GpuMat smaller_img;\r
\r
if (sz == img.size())\r
struct GraphEdge\r
{\r
GraphEdge() {}\r
- GraphEdge(int to, int next, const T& val) : to(to), next(next), val(val) {}\r
+ GraphEdge(int to_, int next_, const T& val_) : to(to_), next(next_), val(val_) {}\r
int to;\r
int next;\r
T val;\r
struct SegmLinkVal\r
{\r
SegmLinkVal() {}\r
- SegmLinkVal(int dr, int dsp) : dr(dr), dsp(dsp) {}\r
+ SegmLinkVal(int dr_, int dsp_) : dr(dr_), dsp(dsp_) {}\r
bool operator <(const SegmLinkVal& other) const\r
{\r
return dr + dsp < other.dr + other.dsp;\r
struct SegmLink\r
{\r
SegmLink() {}\r
- SegmLink(int from, int to, const SegmLinkVal& val)\r
- : from(from), to(to), val(val) {}\r
+ SegmLink(int from_, int to_, const SegmLinkVal& val_)\r
+ : from(from_), to(to_), val(val_) {}\r
bool operator <(const SegmLink& other) const\r
{\r
return val < other.val;\r
\r
\r
template <typename T>\r
-Graph<T>::Graph(int numv, int nume_max) : start(numv, -1), edges(nume_max)\r
+Graph<T>::Graph(int numv_, int nume_max_) : start(numv_, -1), edges(nume_max_)\r
{\r
- this->numv = numv;\r
- this->nume_max = nume_max;\r
+ this->numv = numv_;\r
+ this->nume_max = nume_max_;\r
nume = 0;\r
}\r
\r
#ifndef OPENCV_GPU_WARP_REDUCE_HPP__\r
#define OPENCV_GPU_WARP_REDUCE_HPP__\r
\r
-namespace cv { namespace gpu { namespace device \r
-{ \r
- template <class T> \r
+namespace cv { namespace gpu { namespace device\r
+{\r
+ template <class T>\r
__device__ __forceinline__ T warp_reduce(volatile T *ptr , const unsigned int tid = threadIdx.x)\r
{\r
const unsigned int lane = tid & 31; // index of thread in warp (0..31)\r
- \r
- if (lane < 16)\r
- { \r
- T partial = ptr[tid];\r
\r
- ptr[tid] = partial = partial + ptr[tid + 16];\r
- ptr[tid] = partial = partial + ptr[tid + 8];\r
- ptr[tid] = partial = partial + ptr[tid + 4];\r
- ptr[tid] = partial = partial + ptr[tid + 2];\r
- ptr[tid] = partial = partial + ptr[tid + 1]; \r
- }\r
+ if (lane < 16)\r
+ {\r
+ T partial = ptr[tid];\r
\r
- return ptr[tid - lane];\r
+ ptr[tid] = partial = partial + ptr[tid + 16];\r
+ ptr[tid] = partial = partial + ptr[tid + 8];\r
+ ptr[tid] = partial = partial + ptr[tid + 4];\r
+ ptr[tid] = partial = partial + ptr[tid + 2];\r
+ ptr[tid] = partial = partial + ptr[tid + 1];\r
+ }\r
+\r
+ return ptr[tid - lane];\r
}\r
}}} // namespace cv { namespace gpu { namespace device {\r
\r
{\r
cols_pyr[i] = cols_pyr[i-1] / 2;\r
rows_pyr[i] = rows_pyr[i-1] / 2;\r
- nr_plane_pyr[i] = nr_plane_pyr[i-1] * 2; \r
- } \r
+ nr_plane_pyr[i] = nr_plane_pyr[i-1] * 2;\r
+ }\r
\r
\r
GpuMat u[2], d[2], l[2], r[2], disp_selected_pyr[2], data_cost, data_cost_selected;\r
GpuMat sub2 = sub1.rowRange((k+0)*sub1.rows/2, (k+1)*sub1.rows/2);\r
\r
GpuMat *buf_ptrs[] = { &u[k], &d[k], &l[k], &r[k], &disp_selected_pyr[k] }; \r
- for(int r = 0; r < 5; ++r) \r
+ for(int _r = 0; _r < 5; ++_r)\r
{\r
- *buf_ptrs[r] = sub2.rowRange(r * sub2.rows/5, (r+1) * sub2.rows/5);\r
- assert(buf_ptrs[r]->cols == cols && buf_ptrs[r]->rows == rows * rthis.nr_plane);\r
+ *buf_ptrs[_r] = sub2.rowRange(_r * sub2.rows/5, (_r+1) * sub2.rows/5);\r
+ assert(buf_ptrs[_r]->cols == cols && buf_ptrs[_r]->rows == rows * rthis.nr_plane);\r
}\r
};\r
\r
- size_t elem_step = mbuf.step / sizeof(T); \r
+ size_t elem_step = mbuf.step / sizeof(T);\r
\r
Size temp_size = data_cost.size();\r
if ((size_t)temp_size.area() < elem_step * rows_pyr[levels - 1] * rthis.ndisp) \r
.. ocv:pyfunction:: cv2.SVM.train_auto(trainData, responses, varIdx, sampleIdx, params[, k_fold[, Cgrid[, gammaGrid[, pGrid[, nuGrid[, coeffGrid[, degreeGrid[, balanced]]]]]]]]) -> retval
- :param k_fold: Cross-validation parameter. The training set is divided into ``k_fold`` subsets. One subset is used to train the model, the others form the test set. So, the SVM algorithm is executed ``k_fold`` times.
+ :param k_fold: Cross-validation parameter. The training set is divided into ``k_fold`` subsets. One subset is used to test the model, the others form the training set. So, the SVM algorithm is executed ``k_fold`` times.
:param \*Grid: Iteration grid for the corresponding SVM parameter.
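A minimal sketch of the ``k_fold`` cross-validated training described above, through the Python binding from the directive; the toy data, the RBF parameters, and passing ``None`` for ``varIdx``/``sampleIdx`` are illustrative assumptions rather than part of the original documentation: ::

    import numpy as np
    import cv2

    # hypothetical toy set: 100 samples with 2 features, binary labels
    train_data = np.random.rand(100, 2).astype(np.float32)
    responses = np.float32(train_data[:, 0] > 0.5)

    params = dict(svm_type=cv2.SVM_C_SVC, kernel_type=cv2.SVM_RBF)
    svm = cv2.SVM()
    # k_fold=10: the data is split into 10 subsets; each is used once as the
    # test fold while the remaining nine form the training set, so training
    # runs 10 times per point of the parameter grids
    svm.train_auto(train_data, responses, None, None, params, k_fold=10)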
-android update project --target android-11 --library ../../OpenCV-2.4.0/ --name "Sample - 15-puzzle" --path ./15-puzzle
-android update project --target android-11 --library ../../OpenCV-2.4.0/ --name "Sample - face-detection" --path ./face-detection
-android update project --target android-11 --library ../../OpenCV-2.4.0/ --name "Sample - image-manipulations" --path ./image-manipulations
-android update project --target android-11 --name "Tutorial 0 (Basic) - Android Camera" --path ./tutorial-0-androidcamera
-android update project --target android-11 --library ../../OpenCV-2.4.0/ --name "Tutorial 1 (Basic) - Add OpenCV" --path ./tutorial-1-addopencv
-android update project --target android-11 --library ../../OpenCV-2.4.0/ --name "Tutorial 2 (Basic) - Use OpenCV Camera" --path ./tutorial-2-opencvcamera
-android update project --target android-11 --name "Tutorial 3 (Advanced) - Add Native OpenCV" --path ./tutorial-3-native
-android update project --target android-11 --library ../../OpenCV-2.4.0/ --name "Tutorial 4 (Advanced) - Mix Java+Native OpenCV" --path ./tutorial-4-mixed
\ No newline at end of file
+call android update project --target android-11 --library ../../OpenCV-2.4.1/ --name "Sample - 15-puzzle" --path ./15-puzzle
+call android update project --target android-11 --library ../../OpenCV-2.4.1/ --name "Sample - face-detection" --path ./face-detection
+call android update project --target android-11 --library ../../OpenCV-2.4.1/ --name "Sample - image-manipulations" --path ./image-manipulations
+call android update project --target android-11 --name "Tutorial 0 (Basic) - Android Camera" --path ./tutorial-0-androidcamera
+call android update project --target android-11 --library ../../OpenCV-2.4.1/ --name "Tutorial 1 (Basic) - Add OpenCV" --path ./tutorial-1-addopencv
+call android update project --target android-11 --library ../../OpenCV-2.4.1/ --name "Tutorial 2 (Basic) - Use OpenCV Camera" --path ./tutorial-2-opencvcamera
+call android update project --target android-11 --name "Tutorial 3 (Advanced) - Add Native OpenCV" --path ./tutorial-3-native
+call android update project --target android-11 --library ../../OpenCV-2.4.1/ --name "Tutorial 4 (Advanced) - Mix Java+Native OpenCV" --path ./tutorial-4-mixed
\ No newline at end of file
minGradMagnitudes[3] = 1;
const float minDepth = 0.f; //in meters
- const float maxDepth = 3.f; //in meters
+ const float maxDepth = 4.f; //in meters
const float maxDepthDiff = 0.07f; //in meters
tm.start();
--- /dev/null
+'''\r
+K-means clustering sample.\r
+Usage:\r
+ kmeans.py\r
+\r
+Keyboard shortcuts:\r
+ ESC - exit\r
+ space - generate new distribution\r
+'''\r
+\r
+import numpy as np\r
+import cv2\r
+\r
+from gaussian_mix import make_gaussians\r
+\r
+if __name__ == '__main__':\r
+ cluster_n = 5\r
+ img_size = 512\r
+\r
+ print __doc__\r
+\r
+ # generating bright palette\r
+ colors = np.zeros((1, cluster_n, 3), np.uint8)\r
+ colors[0,:] = 255\r
+ colors[0,:,0] = np.arange(0, 180, 180.0/cluster_n)\r
+ colors = cv2.cvtColor(colors, cv2.COLOR_HSV2BGR)[0]\r
+\r
+ while True:\r
+ print 'sampling distributions...'\r
+ points, _ = make_gaussians(cluster_n, img_size)\r
+\r
+ term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1)\r
+ ret, labels, centers = cv2.kmeans(points, cluster_n, term_crit, 10, 0)\r
+\r
+ img = np.zeros((img_size, img_size, 3), np.uint8)\r
+ for (x, y), label in zip(np.int32(points), labels.ravel()):\r
+ c = map(int, colors[label])\r
+ cv2.circle(img, (x, y), 1, c, -1)\r
+\r
+ cv2.imshow('gaussian mixture', img)\r
+ ch = 0xFF & cv2.waitKey(0)\r
+ if ch == 27:\r
+ break\r
+ cv2.destroyAllWindows() \r