-# Binaries branch name: ffmpeg/3.4_20180608
-# Binaries were created for OpenCV: f5ddbbf65937d8f44e481e4ee1082961821f5c62
-ocv_update(FFMPEG_BINARIES_COMMIT "8041bd6f5ad37045c258904ba3030bb3442e3911")
-ocv_update(FFMPEG_FILE_HASH_BIN32 "fa5a2a4e2f37defcb95bde8ed145c2b3")
-ocv_update(FFMPEG_FILE_HASH_BIN64 "2cc08fc4fef8199fe80e0f126684834f")
+# Binaries branch name: ffmpeg/master_20180220
+# Binaries were created for OpenCV: 9819ebc0954c2df62943ebbd5936d325e5dc89e1
+ocv_update(FFMPEG_BINARIES_COMMIT "0a0e88972a7ea97708378d0488a65f83e7cc5e69")
+ocv_update(FFMPEG_FILE_HASH_BIN32 "b8120c07962d591e2e9071a1bf566fd0")
+ocv_update(FFMPEG_FILE_HASH_BIN64 "dc9c50e7b05482acc25d6ce0ac61bf1d")
ocv_update(FFMPEG_FILE_HASH_CMAKE "3b90f67f4b429e77d3da36698cef700c")
function(download_win_ffmpeg script_var)
# ----------------------------------------------------------------------------
# Detect compiler and target platform architecture
# ----------------------------------------------------------------------------
-OCV_OPTION(ENABLE_CXX11 "Enable C++11 compilation mode" "${OPENCV_CXX11}")
include(cmake/OpenCVDetectCXXCompiler.cmake)
ocv_cmake_hook(POST_DETECT_COMPILER)
OCV_OPTION(WITH_QTKIT "Use QTKit Video I/O backend" OFF IF APPLE )
OCV_OPTION(WITH_TBB "Include Intel TBB support" OFF IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_OPENMP "Include OpenMP support" OFF)
-OCV_OPTION(WITH_CSTRIPES "Include C= support" OFF IF (WIN32 AND NOT WINRT) )
OCV_OPTION(WITH_PTHREADS_PF "Use pthreads-based parallel_for" ON IF (NOT WIN32 OR MINGW) )
OCV_OPTION(WITH_TIFF "Include TIFF support" ON IF (NOT IOS) )
OCV_OPTION(WITH_UNICAP "Include Unicap support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_OPENCLAMDBLAS "Include AMD OpenCL BLAS library support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_DIRECTX "Include DirectX support" ON IF (WIN32 AND NOT WINRT) )
OCV_OPTION(WITH_INTELPERC "Include Intel Perceptual Computing support" OFF IF (WIN32 AND NOT WINRT) )
+OCV_OPTION(WITH_LIBREALSENSE "Include Intel librealsense support" OFF IF (NOT WITH_INTELPERC) )
OCV_OPTION(WITH_MATLAB "Include Matlab support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT))
OCV_OPTION(WITH_VA "Include VA support" OFF IF (UNIX AND NOT ANDROID) )
OCV_OPTION(WITH_VA_INTEL "Include Intel VA-API/OpenCL support" OFF IF (UNIX AND NOT ANDROID) )
status("")
status(" C/C++:")
status(" Built as dynamic libs?:" BUILD_SHARED_LIBS THEN YES ELSE NO)
-if(ENABLE_CXX11 OR HAVE_CXX11)
-status(" C++11:" HAVE_CXX11 THEN YES ELSE NO)
-endif()
status(" C++ Compiler:" ${OPENCV_COMPILER_STR})
status(" C++ flags (Release):" ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE})
status(" C++ flags (Debug):" ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG})
# Order is similar to CV_PARALLEL_FRAMEWORK in core/src/parallel.cpp
ocv_build_features_string(parallel_status EXCLUSIVE
IF HAVE_TBB THEN "TBB (ver ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} interface ${TBB_INTERFACE_VERSION})"
- IF HAVE_CSTRIPES THEN "C="
IF HAVE_OPENMP THEN "OpenMP"
IF HAVE_GCD THEN "GCD"
IF WINRT OR HAVE_CONCURRENCY THEN "Concurrency"
+++ /dev/null
-if(WIN32)
- find_path( CSTRIPES_LIB_DIR
- NAMES "C=.lib"
- DOC "The path to C= lib and dll")
- if(CSTRIPES_LIB_DIR)
- ocv_include_directories("${CSTRIPES_LIB_DIR}/..")
- link_directories("${CSTRIPES_LIB_DIR}")
- set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} "C=")
- set(HAVE_CSTRIPES 1)
- endif()
-endif()
endif()
if(UNIX OR APPLE)
- set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} -Xcompiler -fPIC)
+ set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} -Xcompiler -fPIC --std=c++11)
endif()
if(APPLE)
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} -Xcompiler -fno-finite-math-only)
endforeach()
endif()
-if(ENABLE_CXX11)
- #cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
- set(CMAKE_CXX_STANDARD 11)
- set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
- set(CMAKE_CXX_EXTENSIONS OFF) # use -std=c++11 instead of -std=gnu++11
- if(CMAKE_CXX11_COMPILE_FEATURES)
- set(HAVE_CXX11 ON)
- endif()
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
+set(CMAKE_CXX_EXTENSIONS OFF) # use -std=c++11 instead of -std=gnu++11
+if(CMAKE_CXX11_COMPILE_FEATURES)
+ set(HAVE_CXX11 ON)
endif()
if(NOT HAVE_CXX11)
ocv_check_compiler_flag(CXX "" HAVE_CXX11 "${OpenCV_SOURCE_DIR}/cmake/checks/cxx11.cpp")
endif()
endif()
endif()
+if(NOT HAVE_CXX11)
+ message(FATAL_ERROR "OpenCV 4.x requires C++11")
+endif()
return()
endmacro()
-if(NOT HAVE_CXX11)
- message(WARNING "DL Inference engine requires C++11. You can turn it on via ENABLE_CXX11=ON CMake flag.")
- ie_fail()
-endif()
find_package(InferenceEngine QUIET)
if(InferenceEngine_FOUND)
endif()
endif()
+try_compile(VTK_COMPILE_STATUS
+ "${OpenCV_BINARY_DIR}"
+ "${OpenCV_SOURCE_DIR}/cmake/checks/vtk_test.cpp"
+ CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${VTK_INCLUDE_DIRS}"
+ LINK_LIBRARIES ${VTK_LIBRARIES}
+ OUTPUT_VARIABLE OUTPUT
+)
+
+if(NOT ${VTK_COMPILE_STATUS})
+ message(STATUS "VTK support is disabled. Compilation of the sample code has failed.")
+ return()
+endif()
+
set(HAVE_VTK ON)
message(STATUS "Found VTK ${VTK_VERSION} (${VTK_USE_FILE})")
--- /dev/null
+# Main variables:
+# LIBREALSENSE_LIBRARIES and LIBREALSENSE_INCLUDE to link Intel librealsense modules
+# HAVE_LIBREALSENSE for conditional compilation OpenCV with/without librealsense
+
+find_path(LIBREALSENSE_INCLUDE_DIR "librealsense2/rs.hpp" PATHS "$ENV{LIBREALSENSE_INCLUDE}" DOC "Path to librealsense interface headers")
+find_library(LIBREALSENSE_LIBRARIES "realsense2" PATHS "$ENV{LIBREALSENSE_LIB}" DOC "Path to librealsense interface libraries")
+
+if(LIBREALSENSE_INCLUDE_DIR AND LIBREALSENSE_LIBRARIES)
+ set(HAVE_LIBREALSENSE TRUE)
+else()
+ set(HAVE_LIBREALSENSE FALSE)
+ message( WARNING, " librealsense include directory (set by LIBREALSENSE_INCLUDE_DIR variable) is not found or does not have librealsense include files." )
+endif() #if(LIBREALSENSE_INCLUDE_DIR AND LIBREALSENSE_LIBRARIES)
+
+mark_as_advanced(FORCE LIBREALSENSE_LIBRARIES LIBREALSENSE_INCLUDE_DIR)
\ No newline at end of file
endif()
endif(WITH_CLP)
-# --- C= ---
-if(WITH_CSTRIPES AND NOT HAVE_TBB)
- include("${OpenCV_SOURCE_DIR}/cmake/OpenCVDetectCStripes.cmake")
-else()
- set(HAVE_CSTRIPES 0)
-endif()
-
# --- GCD ---
-if(APPLE AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES)
+if(APPLE AND NOT HAVE_TBB)
set(HAVE_GCD 1)
else()
set(HAVE_GCD 0)
endif()
# --- Concurrency ---
-if(MSVC AND NOT HAVE_TBB AND NOT HAVE_CSTRIPES)
+if(MSVC AND NOT HAVE_TBB)
set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/concurrencytest.cpp")
file(WRITE "${_fname}" "#if _MSC_VER < 1600\n#error\n#endif\nint main() { return 0; }\n")
try_compile(HAVE_CONCURRENCY "${CMAKE_BINARY_DIR}" "${_fname}")
endif()
endif(APPLE)
+# --- Intel librealsense ---
+if(WITH_LIBREALSENSE)
+ include("${OpenCV_SOURCE_DIR}/cmake/OpenCVFindLibRealsense.cmake")
+endif(WITH_LIBREALSENSE)
+
# --- Intel Perceptual Computing SDK ---
if(WITH_INTELPERC)
include("${OpenCV_SOURCE_DIR}/cmake/OpenCVFindIntelPerCSDK.cmake")
string(REPLACE "opencv_" "" OPENCV_MODULES_CONFIGMAKE "${OPENCV_MODULES_CONFIGMAKE}")
if(BUILD_FAT_JAVA_LIB)
- set(OPENCV_LIBS_CONFIGMAKE java3)
+ set(OPENCV_LIBS_CONFIGMAKE java4)
else()
set(OPENCV_LIBS_CONFIGMAKE "${OPENCV_MODULES_CONFIGMAKE}")
endif()
-set(MIN_VER_CMAKE 2.8.12.2)
+set(MIN_VER_CMAKE 3.5.1)
set(MIN_VER_CUDA 6.5)
set(MIN_VER_PYTHON2 2.6)
set(MIN_VER_PYTHON3 3.2)
--- /dev/null
+#include <vtkSmartPointer.h>
+#include <vtkTransform.h>
+#include <vtkMath.h>
+
+int main()
+{
+ vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
+ return 0;
+}
/* Cocoa API */
#cmakedefine HAVE_COCOA
-/* C= */
-#cmakedefine HAVE_CSTRIPES
-
/* NVIDIA CUDA Basic Linear Algebra Subprograms (BLAS) API*/
#cmakedefine HAVE_CUBLAS
/* OpenNI library */
#cmakedefine HAVE_OPENNI2
+/* librealsense library */
+#cmakedefine HAVE_LIBREALSENSE
+
/* PNG codec */
#cmakedefine HAVE_PNG
Finally, as in the previous case, we start by splitting our big dataset into individual cells. For
every digit, 250 cells are reserved for training data and remaining 250 data is reserved for
-testing. Full code is given below, you also can download it from [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py):
+testing. Full code is given below, you also can download it from [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py):
@include samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py
You may also find the source code in the `samples/cpp/tutorial_code/calib3d/camera_calibration/`
folder of the OpenCV source library or [download it from here
-](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp). The program has a
+](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp). The program has a
single argument: the name of its configuration file. If none is given then it will try to open the
one named "default.xml". [Here's a sample configuration file
-](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml) in XML format. In the
+](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml) in XML format. In the
configuration file you may choose to use camera as an input, a video file or an image list. If you
opt for the last one, you will need to create a configuration file where you enumerate the images to
-use. Here's [an example of this ](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml).
+use. Here's [an example of this ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml).
The important part to remember is that the images need to be specified using the absolute path or
the relative one from your application's working directory. You may find all this in the samples
directory mentioned above.
@add_toggle_cpp
Download the source code from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp).
@include cpp/tutorial_code/core/AddingImages/AddingImages.cpp
@end_toggle
@add_toggle_java
Download the source code from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/core/AddingImages/AddingImages.java).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/AddingImages/AddingImages.java).
@include java/tutorial_code/core/AddingImages/AddingImages.java
@end_toggle
@add_toggle_python
Download the source code from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/core/AddingImages/adding_images.py).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/AddingImages/adding_images.py).
@include python/tutorial_code/core/AddingImages/adding_images.py
@end_toggle
@snippet python/tutorial_code/core/AddingImages/adding_images.py load
@end_toggle
-We used the following images: [LinuxLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/LinuxLogo.jpg) and [WindowsLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/WindowsLogo.jpg)
+We used the following images: [LinuxLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/LinuxLogo.jpg) and [WindowsLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/WindowsLogo.jpg)
@warning Since we are *adding* *src1* and *src2*, they both have to be of the same size
(width and height) and type.
@add_toggle_cpp
- This code is in your OpenCV sample folder. Otherwise you can grab it from
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp)
@include samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp
@end_toggle
@add_toggle_java
- This code is in your OpenCV sample folder. Otherwise you can grab it from
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java)
@include samples/java/tutorial_code/core/BasicGeometricDrawing/BasicGeometricDrawing.java
@end_toggle
@add_toggle_python
- This code is in your OpenCV sample folder. Otherwise you can grab it from
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py)
@include samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py
@end_toggle
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp)
- The following code performs the operation \f$g(i,j) = \alpha \cdot f(i,j) + \beta\f$ :
@include samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/BasicLinearTransformsDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/BasicLinearTransformsDemo.java)
- The following code performs the operation \f$g(i,j) = \alpha \cdot f(i,j) + \beta\f$ :
@include samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/BasicLinearTransformsDemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py)
- The following code performs the operation \f$g(i,j) = \alpha \cdot f(i,j) + \beta\f$ :
@include samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py
### Code
@add_toggle_cpp
-Code for the tutorial is [here](https://github.com/opencv/opencv/blob/3.4/samples/cpp/tutorial_code/ImgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.cpp).
+Code for the tutorial is [here](https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/ImgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.cpp).
@end_toggle
@add_toggle_java
-Code for the tutorial is [here](https://github.com/opencv/opencv/blob/3.4/samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/ChangingContrastBrightnessImageDemo.java).
+Code for the tutorial is [here](https://github.com/opencv/opencv/blob/master/samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/ChangingContrastBrightnessImageDemo.java).
@end_toggle
@add_toggle_python
-Code for the tutorial is [here](https://github.com/opencv/opencv/blob/3.4/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py).
+Code for the tutorial is [here](https://github.com/opencv/opencv/blob/master/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py).
@end_toggle
Code for the gamma correction:
@add_toggle_cpp
You can [download this from here
-](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp) or
+](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp) or
find it in the
`samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp` of the
OpenCV source code library.
@add_toggle_java
You can [download this from here
-](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java) or
+](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java) or
find it in the
`samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java` of the
OpenCV source code library.
@add_toggle_python
You can [download this from here
-](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py) or
+](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py) or
find it in the
`samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py` of the
OpenCV source code library.
example, let us find out if a text is horizontal or not? Looking at some text you'll notice that the
text lines sort of form also horizontal lines and the letters form sort of vertical lines. These two
main components of a text snippet may be also seen in case of the Fourier transform. Let us use
-[this horizontal ](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/imageTextN.png) and [this rotated](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/imageTextR.png)
+[this horizontal ](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/imageTextN.png) and [this rotated](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/imageTextR.png)
image about a text.
In case of the horizontal text:
-----------
You can [download this from here
-](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp) or find it in the
+](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp) or find it in the
`samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp` of the OpenCV source code
library.
will make the scanning for each image using all of these methods, and print out how long it took.
You can download the full source code [here
-](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp) or look it up in
+](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp) or look it up in
the samples directory of OpenCV at the cpp tutorial code for the core section. Its basic usage is:
@code{.bash}
how_to_scan_images imageName.jpg intValueToReduce [G]
The goal of this tutorial is to show you how to use the OpenCV `parallel_for_` framework to easily
parallelize your code. To illustrate the concept, we will write a program to draw a Mandelbrot set
exploiting almost all the CPU load available.
-The full tutorial code is [here](https://github.com/opencv/opencv/blob/3.4/samples/cpp/tutorial_code/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.cpp).
+The full tutorial code is [here](https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.cpp).
If you want more information about multithreading, you will have to refer to a reference book or course as this tutorial is intended
to remain simple.
Results
-----------
-You can find the full tutorial code [here](https://github.com/opencv/opencv/blob/3.4/samples/cpp/tutorial_code/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.cpp).
+You can find the full tutorial code [here](https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.cpp).
The performance of the parallel implementation depends of the type of CPU you have. For instance, on 4 cores / 8 threads
CPU, you can expect a speed-up of around 6.9X. There are many factors to explain why we do not achieve a speed-up of almost 8X.
Main reasons should be mostly due to:
You may also find the source code in the
`samples/cpp/tutorial_code/core/ippasync/ippasync_sample.cpp` file of the OpenCV source library or
-download it from [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/core/ippasync/ippasync_sample.cpp).
+download it from [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/ippasync/ippasync_sample.cpp).
@include cpp/tutorial_code/core/ippasync/ippasync_sample.cpp
A case study
------------
-Now that you have the basics done [here's](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp)
+Now that you have the basics done [here's](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp)
an example that mixes the usage of the C interface with the C++ one. You will also find it in the
sample directory of the OpenCV source code library at the
`samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp` .
You may observe a runtime instance of this on the [YouTube
here](https://www.youtube.com/watch?v=qckm-zvo31w) and you can [download the source code from here
-](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp)
+](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp)
or find it in the
`samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp`
of the OpenCV source code library.
@add_toggle_cpp
You can download this source code from [here
-](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp) or look in the
+](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp) or look in the
OpenCV source code libraries sample directory at
`samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp`.
@include samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp
@add_toggle_java
You can download this source code from [here
-](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java) or look in the
+](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java) or look in the
OpenCV source code libraries sample directory at
`samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java`.
@include samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java
@add_toggle_python
You can download this source code from [here
-](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py) or look in the
+](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py) or look in the
OpenCV source code libraries sample directory at
`samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py`.
@include samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py
![](images/MatBasicContainerOut15.png)
Most of the samples here have been included in a small console application. You can download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp)
or in the core section of the cpp samples.
You can also find a quick video demonstration of this on
@snippet dnn/edge_detection.py Register
That's it! We've replaced an implemented OpenCV's layer to a custom one.
-You may find a full script in the [source code](https://github.com/opencv/opencv/tree/3.4/samples/dnn/edge_detection.py).
+You may find a full script in the [source code](https://github.com/opencv/opencv/tree/master/samples/dnn/edge_detection.py).
<table border="0">
<tr>
Source Code
-----------
-We will be using snippets from the example application, that can be downloaded [here](https://github.com/opencv/opencv/blob/3.4/samples/dnn/classification.cpp).
+We will be using snippets from the example application, that can be downloaded [here](https://github.com/opencv/opencv/blob/master/samples/dnn/classification.cpp).
@include dnn/classification.cpp
[bvlc_googlenet.caffemodel](http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel)
Also you need file with names of [ILSVRC2012](http://image-net.org/challenges/LSVRC/2012/browse-synsets) classes:
- [classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/tree/3.4/samples/dnn/classification_classes_ILSVRC2012.txt).
+ [classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/tree/master/samples/dnn/classification_classes_ILSVRC2012.txt).
Put these files into working dir of this program example.
## Build OpenCV with Halide backend
When you build OpenCV add the following configuration flags:
-- `ENABLE_CXX11` - enable C++11 standard
-
- `WITH_HALIDE` - enable Halide linkage
- `HALIDE_ROOT_DIR` - path to Halide build directory
-----------
Use a universal sample for object detection models written
-[in C++](https://github.com/opencv/opencv/blob/3.4/samples/dnn/object_detection.cpp) and
-[in Python](https://github.com/opencv/opencv/blob/3.4/samples/dnn/object_detection.py) languages
+[in C++](https://github.com/opencv/opencv/blob/master/samples/dnn/object_detection.cpp) and
+[in Python](https://github.com/opencv/opencv/blob/master/samples/dnn/object_detection.py) languages
Usage examples
--------------
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/features2D/AKAZE_match.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/features2D/akaze_matching/AKAZEMatchDemo.java)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/features2D/akaze_matching/AKAZEMatchDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/features2D/akaze_matching/AKAZEMatchDemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py)
- **Code at glance:**
@include samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp)
@include samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java)
@include samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py)
@include samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py
@end_toggle
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp)
@include samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java)
@include samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py)
@include samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py
@end_toggle
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp)
@include samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java)
@include samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py)
@include samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py
@end_toggle
@add_toggle_cpp
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp)
@include samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp
@end_toggle
@add_toggle_java
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java)
@include samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java
@end_toggle
@add_toggle_python
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py)
@include samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py
@end_toggle
* An Invitation to 3-D Vision: From Images to Geometric Models, @cite Ma:2003:IVI
* Computer Vision: Algorithms and Applications, @cite RS10
-The tutorial code can be found [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/features2D/Homography).
-The images used in this tutorial can be found [here](https://github.com/opencv/opencv/tree/3.4/samples/data) (`left*.jpg`).
+The tutorial code can be found [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/Homography).
+The images used in this tutorial can be found [here](https://github.com/opencv/opencv/tree/master/samples/data) (`left*.jpg`).
Basic theory {#tutorial_homography_Basic_theory}
------------
@add_toggle_cpp
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
@include samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
@end_toggle
@add_toggle_java
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java)
@include samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java
@end_toggle
@add_toggle_python
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py)
@include samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py
@end_toggle
@add_toggle_cpp
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp)
@include samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp
@end_toggle
@add_toggle_java
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java)
@include samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java
@end_toggle
@add_toggle_python
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py)
@include samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py
@end_toggle
@add_toggle_cpp
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp)
@include samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp
@end_toggle
@add_toggle_java
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java)
@include samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java
@end_toggle
@add_toggle_python
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py)
@include samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py
@end_toggle
@add_toggle_cpp
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp)
@include samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp
@end_toggle
@add_toggle_java
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java)
@include samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java
@end_toggle
@add_toggle_python
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py)
@include samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py
@end_toggle
You may also find the source code and the video file in the
`samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity` directory of the OpenCV
-source library or download it from [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp).
+source library or download it from [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp).
The full source code is quite long (due to the controlling of the application via the command line
arguments and performance measurement). Therefore, to avoid cluttering up these sections with those
you'll find here only the functions themselves.
@add_toggle_cpp
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp)
@include cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp
@end_toggle
@add_toggle_java
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java)
@include java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java
@end_toggle
@add_toggle_python
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py)
@include python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py
@end_toggle
@add_toggle_cpp
This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp)
@include samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
@end_toggle
@add_toggle_java
This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java)
@include samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java
@end_toggle
@add_toggle_python
This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py)
@include samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py
@end_toggle
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgProc/Smoothing/Smoothing.java)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/Smoothing/Smoothing.java)
- **Code at glance:**
@include samples/java/tutorial_code/ImgProc/Smoothing/Smoothing.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py)
- **Code at glance:**
@include samples/python/tutorial_code/imgProc/Smoothing/smoothing.py
Results
-------
-- The code opens an image (in this case [lena.jpg](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/lena.jpg))
+- The code opens an image (in this case [lena.jpg](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/lena.jpg))
and display it under the effects of the 4 filters explained.
- Here is a snapshot of the image smoothed using *medianBlur*:
@add_toggle_cpp
- **Downloadable code**:
- Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp)
for the basic version (explained in this tutorial).
- For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved
- demo](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp)
+ demo](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp)
- ...or you can always check out the classical
- [camshiftdemo](https://github.com/opencv/opencv/tree/3.4/samples/cpp/camshiftdemo.cpp)
+ [camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples.
- **Code at glance:**
@add_toggle_java
- **Downloadable code**:
- Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java)
for the basic version (explained in this tutorial).
- For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved
- demo](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo2.java)
+ demo](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo2.java)
- ...or you can always check out the classical
- [camshiftdemo](https://github.com/opencv/opencv/tree/3.4/samples/cpp/camshiftdemo.cpp)
+ [camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples.
- **Code at glance:**
@add_toggle_python
- **Downloadable code**:
- Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py)
for the basic version (explained in this tutorial).
- For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
skin area) you can check the [improved
- demo](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py)
+ demo](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py)
- ...or you can always check out the classical
- [camshiftdemo](https://github.com/opencv/opencv/tree/3.4/samples/cpp/camshiftdemo.cpp)
+ [camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
in samples.
- **Code at glance:**
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py)
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py)
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py)
- **Code at glance:**
@include samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgProc/tutorial_template_matching/MatchTemplateDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/tutorial_template_matching/MatchTemplateDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/ImgProc/tutorial_template_matching/MatchTemplateDemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/imgProc/match_template/match_template.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/match_template/match_template.py)
- **Code at glance:**
@include samples/python/tutorial_code/imgProc/match_template/match_template.py
@add_toggle_cpp
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgProc/HitMiss/HitMiss.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/HitMiss/HitMiss.cpp)
@include samples/cpp/tutorial_code/ImgProc/HitMiss/HitMiss.cpp
@end_toggle
@add_toggle_java
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgProc/HitMiss/HitMiss.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/HitMiss/HitMiss.java)
@include samples/java/tutorial_code/ImgProc/HitMiss/HitMiss.java
@end_toggle
@add_toggle_python
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py)
@include samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py
@end_toggle
@add_toggle_cpp
- The tutorial code is shown in the lines below. You can also download it from
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp)
@include samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp
@end_toggle
@add_toggle_java
- The tutorial code is shown in the lines below. You can also download it from
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgTrans/canny_detector/CannyDetectorDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgTrans/canny_detector/CannyDetectorDemo.java)
@include samples/java/tutorial_code/ImgTrans/canny_detector/CannyDetectorDemo.java
@end_toggle
@add_toggle_python
- The tutorial code is shown in the lines below. You can also download it from
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py)
@include samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py
@end_toggle
@add_toggle_cpp
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp)
@include samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp
@end_toggle
@add_toggle_java
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgTrans/MakeBorder/CopyMakeBorder.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/MakeBorder/CopyMakeBorder.java)
@include samples/java/tutorial_code/ImgTrans/MakeBorder/CopyMakeBorder.java
@end_toggle
@add_toggle_python
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py)
@include samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py
@end_toggle
@add_toggle_cpp
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp).
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp).
@include samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp
@end_toggle
@add_toggle_java
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgTrans/distance_transformation/ImageSegmentationDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgTrans/distance_transformation/ImageSegmentationDemo.java)
@include samples/java/tutorial_code/ImgTrans/distance_transformation/ImageSegmentationDemo.java
@end_toggle
@add_toggle_python
This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py)
@include samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py
@end_toggle
@add_toggle_cpp
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp)
@include cpp/tutorial_code/ImgTrans/filter2D_demo.cpp
@end_toggle
@add_toggle_java
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgTrans/Filter2D/Filter2D_Demo.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/Filter2D/Filter2D_Demo.java)
@include java/tutorial_code/ImgTrans/Filter2D/Filter2D_Demo.java
@end_toggle
@add_toggle_python
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py)
@include python/tutorial_code/ImgTrans/Filter2D/filter2D.py
@end_toggle
@add_toggle_cpp
The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp).
A slightly fancier version (which shows trackbars for changing the threshold values) can be found
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp).
@include samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp
@end_toggle
@add_toggle_java
The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java).
@include samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java
@end_toggle
@add_toggle_python
The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py).
@include samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py
@end_toggle
Explanation
-----------
-The image we used can be found [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/smarties.png)
+The image we used can be found [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/smarties.png)
#### Load an image:
@add_toggle_cpp
The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgTrans/houghlines.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/houghlines.cpp).
A slightly fancier version (which shows both Hough standard and probabilistic
with trackbars for changing the threshold values) can be found
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp).
@include samples/cpp/tutorial_code/ImgTrans/houghlines.cpp
@end_toggle
@add_toggle_java
The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgTrans/HoughLine/HoughLines.java).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/HoughLine/HoughLines.java).
@include samples/java/tutorial_code/ImgTrans/HoughLine/HoughLines.java
@end_toggle
@add_toggle_python
The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py).
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py).
@include samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py
@end_toggle
section. It still implements the same stuff as above, only adding the Trackbar for the
Threshold.
-Using an input image such as a [sudoku image](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/sudoku.png).
+Using an input image such as a [sudoku image](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/sudoku.png).
We get the following result by using the Standard Hough Line Transform:
![](images/hough_lines_result1.png)
And by using the Probabilistic Hough Line Transform:
@add_toggle_cpp
-# The tutorial code is shown in the lines below. You can also download it from
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp)
@include samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp
@end_toggle
@add_toggle_java
-# The tutorial code is shown in the lines below. You can also download it from
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgTrans/LaPlace/LaplaceDemo.java)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/LaPlace/LaplaceDemo.java)
@include samples/java/tutorial_code/ImgTrans/LaPlace/LaplaceDemo.java
@end_toggle
@add_toggle_python
-# The tutorial code is shown in the lines below. You can also download it from
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py)
@include samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py
@end_toggle
@add_toggle_cpp
- The tutorial code is shown in the lines below. You can also download it from
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp)
@include samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp
@end_toggle
@add_toggle_java
- The tutorial code's is shown lines below. You can also download it from
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgTrans/remap/RemapDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgTrans/remap/RemapDemo.java)
@include samples/java/tutorial_code/ImgTrans/remap/RemapDemo.java
@end_toggle
@add_toggle_python
- The tutorial code's is shown lines below. You can also download it from
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py)
@include samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py
@end_toggle
@add_toggle_cpp
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp)
@include samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp
@end_toggle
@add_toggle_java
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgTrans/SobelDemo/SobelDemo.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/SobelDemo/SobelDemo.java)
@include samples/java/tutorial_code/ImgTrans/SobelDemo/SobelDemo.java
@end_toggle
@add_toggle_python
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py)
@include samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py
@end_toggle
@add_toggle_cpp
- The tutorial's code is shown below. You can also download it
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
@include samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp
@end_toggle
@add_toggle_java
- The tutorial's code is shown below. You can also download it
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
@include samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java
@end_toggle
@add_toggle_python
- The tutorial's code is shown below. You can also download it
- [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py)
+ [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py)
@include samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py
@end_toggle
This tutorial's code is shown in the lines below.
@add_toggle_cpp
-You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp).
+You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp).
@include samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp
@end_toggle
@add_toggle_java
-You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.java).
+You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.java).
@include samples/java/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.java
@end_toggle
@add_toggle_python
-You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py).
+You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py).
@include samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py
@end_toggle
Explanation / Result
--------------------
-Get image from [here](https://raw.githubusercontent.com/opencv/opencv/3.4/doc/tutorials/imgproc/morph_lines_detection/images/src.png) .
+Get image from [here](https://raw.githubusercontent.com/opencv/opencv/master/doc/tutorials/imgproc/morph_lines_detection/images/src.png) .
#### Load Image
@add_toggle_cpp
This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp)
@include cpp/tutorial_code/ImgProc/Morphology_2.cpp
@end_toggle
@add_toggle_java
This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgProc/opening_closing_hats/MorphologyDemo2.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/opening_closing_hats/MorphologyDemo2.java)
@include java/tutorial_code/ImgProc/opening_closing_hats/MorphologyDemo2.java
@end_toggle
@add_toggle_python
This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py)
@include python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py
@end_toggle
@add_toggle_cpp
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp)
@include samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp
@end_toggle
@add_toggle_java
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/java/tutorial_code/ImgProc/Pyramids/Pyramids.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/Pyramids/Pyramids.java)
@include samples/java/tutorial_code/ImgProc/Pyramids/Pyramids.java
@end_toggle
@add_toggle_python
You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py)
@include samples/python/tutorial_code/imgProc/Pyramids/pyramids.py
@end_toggle
Results
-------
-- The program calls by default an image [chicky_512.png](https://raw.githubusercontent.com/opencv/opencv/3.4/samples/data/chicky_512.png)
+- The program calls by default an image [chicky_512.png](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/chicky_512.png)
that comes in the `samples/data` folder. Notice that this image is \f$512 \times 512\f$,
hence a downsample won't generate any error (\f$512 = 2^{9}\f$). The original image is shown below:
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp)
@include samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ShapeDescriptors/bounding_rects_circles/GeneralContoursDemo1.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/bounding_rects_circles/GeneralContoursDemo1.java)
@include samples/java/tutorial_code/ShapeDescriptors/bounding_rects_circles/GeneralContoursDemo1.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py)
@include samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py
@end_toggle
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp)
@include samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/GeneralContoursDemo2.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/GeneralContoursDemo2.java)
@include samples/java/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/GeneralContoursDemo2.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py)
@include samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py
@end_toggle
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp)
@include samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ShapeDescriptors/find_contours/FindContoursDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/find_contours/FindContoursDemo.java)
@include samples/java/tutorial_code/ShapeDescriptors/find_contours/FindContoursDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py)
@include samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py
@end_toggle
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp)
@include samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ShapeDescriptors/hull/HullDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/hull/HullDemo.java)
@include samples/java/tutorial_code/ShapeDescriptors/hull/HullDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py)
@include samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py
@end_toggle
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp)
@include samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ShapeDescriptors/moments/MomentsDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/moments/MomentsDemo.java)
@include samples/java/tutorial_code/ShapeDescriptors/moments/MomentsDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py)
@include samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py
@end_toggle
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp)
@include samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ShapeDescriptors/point_polygon_test/PointPolygonTestDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/point_polygon_test/PointPolygonTestDemo.java)
@include samples/java/tutorial_code/ShapeDescriptors/point_polygon_test/PointPolygonTestDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ShapeDescriptors/point_polygon_test/pointPolygonTest_demo.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/point_polygon_test/pointPolygonTest_demo.py)
@include samples/python/tutorial_code/ShapeDescriptors/point_polygon_test/pointPolygonTest_demo.py
@end_toggle
@add_toggle_cpp
The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ImgProc/Threshold.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Threshold.cpp)
@include samples/cpp/tutorial_code/ImgProc/Threshold.cpp
@end_toggle
@add_toggle_java
The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgProc/threshold/Threshold.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/threshold/Threshold.java)
@include samples/java/tutorial_code/ImgProc/threshold/Threshold.java
@end_toggle
@add_toggle_python
The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/imgProc/threshold/threshold.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/threshold/threshold.py)
@include samples/python/tutorial_code/imgProc/threshold/threshold.py
@end_toggle
@add_toggle_cpp
The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ImgProc/Threshold_inRange.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Threshold_inRange.cpp)
@include samples/cpp/tutorial_code/ImgProc/Threshold_inRange.cpp
@end_toggle
@add_toggle_java
The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ImgProc/threshold_inRange/ThresholdInRange.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/threshold_inRange/ThresholdInRange.java)
@include samples/java/tutorial_code/ImgProc/threshold_inRange/ThresholdInRange.java
@end_toggle
@add_toggle_python
The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py)
@include samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py
@end_toggle
[OpenCV4Android](https://groups.google.com/group/android-opencv/) discussion group or OpenCV [Q&A
forum](http://answers.opencv.org). We'll do our best to help you out.
-Tegra Android Development Pack users
-------------------------------------
-
-You may have used [Tegra Android Development
-Pack](http://developer.nvidia.com/tegra-android-development-pack) (**TADP**) released by **NVIDIA**
-for Android development environment setup.
-
-Beside Android development tools the TADP 2.0 includes OpenCV4Android SDK, so it can be already
-installed in your system and you can skip to @ref tutorial_O4A_SDK_samples "samples" section of this tutorial.
-
-More details regarding TADP can be found in the @ref tutorial_android_dev_intro guide.
-
General info
------------
-# OpenCV development will certainly require some knowledge of the [Android
Camera](http://developer.android.com/guide/topics/media/camera.html) specifics.
-Quick environment setup for Android development
------------------------------------------------
-
-If you are making a clean environment install, then you can try [Tegra Android Development
-Pack](https://developer.nvidia.com/tegra-android-development-pack) (**TADP**) released by
-**NVIDIA**.
-
-@note Starting the *version 2.0* the TADP package includes *OpenCV for Tegra* SDK that is a regular
-*OpenCV4Android SDK* extended with Tegra-specific stuff. When unpacked, TADP will cover all of the
-environment setup automatically and you can skip the rest of the guide.
-
-If you are a beginner in Android development then we also recommend you to start with TADP.
-
-@note *NVIDIA*'s Tegra Android Development Pack includes some special features for *NVIDIA*’s [Tegra
-platform](http://www.nvidia.com/object/tegra-3-processor.html)
-but its use is not limited to *Tegra* devices only. \* You need at least *1.6 Gb* free
-disk space for the install.
-
-- TADP will download Android SDK platforms and Android NDK from Google's server, so Internet
- connection is required for the installation.
-- TADP may ask you to flash your development kit at the end of installation process. Just skip
- this step if you have no [Tegra Development Kit](http://developer.nvidia.com/mobile/tegra-hardware-sales-inquiries).
-- (UNIX) TADP will ask you for *root* in the middle of installation, so you need to be a member of
- *sudo* group.
-
Manual environment setup for Android development
------------------------------------------------
This tutorial also assumes you have an Android operated device with OpenCL enabled.
The related source code is located within OpenCV samples at
-[opencv/samples/android/tutorial-4-opencl](https://github.com/opencv/opencv/tree/3.4/samples/android/tutorial-4-opencl/) directory.
+[opencv/samples/android/tutorial-4-opencl](https://github.com/opencv/opencv/tree/master/samples/android/tutorial-4-opencl/) directory.
Preface
-------
public class NativeGLRenderer {
static
{
- System.loadLibrary("opencv_java3"); // comment this when using OpenCV Manager
+ System.loadLibrary("opencv_java4"); // comment this when using OpenCV Manager
System.loadLibrary("JNIrender");
}
@endcode
Let's leave the details of their implementation beyond of this tutorial, please refer the
-[source code](https://github.com/opencv/opencv/tree/3.4/samples/android/tutorial-4-opencl/) to see them.
+[source code](https://github.com/opencv/opencv/tree/master/samples/android/tutorial-4-opencl/) to see them.
Preview Frames modification
---------------------------
path/to/cmake.exe -GNinja -DCMAKE_MAKE_PROGRAM="path/to/ninja.exe" -DCMAKE_TOOLCHAIN_FILE=path/to/opencv/platforms/android/android.toolchain.cmake -DANDROID_ABI="armeabi-v7a with NEON" -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON path/to/opencv
path/to/ninja.exe install/strip
@endcode
- To use your own modified `libopencv_java3.so` you have to keep inside your APK, not to use OpenCV Manager and load it manually via `System.loadLibrary("opencv_java3")`.
+ To use your own modified `libopencv_java4.so` you have to keep inside your APK, not to use OpenCV Manager and load it manually via `System.loadLibrary("opencv_java4")`.
Performance notes
-----------------
-----------
Download the source code from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp).
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp).
@include cpp/tutorial_code/introduction/display_image/display_image.cpp
--------
Now to try this out download our little test [source code
-](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/introduction/windows_visual_studio_opencv/introduction_windows_vs.cpp)
+](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/introduction/windows_visual_studio_opencv/introduction_windows_vs.cpp)
or get it from the sample code folder of the OpenCV sources. Add this to your project and build it.
Here's its content:
This is important to remember when you code inside the code open and save commands. Your resources
will be saved ( and queried for at opening!!!) relatively to your working directory. This is unless
you give a full, explicit path as a parameter for the I/O functions. In the code above we open [this
-OpenCV logo](https://github.com/opencv/opencv/tree/3.4/samples/data/opencv-logo.png). Before starting up the application,
+OpenCV logo](https://github.com/opencv/opencv/tree/master/samples/data/opencv-logo.png). Before starting up the application,
make sure you place
the image file in your current working directory. Modify the image file name inside the code to try
it out on other images too. Run it and voilà:
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ml/introduction_to_pca/IntroductionToPCADemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ml/introduction_to_pca/IntroductionToPCADemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/ml/introduction_to_pca/IntroductionToPCADemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py)
- **Code at glance:**
@include samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py
@end_toggle
-@note Another example using PCA for dimensionality reduction while maintaining an amount of variance can be found at [opencv_source_code/samples/cpp/pca.cpp](https://github.com/opencv/opencv/tree/3.4/samples/cpp/pca.cpp)
+@note Another example using PCA for dimensionality reduction while maintaining an amount of variance can be found at [opencv_source_code/samples/cpp/pca.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/pca.cpp)
Explanation
-----------
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py)
- **Code at glance:**
@include samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py
-----------
You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or
-[download it from here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
+[download it from here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
@note The following code has been implemented with OpenCV 3.0 classes and functions. An equivalent version of the code
using OpenCV 2.4 can be found in [this page.](http://docs.opencv.org/2.4/doc/tutorials/ml/non_linear_svms/non_linear_svms.html#nonlinearsvms)
@add_toggle_cpp
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp)
+ [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp)
- **Code at glance:**
@include samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp
@add_toggle_java
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java)
+ [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java)
- **Code at glance:**
@include samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java
@add_toggle_python
- **Downloadable code**: Click
- [here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py)
+ [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py)
- **Code at glance:**
@include samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp)
@include samples/cpp/tutorial_code/objectDetection/objectDetection.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/objectDetection/cascade_classifier/ObjectDetectionDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/objectDetection/cascade_classifier/ObjectDetectionDemo.java)
@include samples/java/tutorial_code/objectDetection/cascade_classifier/ObjectDetectionDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py)
@include samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py
@end_toggle
Working with a boosted cascade of weak classifiers includes two major stages: the training and the detection stage. The detection stage using either HAAR or LBP based models, is described in the @ref tutorial_cascade_classifier "object detection tutorial". This documentation gives an overview of the functionality needed to train your own boosted cascade of weak classifiers. The current guide will walk through all the different stages: collecting training data, preparation of the training data and executing the actual model training.
-To support this tutorial, several official OpenCV applications will be used: [opencv_createsamples](https://github.com/opencv/opencv/tree/3.4/apps/createsamples), [opencv_annotation](https://github.com/opencv/opencv/tree/3.4/apps/annotation), [opencv_traincascade](https://github.com/opencv/opencv/tree/3.4/apps/traincascade) and [opencv_visualisation](https://github.com/opencv/opencv/tree/3.4/apps/visualisation).
+To support this tutorial, several official OpenCV applications will be used: [opencv_createsamples](https://github.com/opencv/opencv/tree/master/apps/createsamples), [opencv_annotation](https://github.com/opencv/opencv/tree/master/apps/annotation), [opencv_traincascade](https://github.com/opencv/opencv/tree/master/apps/traincascade) and [opencv_visualisation](https://github.com/opencv/opencv/tree/master/apps/visualisation).
### Important notes
@add_toggle_cpp
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp)
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp)
@include samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp
@end_toggle
@add_toggle_java
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/java/tutorial_code/photo/hdr_imaging/HDRImagingDemo.java)
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/photo/hdr_imaging/HDRImagingDemo.java)
@include samples/java/tutorial_code/photo/hdr_imaging/HDRImagingDemo.java
@end_toggle
@add_toggle_python
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/python/tutorial_code/photo/hdr_imaging/hdr_imaging.py)
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/photo/hdr_imaging/hdr_imaging.py)
@include samples/python/tutorial_code/photo/hdr_imaging/hdr_imaging.py
@end_toggle
-------------
Data directory that contains images, exposure times and `list.txt` file can be downloaded from
-[here](https://github.com/opencv/opencv_extra/tree/3.4/testdata/cv/hdr/exposures).
+[here](https://github.com/opencv/opencv_extra/tree/master/testdata/cv/hdr/exposures).
Explanation
-----------
----
This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/3.4/samples/cpp/samples/cpp/stitching.cpp).
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/stitching.cpp).
@include samples/cpp/stitching.cpp
If you want to study internals of the stitching pipeline or you want to experiment with detailed
configuration see
-[stitching_detailed.cpp](https://github.com/opencv/opencv/tree/3.4/samples/cpp/stitching_detailed.cpp)
+[stitching_detailed.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/stitching_detailed.cpp)
in `opencv/samples/cpp` folder.
We will use @ref cv::BackgroundSubtractorMOG2 in this sample, to generate the foreground mask.
The results as well as the input data are shown on the screen.
-The source file can be downloaded [here ](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/video/bg_sub.cpp).
+The source file can be downloaded [here ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/video/bg_sub.cpp).
@include samples/cpp/tutorial_code/video/bg_sub.cpp
flag value is assumed by default if neither of the two possible values of the property is set.
For more information please refer to the example of usage
-[intelperc_capture.cpp](https://github.com/opencv/opencv/tree/3.4/samples/cpp/intelperc_capture.cpp)
+[intelperc_capture.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/intelperc_capture.cpp)
in opencv/samples/cpp folder.
- CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION
For more information please refer to the example of usage
-[openni_capture.cpp](https://github.com/opencv/opencv/tree/3.4/samples/cpp/openni_capture.cpp) in
+[openni_capture.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/openni_capture.cpp) in
opencv/samples/cpp folder.
video files and performs a similarity check between them. This is something you could use to check
just how well a new video compressing algorithms works. Let there be a reference (original) video
like [this small Megamind clip
-](https://github.com/opencv/opencv/tree/3.4/samples/data/Megamind.avi) and [a compressed
-version of it ](https://github.com/opencv/opencv/tree/3.4/samples/data/Megamind_bugy.avi).
+](https://github.com/opencv/opencv/tree/master/samples/data/Megamind.avi) and [a compressed
+version of it ](https://github.com/opencv/opencv/tree/master/samples/data/Megamind_bugy.avi).
You may also find the source code and these video file in the
`samples/data` folder of the OpenCV source library.
You may also find the source code and these video file in the
`samples/cpp/tutorial_code/videoio/video-write/` folder of the OpenCV source library or [download it
-from here ](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/videoio/video-write/video-write.cpp).
+from here ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/videoio/video-write/video-write.cpp).
@include cpp/tutorial_code/videoio/video-write/video-write.cpp
Code
----
-You can download the code from [here ](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/viz/creating_widgets.cpp).
+You can download the code from [here ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/viz/creating_widgets.cpp).
@include samples/cpp/tutorial_code/viz/creating_widgets.cpp
Explanation
Code
----
-You can download the code from [here ](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/viz/histo3D.cpp).
+You can download the code from [here ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/viz/histo3D.cpp).
@include samples/cpp/tutorial_code/viz/histo3D.cpp
Explanation
Code
----
-You can download the code from [here ](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/viz/launching_viz.cpp).
+You can download the code from [here ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/viz/launching_viz.cpp).
@include samples/cpp/tutorial_code/viz/launching_viz.cpp
Explanation
Code
----
-You can download the code from [here ](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/viz/transformations.cpp).
+You can download the code from [here ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/viz/transformations.cpp).
@include samples/cpp/tutorial_code/viz/transformations.cpp
Explanation
Code
----
-You can download the code from [here ](https://github.com/opencv/opencv/tree/3.4/samples/cpp/tutorial_code/viz/widget_pose.cpp).
+You can download the code from [here ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/viz/widget_pose.cpp).
@include samples/cpp/tutorial_code/viz/widget_pose.cpp
Explanation
SYMMETRIC_GRID, ASYMMETRIC_GRID
};
GridType gridType;
-};
-
-struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters2 : public CirclesGridFinderParameters
-{
- CV_WRAP CirclesGridFinderParameters2();
CV_PROP_RW float squareSize; //!< Distance between two adjacent points. Used by CALIB_CB_CLUSTERING.
CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from predicion. Used by CALIB_CB_CLUSTERING.
};
+#ifndef DISABLE_OPENCV_3_COMPATIBILITY
+typedef CirclesGridFinderParameters CirclesGridFinderParameters2;
+#endif
+
/** @brief Finds centers in the grid of circles.
@param image grid view of input circles; it must be an 8-bit grayscale or color image.
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags,
const Ptr<FeatureDetector> &blobDetector,
- CirclesGridFinderParameters parameters);
-
-/** @overload */
-CV_EXPORTS_W bool findCirclesGrid2( InputArray image, Size patternSize,
- OutputArray centers, int flags,
- const Ptr<FeatureDetector> &blobDetector,
- CirclesGridFinderParameters2 parameters);
+ const CirclesGridFinderParameters& parameters);
/** @overload */
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
} //end namespace cv
-#ifndef DISABLE_OPENCV_24_COMPATIBILITY
-#include "opencv2/calib3d/calib3d_c.h"
-#endif
-
#endif
{
"class_ignore_list": [
- "CirclesGridFinderParameters",
- "CirclesGridFinderParameters2"
+ "CirclesGridFinderParameters"
],
"missing_consts" : {
"Calib3d": {
return 0;
}
-bool findCirclesGrid(InputArray image, Size patternSize,
- OutputArray centers, int flags,
- const Ptr<FeatureDetector> &blobDetector,
- CirclesGridFinderParameters parameters)
-{
- CirclesGridFinderParameters2 parameters2;
- *((CirclesGridFinderParameters*)&parameters2) = parameters;
- return cv::findCirclesGrid2(image, patternSize, centers, flags, blobDetector, parameters2);
-}
-
-bool findCirclesGrid2(InputArray _image, Size patternSize,
- OutputArray _centers, int flags, const Ptr<FeatureDetector> &blobDetector,
- CirclesGridFinderParameters2 parameters)
+bool findCirclesGrid( InputArray _image, Size patternSize,
+ OutputArray _centers, int flags, const Ptr<FeatureDetector> &blobDetector,
+ const CirclesGridFinderParameters& parameters_)
{
CV_INSTRUMENT_REGION()
+ CirclesGridFinderParameters parameters = parameters_; // parameters.gridType is amended below
+
bool isAsymmetricGrid = (flags & CALIB_CB_ASYMMETRIC_GRID) ? true : false;
bool isSymmetricGrid = (flags & CALIB_CB_SYMMETRIC_GRID ) ? true : false;
CV_Assert(isAsymmetricGrid ^ isSymmetricGrid);
bool findCirclesGrid(InputArray _image, Size patternSize,
OutputArray _centers, int flags, const Ptr<FeatureDetector> &blobDetector)
{
- return cv::findCirclesGrid2(_image, patternSize, _centers, flags, blobDetector, CirclesGridFinderParameters2());
+ return cv::findCirclesGrid(_image, patternSize, _centers, flags, blobDetector, CirclesGridFinderParameters());
}
} // namespace
void CirclesGridClusterFinder::hierarchicalClustering(const std::vector<Point2f> &points, const Size &patternSz, std::vector<Point2f> &patternPoints)
{
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(tegra::useTegra() && tegra::hierarchicalClustering(points, patternSz, patternPoints))
- return;
-#endif
int j, n = (int)points.size();
size_t pn = static_cast<size_t>(patternSz.area());
minRNGEdgeSwitchDist = 5.f;
gridType = SYMMETRIC_GRID;
-}
-CirclesGridFinderParameters2::CirclesGridFinderParameters2()
-: CirclesGridFinderParameters()
-{
- squareSize = 1.0f;
- maxRectifiedDistance = squareSize/2.0f;
+ squareSize = 1.0f;
+ maxRectifiedDistance = squareSize/2.0f;
}
CirclesGridFinder::CirclesGridFinder(Size _patternSize, const std::vector<Point2f> &testKeypoints,
#include <numeric>
#include <map>
-#include "precomp.hpp"
-
class CirclesGridClusterFinder
{
CirclesGridClusterFinder& operator=(const CirclesGridClusterFinder&);
CirclesGridClusterFinder(const CirclesGridClusterFinder&);
public:
- CirclesGridClusterFinder(const cv::CirclesGridFinderParameters2 &parameters)
+ CirclesGridClusterFinder(const cv::CirclesGridFinderParameters &parameters)
{
isAsymmetricGrid = parameters.gridType == cv::CirclesGridFinderParameters::ASYMMETRIC_GRID;
squareSize = parameters.squareSize;
#include "opencv2/core/ocl.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/calib3d/calib3d_tegra.hpp"
-#else
#define GET_OPTIMIZED(func) (func)
-#endif
namespace cv
std::transform(fpts.ptr<Point3f>(), fpts.ptr<Point3f>() + n, tpts.ptr<Point3f>(), WrapAff(aff));
/* adding noise*/
-#ifdef CV_CXX11
std::transform(tpts.ptr<Point3f>() + m, tpts.ptr<Point3f>() + n, tpts.ptr<Point3f>() + m,
[=] (const Point3f& pt) -> Point3f { return Noise(noise_level)(pt + shift_outl); });
-#else
- std::transform(tpts.ptr<Point3f>() + m, tpts.ptr<Point3f>() + n, tpts.ptr<Point3f>() + m, std::bind2nd(std::plus<Point3f>(), shift_outl));
- std::transform(tpts.ptr<Point3f>() + m, tpts.ptr<Point3f>() + n, tpts.ptr<Point3f>() + m, Noise(noise_level));
-#endif
Mat aff_est;
vector<uchar> outl;
3. Merge the results into a single disparity map.
With this algorithm, a dual GPU gave a 180% performance increase comparing to the single Fermi GPU.
-For a source code example, see <https://github.com/opencv/opencv/tree/3.4/samples/gpu/>.
+For a source code example, see <https://github.com/opencv/opencv/tree/master/samples/gpu/>.
@param exc the exception raisen.
@deprecated drop this version
*/
-CV_EXPORTS void error( const Exception& exc );
+CV_EXPORTS CV_NORETURN void error(const Exception& exc);
enum SortFlags { SORT_EVERY_ROW = 0, //!< each matrix row is sorted independently
SORT_EVERY_COLUMN = 1, //!< each matrix column is sorted
// access pixel coordinates
Point pnt = locations[i];
@endcode
-@param src single-channel array (type CV_8UC1)
+@param src single-channel array
@param idx the output array, type of cv::Mat or std::vector<Point>, corresponding to non-zero indices in the input
*/
CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
/** @brief Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric.
-This function calculates the Peak Signal-to-Noise Ratio (PSNR) image quality metric in decibels (dB), between two input arrays src1 and src2. Arrays must have depth CV_8U.
+This function calculates the Peak Signal-to-Noise Ratio (PSNR) image quality metric in decibels (dB),
+between two input arrays src1 and src2. The arrays must have the same type.
The PSNR is calculated as follows:
\texttt{PSNR} = 10 \cdot \log_{10}{\left( \frac{R^2}{MSE} \right) }
\f]
-where R is the maximum integer value of depth CV_8U (255) and MSE is the mean squared error between the two arrays.
+where R is the maximum integer value of depth (e.g. 255 in the case of CV_8U data)
+and MSE is the mean squared error between the two arrays.
@param src1 first input array.
@param src2 second input array of the same size as src1.
+@param R the maximum pixel value (255 by default)
*/
-CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2);
+CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2, double R=255.);
/** @brief naive nearest neighbor finder
static Mat subspaceReconstruct(InputArray W, InputArray mean, InputArray src);
protected:
- bool _dataAsRow; // unused, but needed for 3.0 ABI compatibility.
int _num_components;
Mat _eigenvectors;
Mat _eigenvalues;
@param _line - line number in the source file where the error has occurred
@see CV_Error, CV_Error_, CV_Assert, CV_DbgAssert
*/
-CV_EXPORTS void error(int _code, const String& _err, const char* _func, const char* _file, int _line);
-
-#ifdef __GNUC__
-# if defined __clang__ || defined __APPLE__
-# pragma GCC diagnostic push
-# pragma GCC diagnostic ignored "-Winvalid-noreturn"
-# endif
-#endif
-
-/** same as cv::error, but does not return */
-CV_INLINE CV_NORETURN void errorNoReturn(int _code, const String& _err, const char* _func, const char* _file, int _line)
-{
- error(_code, _err, _func, _file, _line);
-#ifdef __GNUC__
-# if !defined __clang__ && !defined __APPLE__
- // this suppresses this warning: "noreturn" function does return [enabled by default]
- __builtin_trap();
- // or use infinite loop: for (;;) {}
-# endif
-#endif
-}
-#ifdef __GNUC__
-# if defined __clang__ || defined __APPLE__
-# pragma GCC diagnostic pop
-# endif
-#endif
+CV_EXPORTS CV_NORETURN void error(int _code, const String& _err, const char* _func, const char* _file, int _line);
#if defined __GNUC__
#define CV_Func __func__
#define CV_Assert_1( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )
+#endif // CV_STATIC_ANALYSIS
+
//! @cond IGNORED
-#define CV__ErrorNoReturn( code, msg ) cv::errorNoReturn( code, msg, CV_Func, __FILE__, __LINE__ )
-#define CV__ErrorNoReturn_( code, args ) cv::errorNoReturn( code, cv::format args, CV_Func, __FILE__, __LINE__ )
-#ifdef __OPENCV_BUILD
-#undef CV_Error
-#define CV_Error CV__ErrorNoReturn
-#undef CV_Error_
-#define CV_Error_ CV__ErrorNoReturn_
-#undef CV_Assert_1
-#define CV_Assert_1( expr ) if(!!(expr)) ; else cv::errorNoReturn( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )
-#else
-// backward compatibility
-#define CV_ErrorNoReturn CV__ErrorNoReturn
-#define CV_ErrorNoReturn_ CV__ErrorNoReturn_
+#if !defined(__OPENCV_BUILD) // TODO: backward compatibility only
+#ifndef CV_ErrorNoReturn
+#define CV_ErrorNoReturn CV_Error
+#endif
+#ifndef CV_ErrorNoReturn_
+#define CV_ErrorNoReturn_ CV_Error_
+#endif
#endif
//! @endcond
-#endif // CV_STATIC_ANALYSIS
-
#define CV_Assert_2( expr1, expr2 ) CV_Assert_1(expr1); CV_Assert_1(expr2)
#define CV_Assert_3( expr1, expr2, expr3 ) CV_Assert_2(expr1, expr2); CV_Assert_1(expr3)
#define CV_Assert_4( expr1, expr2, expr3, expr4 ) CV_Assert_3(expr1, expr2, expr3); CV_Assert_1(expr4)
namespace ipp
{
-#if OPENCV_ABI_COMPATIBILITY > 300
CV_EXPORTS unsigned long long getIppFeatures();
-#else
-CV_EXPORTS int getIppFeatures();
-#endif
CV_EXPORTS void setIppStatus(int status, const char * const funcname = NULL, const char * const filename = NULL,
int line = 0);
CV_EXPORTS int getIppStatus();
namespace cv { namespace cuda { namespace device
{
// Function Objects
-#ifdef CV_CXX11
template<typename Argument, typename Result> struct unary_function
{
typedef Argument argument_type;
typedef Argument2 second_argument_type;
typedef Result result_type;
};
-#else
- template<typename Argument, typename Result> struct unary_function : public std::unary_function<Argument, Result> {};
- template<typename Argument1, typename Argument2, typename Result> struct binary_function : public std::binary_function<Argument1, Argument2, Result> {};
-#endif
// Arithmetic Operations
template <typename T> struct plus : binary_function<T, T, T>
return VecTraits<output_type ## 4>::make(func (a.x), func (a.y), func (a.z), func (a.w)); \
}
-CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uchar, uchar)
-CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, char, char)
-CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, ushort, ushort)
-CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, short, short)
-CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::abs, int, int)
-CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, /*::abs*/, uint, uint)
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabsf, float, float)
-CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(abs, ::fabs, double, double)
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, uchar, float)
CV_CUDEV_IMPLEMENT_VEC_UNARY_FUNC(sqrt, ::sqrtf, char, float)
}
Cv64suf;
-#define OPENCV_ABI_COMPATIBILITY 300
+#define OPENCV_ABI_COMPATIBILITY 400
#ifdef __OPENCV_BUILD
-# define DISABLE_OPENCV_24_COMPATIBILITY
+# define DISABLE_OPENCV_3_COMPATIBILITY
# define OPENCV_DISABLE_DEPRECATED_COMPATIBILITY
#endif
# undef CV_CXX11
# endif
#endif
-
-
-/****************************************************************************************\
-* C++ Move semantics *
-\****************************************************************************************/
-
-#ifndef CV_CXX_MOVE_SEMANTICS
-# if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__) || (defined(_MSC_VER) && _MSC_VER >= 1600)
-# define CV_CXX_MOVE_SEMANTICS 1
-# elif defined(__clang)
-# if __has_feature(cxx_rvalue_references)
-# define CV_CXX_MOVE_SEMANTICS 1
-# endif
-# endif
-#else
-# if CV_CXX_MOVE_SEMANTICS == 0
-# undef CV_CXX_MOVE_SEMANTICS
-# endif
-#endif
-
-/****************************************************************************************\
-* C++11 std::array *
-\****************************************************************************************/
-
-#ifndef CV_CXX_STD_ARRAY
-# if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1900/*MSVS 2015*/)
-# define CV_CXX_STD_ARRAY 1
-# include <array>
-# endif
-#else
-# if CV_CXX_STD_ARRAY == 0
-# undef CV_CXX_STD_ARRAY
-# endif
+#ifndef CV_CXX11
+# error "OpenCV 4.x+ requires enabled C++11 support"
#endif
-
-/****************************************************************************************\
-* C++11 override / final *
-\****************************************************************************************/
-
+#define CV_CXX_MOVE_SEMANTICS 1
+#define CV_CXX_STD_ARRAY 1
+#include <array>
#ifndef CV_OVERRIDE
-# ifdef CV_CXX11
-# define CV_OVERRIDE override
-# endif
+# define CV_OVERRIDE override
#endif
-#ifndef CV_OVERRIDE
-# define CV_OVERRIDE
-#endif
-
#ifndef CV_FINAL
-# ifdef CV_CXX11
-# define CV_FINAL final
-# endif
+# define CV_FINAL final
#endif
-#ifndef CV_FINAL
-# define CV_FINAL
-#endif
-
// Integer types portatibility
@note It is often easier to use makePtr instead.
*/
template<typename Y>
-#ifdef DISABLE_OPENCV_24_COMPATIBILITY
- explicit
-#endif
- Ptr(Y* p);
+ explicit Ptr(Y* p);
/** @overload
@param d Deleter to use for the owned pointer.
template<typename Y>
Ptr<Y> dynamicCast() const;
-#ifdef CV_CXX_MOVE_SEMANTICS
Ptr(Ptr&& o);
Ptr& operator = (Ptr&& o);
-#endif
private:
detail::PtrOwner* owner;
# endif
#endif
-#ifdef HAVE_TEGRA_OPTIMIZATION
-# include "tegra_round.hpp"
-#endif
-
#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__ || defined __ARM_NEON__) && !defined __SOFTFP__ && !defined(__CUDACC__)
// 1. general scheme
#define ARM_ROUND(_value, _asm_string) \
fistp t;
}
return t;
-#elif ((defined _MSC_VER && defined _M_ARM) || defined CV_ICC || \
- defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
- TEGRA_ROUND_DBL(value);
#elif defined CV_ICC || defined __GNUC__
# if defined ARM_ROUND_DBL
ARM_ROUND_DBL(value);
fistp t;
}
return t;
-#elif ((defined _MSC_VER && defined _M_ARM) || defined CV_ICC || \
- defined __GNUC__) && defined HAVE_TEGRA_OPTIMIZATION
- TEGRA_ROUND_FLT(value);
#elif defined CV_ICC || defined __GNUC__
# if defined ARM_ROUND_FLT
ARM_ROUND_FLT(value);
#include "opencv2/core/bufferpool.hpp"
-#ifdef CV_CXX11
#include <type_traits>
-#endif
namespace cv
{
_InputArray(const UMat& um);
_InputArray(const std::vector<UMat>& umv);
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp, std::size_t _Nm> _InputArray(const std::array<_Tp, _Nm>& arr);
template<std::size_t _Nm> _InputArray(const std::array<Mat, _Nm>& arr);
-#endif
Mat getMat(int idx=-1) const;
Mat getMat_(int idx=-1) const;
*/
template<typename _Tp> explicit Mat(const std::vector<_Tp>& vec, bool copyData=false);
-#ifdef CV_CXX11
/** @overload
*/
template<typename _Tp, typename = typename std::enable_if<std::is_arithmetic<_Tp>::value>::type>
/** @overload
*/
template<typename _Tp> explicit Mat(const std::initializer_list<int> sizes, const std::initializer_list<_Tp> list);
-#endif
#ifdef CV_CXX_STD_ARRAY
/** @overload
/** @overload */
template<typename _Tp, typename Functor> void forEach(const Functor& operation) const;
-#ifdef CV_CXX_MOVE_SEMANTICS
Mat(Mat&& m);
Mat& operator = (Mat&& m);
-#endif
enum { MAGIC_VAL = 0x42FF0000, AUTO_STEP = 0, CONTINUOUS_FLAG = CV_MAT_CONT_FLAG, SUBMATRIX_FLAG = CV_SUBMAT_FLAG };
enum { MAGIC_MASK = 0xFFFF0000, TYPE_MASK = 0x00000FFF, DEPTH_MASK = 7 };
explicit Mat_(const Point3_<typename DataType<_Tp>::channel_type>& pt, bool copyData=true);
explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer);
-#ifdef CV_CXX11
Mat_(std::initializer_list<_Tp> values);
explicit Mat_(const std::initializer_list<int> sizes, const std::initializer_list<_Tp> values);
-#endif
#ifdef CV_CXX_STD_ARRAY
template <std::size_t _Nm> explicit Mat_(const std::array<_Tp, _Nm>& arr, bool copyData=false);
//! conversion to Matx
template<int m, int n> operator Matx<typename DataType<_Tp>::channel_type, m, n>() const;
-#ifdef CV_CXX_MOVE_SEMANTICS
Mat_(Mat_&& m);
Mat_& operator = (Mat_&& m);
Mat_& operator = (Mat&& m);
Mat_(MatExpr&& e);
-#endif
};
typedef Mat_<uchar> Mat1b;
//! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise
int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const;
-#ifdef CV_CXX_MOVE_SEMANTICS
UMat(UMat&& m);
UMat& operator = (UMat&& m);
-#endif
/*! Returns the OpenCL buffer handle on which UMat operates on.
The UMat instance should be kept alive during the use of the handle to prevent the buffer to be
_InputArray::_InputArray(const std::vector<_Tp>& vec)
{ init(FIXED_TYPE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_READ, &vec); }
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp, std::size_t _Nm> inline
_InputArray::_InputArray(const std::array<_Tp, _Nm>& arr)
{ init(FIXED_TYPE + FIXED_SIZE + STD_ARRAY + traits::Type<_Tp>::value + ACCESS_READ, arr.data(), Size(1, _Nm)); }
template<std::size_t _Nm> inline
_InputArray::_InputArray(const std::array<Mat, _Nm>& arr)
{ init(STD_ARRAY_MAT + ACCESS_READ, arr.data(), Size(1, _Nm)); }
-#endif
inline
_InputArray::_InputArray(const std::vector<bool>& vec)
_OutputArray::_OutputArray(std::vector<_Tp>& vec)
{ init(FIXED_TYPE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_WRITE, &vec); }
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp, std::size_t _Nm> inline
_OutputArray::_OutputArray(std::array<_Tp, _Nm>& arr)
{ init(FIXED_TYPE + FIXED_SIZE + STD_ARRAY + traits::Type<_Tp>::value + ACCESS_WRITE, arr.data(), Size(1, _Nm)); }
template<std::size_t _Nm> inline
_OutputArray::_OutputArray(std::array<Mat, _Nm>& arr)
{ init(STD_ARRAY_MAT + ACCESS_WRITE, arr.data(), Size(1, _Nm)); }
-#endif
inline
_OutputArray::_OutputArray(std::vector<bool>&)
_OutputArray::_OutputArray(const std::vector<_Tp>& vec)
{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_WRITE, &vec); }
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp, std::size_t _Nm> inline
_OutputArray::_OutputArray(const std::array<_Tp, _Nm>& arr)
{ init(FIXED_TYPE + FIXED_SIZE + STD_ARRAY + traits::Type<_Tp>::value + ACCESS_WRITE, arr.data(), Size(1, _Nm)); }
template<std::size_t _Nm> inline
_OutputArray::_OutputArray(const std::array<Mat, _Nm>& arr)
{ init(FIXED_SIZE + STD_ARRAY_MAT + ACCESS_WRITE, arr.data(), Size(1, _Nm)); }
-#endif
template<typename _Tp> inline
_OutputArray::_OutputArray(const std::vector<std::vector<_Tp> >& vec)
_InputOutputArray::_InputOutputArray(std::vector<_Tp>& vec)
{ init(FIXED_TYPE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_RW, &vec); }
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp, std::size_t _Nm> inline
_InputOutputArray::_InputOutputArray(std::array<_Tp, _Nm>& arr)
{ init(FIXED_TYPE + FIXED_SIZE + STD_ARRAY + traits::Type<_Tp>::value + ACCESS_RW, arr.data(), Size(1, _Nm)); }
template<std::size_t _Nm> inline
_InputOutputArray::_InputOutputArray(std::array<Mat, _Nm>& arr)
{ init(STD_ARRAY_MAT + ACCESS_RW, arr.data(), Size(1, _Nm)); }
-#endif
inline _InputOutputArray::_InputOutputArray(std::vector<bool>&)
{ CV_Error(Error::StsUnsupportedFormat, "std::vector<bool> cannot be an input/output array\n"); }
_InputOutputArray::_InputOutputArray(const std::vector<_Tp>& vec)
{ init(FIXED_TYPE + FIXED_SIZE + STD_VECTOR + traits::Type<_Tp>::value + ACCESS_RW, &vec); }
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp, std::size_t _Nm> inline
_InputOutputArray::_InputOutputArray(const std::array<_Tp, _Nm>& arr)
{ init(FIXED_TYPE + FIXED_SIZE + STD_ARRAY + traits::Type<_Tp>::value + ACCESS_RW, arr.data(), Size(1, _Nm)); }
template<std::size_t _Nm> inline
_InputOutputArray::_InputOutputArray(const std::array<Mat, _Nm>& arr)
{ init(FIXED_SIZE + STD_ARRAY_MAT + ACCESS_RW, arr.data(), Size(1, _Nm)); }
-#endif
template<typename _Tp> inline
_InputOutputArray::_InputOutputArray(const std::vector<std::vector<_Tp> >& vec)
Mat((int)vec.size(), 1, traits::Type<_Tp>::value, (uchar*)&vec[0]).copyTo(*this);
}
-#ifdef CV_CXX11
template<typename _Tp, typename> inline
Mat::Mat(const std::initializer_list<_Tp> list)
: Mat()
CV_Assert(size_total == list.size());
Mat((int)sizes.size(), (int*)sizes.begin(), traits::Type<_Tp>::value, (uchar*)list.begin()).copyTo(*this);
}
-#endif
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp, std::size_t _Nm> inline
Mat::Mat(const std::array<_Tp, _Nm>& arr, bool copyData)
: flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows((int)arr.size()),
else
Mat((int)arr.size(), 1, traits::Type<_Tp>::value, (uchar*)arr.data()).copyTo(*this);
}
-#endif
template<typename _Tp, int n> inline
Mat::Mat(const Vec<_Tp, n>& vec, bool copyData)
return v;
}
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp, std::size_t _Nm> inline
Mat::operator std::array<_Tp, _Nm>() const
{
copyTo(v);
return v;
}
-#endif
template<typename _Tp, int n> inline
Mat::operator Vec<_Tp, n>() const
push_back(Mat(v));
}
-#ifdef CV_CXX_MOVE_SEMANTICS
-
inline
Mat::Mat(Mat&& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data),
return *this;
}
-#endif
-
///////////////////////////// MatSize ////////////////////////////
: Mat(vec, copyData)
{}
-#ifdef CV_CXX11
template<typename _Tp> inline
Mat_<_Tp>::Mat_(std::initializer_list<_Tp> list)
: Mat(list)
Mat_<_Tp>::Mat_(const std::initializer_list<int> sizes, std::initializer_list<_Tp> list)
: Mat(sizes, list)
{}
-#endif
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp> template<std::size_t _Nm> inline
Mat_<_Tp>::Mat_(const std::array<_Tp, _Nm>& arr, bool copyData)
: Mat(arr, copyData)
{}
-#endif
template<typename _Tp> inline
Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m)
return v;
}
-#ifdef CV_CXX_STD_ARRAY
template<typename _Tp> template<std::size_t _Nm> inline
Mat_<_Tp>::operator std::array<_Tp, _Nm>() const
{
copyTo(a);
return a;
}
-#endif
template<typename _Tp> template<int n> inline
Mat_<_Tp>::operator Vec<typename DataType<_Tp>::channel_type, n>() const
Mat::forEach<_Tp, Functor>(operation);
}
-#ifdef CV_CXX_MOVE_SEMANTICS
-
template<typename _Tp> inline
Mat_<_Tp>::Mat_(Mat_&& m)
: Mat(m)
*this = Mat(e);
}
-#endif
///////////////////////////// SparseMat /////////////////////////////
return p;
}
-#ifdef CV_CXX_MOVE_SEMANTICS
-
inline
UMat::UMat(UMat&& m)
: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator),
return *this;
}
-#endif
-
inline bool UMatData::hostCopyObsolete() const { return (flags & HOST_COPY_OBSOLETE) != 0; }
inline bool UMatData::deviceCopyObsolete() const { return (flags & DEVICE_COPY_OBSOLETE) != 0; }
#include "opencv2/core/traits.hpp"
#include "opencv2/core/saturate.hpp"
-#ifdef CV_CXX11
#include <initializer_list>
-#endif
namespace cv
{
_Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix
explicit Matx(const _Tp* vals); //!< initialize from a plain array
-#ifdef CV_CXX11
Matx(std::initializer_list<_Tp>); //!< initialize from an initializer list
-#endif
static Matx all(_Tp alpha);
static Matx zeros();
Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9, _Tp v10, _Tp v11, _Tp v12, _Tp v13); //!< 14-element vector constructor
explicit Vec(const _Tp* values);
-#ifdef CV_CXX11
Vec(std::initializer_list<_Tp>);
-#endif
Vec(const Vec<_Tp, cn>& v);
for( int i = 0; i < channels; i++ ) val[i] = values[i];
}
-#ifdef CV_CXX11
template<typename _Tp, int m, int n> inline
Matx<_Tp, m, n>::Matx(std::initializer_list<_Tp> list)
{
val[i++] = elem;
}
}
-#endif
template<typename _Tp, int m, int n> inline
Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha)
Vec<_Tp, cn>::Vec(const _Tp* values)
: Matx<_Tp, cn, 1>(values) {}
-#ifdef CV_CXX11
template<typename _Tp, int cn> inline
Vec<_Tp, cn>::Vec(std::initializer_list<_Tp> list)
: Matx<_Tp, cn, 1>(list) {}
-#endif
template<typename _Tp, int cn> inline
Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m)
/// @overload
CV_WRAP void write(const String& name, const String& val);
/// @overload
- CV_WRAP void write(const String& name, InputArray val);
+ CV_WRAP void write(const String& name, const Mat& val);
+ /// @overload
+ CV_WRAP void write(const String& name, const std::vector<String>& val);
/** @brief Writes a comment.
}
CvStatus;
-#ifdef HAVE_TEGRA_OPTIMIZATION
-namespace tegra {
-
-CV_EXPORTS bool useTegra();
-CV_EXPORTS void setUseTegra(bool flag);
-
-}
-#endif
-
#ifdef ENABLE_INSTRUMENTATION
namespace cv
{
return Ptr<Y>(*this, dynamic_cast<Y*>(stored));
}
-#ifdef CV_CXX_MOVE_SEMANTICS
-
template<typename T>
Ptr<T>::Ptr(Ptr&& o) : owner(o.owner), stored(o.stored)
{
return *this;
}
-#endif
-
template<typename T>
void swap(Ptr<T>& ptr1, Ptr<T>& ptr2){
Point_();
Point_(_Tp _x, _Tp _y);
Point_(const Point_& pt);
+ Point_(Point_&& pt) noexcept;
Point_(const Size_<_Tp>& sz);
Point_(const Vec<_Tp, 2>& v);
Point_& operator = (const Point_& pt);
+ Point_& operator = (Point_&& pt) noexcept;
//! conversion to another data type
template<typename _Tp2> operator Point_<_Tp2>() const;
Point3_();
Point3_(_Tp _x, _Tp _y, _Tp _z);
Point3_(const Point3_& pt);
+ Point3_(Point3_&& pt) noexcept;
explicit Point3_(const Point_<_Tp>& pt);
Point3_(const Vec<_Tp, 3>& v);
Point3_& operator = (const Point3_& pt);
+ Point3_& operator = (Point3_&& pt) noexcept;
//! conversion to another data type
template<typename _Tp2> operator Point3_<_Tp2>() const;
//! conversion to cv::Vec<>
-#if OPENCV_ABI_COMPATIBILITY > 300
- template<typename _Tp2> operator Vec<_Tp2, 3>() const;
-#else
operator Vec<_Tp, 3>() const;
-#endif
//! dot product
_Tp dot(const Point3_& pt) const;
Size_();
Size_(_Tp _width, _Tp _height);
Size_(const Size_& sz);
+ Size_(Size_&& sz) noexcept;
Size_(const Point_<_Tp>& pt);
Size_& operator = (const Size_& sz);
+ Size_& operator = (Size_&& sz) noexcept;
//! the area (width*height)
_Tp area() const;
+ //! aspect ratio (width/height)
+ double aspectRatio() const;
//! true if empty
bool empty() const;
Rect_();
Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);
Rect_(const Rect_& r);
+ Rect_(Rect_&& r) noexcept;
Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz);
Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2);
Rect_& operator = ( const Rect_& r );
+ Rect_& operator = ( Rect_&& r ) noexcept;
//! the top-left corner
Point_<_Tp> tl() const;
//! the bottom-right corner
Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0);
Scalar_(_Tp v0);
+ Scalar_(const Scalar_& s);
+ Scalar_(Scalar_&& s) noexcept;
+
+ Scalar_& operator=(const Scalar_& s);
+ Scalar_& operator=(Scalar_&& s) noexcept;
+
template<typename _Tp2, int cn>
Scalar_(const Vec<_Tp2, cn>& v);
: x(pt.x), y(pt.y) {}
template<typename _Tp> inline
+Point_<_Tp>::Point_(Point_&& pt) noexcept
+ : x(std::move(pt.x)), y(std::move(pt.y)) {}
+
+template<typename _Tp> inline
Point_<_Tp>::Point_(const Size_<_Tp>& sz)
: x(sz.width), y(sz.height) {}
return *this;
}
+template<typename _Tp> inline
+Point_<_Tp>& Point_<_Tp>::operator = (Point_&& pt) noexcept
+{
+ x = std::move(pt.x); y = std::move(pt.y);
+ return *this;
+}
+
template<typename _Tp> template<typename _Tp2> inline
Point_<_Tp>::operator Point_<_Tp2>() const
{
: x(pt.x), y(pt.y), z(pt.z) {}
template<typename _Tp> inline
+Point3_<_Tp>::Point3_(Point3_&& pt) noexcept
+ : x(std::move(pt.x)), y(std::move(pt.y)), z(std::move(pt.z)) {}
+
+template<typename _Tp> inline
Point3_<_Tp>::Point3_(const Point_<_Tp>& pt)
: x(pt.x), y(pt.y), z(_Tp()) {}
return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z));
}
-#if OPENCV_ABI_COMPATIBILITY > 300
-template<typename _Tp> template<typename _Tp2> inline
-Point3_<_Tp>::operator Vec<_Tp2, 3>() const
-{
- return Vec<_Tp2, 3>(x, y, z);
-}
-#else
template<typename _Tp> inline
Point3_<_Tp>::operator Vec<_Tp, 3>() const
{
return Vec<_Tp, 3>(x, y, z);
}
-#endif
template<typename _Tp> inline
Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt)
}
template<typename _Tp> inline
+Point3_<_Tp>& Point3_<_Tp>::operator = (Point3_&& pt) noexcept
+{
+ x = std::move(pt.x); y = std::move(pt.y); z = std::move(pt.z);
+ return *this;
+}
+
+template<typename _Tp> inline
_Tp Point3_<_Tp>::dot(const Point3_& pt) const
{
return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z);
: width(sz.width), height(sz.height) {}
template<typename _Tp> inline
+Size_<_Tp>::Size_(Size_&& sz) noexcept
+ : width(std::move(sz.width)), height(std::move(sz.height)) {}
+
+template<typename _Tp> inline
Size_<_Tp>::Size_(const Point_<_Tp>& pt)
: width(pt.x), height(pt.y) {}
}
template<typename _Tp> inline
+Size_<_Tp>& Size_<_Tp>::operator = (Size_<_Tp>&& sz) noexcept
+{
+ width = std::move(sz.width); height = std::move(sz.height);
+ return *this;
+}
+
+template<typename _Tp> inline
_Tp Size_<_Tp>::area() const
{
const _Tp result = width * height;
}
template<typename _Tp> inline
+double Size_<_Tp>::aspectRatio() const
+{
+ return width / static_cast<double>(height);
+}
+
+template<typename _Tp> inline
bool Size_<_Tp>::empty() const
{
return width <= 0 || height <= 0;
: x(r.x), y(r.y), width(r.width), height(r.height) {}
template<typename _Tp> inline
+Rect_<_Tp>::Rect_(Rect_<_Tp>&& r) noexcept
+ : x(std::move(r.x)), y(std::move(r.y)), width(std::move(r.width)), height(std::move(r.height)) {}
+
+template<typename _Tp> inline
Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz)
: x(org.x), y(org.y), width(sz.width), height(sz.height) {}
}
template<typename _Tp> inline
+Rect_<_Tp>& Rect_<_Tp>::operator = ( Rect_<_Tp>&& r ) noexcept
+{
+ x = std::move(r.x);
+ y = std::move(r.y);
+ width = std::move(r.width);
+ height = std::move(r.height);
+ return *this;
+}
+
+template<typename _Tp> inline
Point_<_Tp> Rect_<_Tp>::tl() const
{
return Point_<_Tp>(x,y);
RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle)
: center(_center), size(_size), angle(_angle) {}
-
-
///////////////////////////////// Range /////////////////////////////////
inline
this->val[3] = v3;
}
+template<typename _Tp> inline
+Scalar_<_Tp>::Scalar_(const Scalar_<_Tp>& s) : Vec<_Tp, 4>(s) {
+}
+
+template<typename _Tp> inline
+Scalar_<_Tp>::Scalar_(Scalar_<_Tp>&& s) noexcept {
+ this->val[0] = std::move(s.val[0]);
+ this->val[1] = std::move(s.val[1]);
+ this->val[2] = std::move(s.val[2]);
+ this->val[3] = std::move(s.val[3]);
+}
+
+template<typename _Tp> inline
+Scalar_<_Tp>& Scalar_<_Tp>::operator=(const Scalar_<_Tp>& s) {
+ this->val[0] = s.val[0];
+ this->val[1] = s.val[1];
+ this->val[2] = s.val[2];
+ this->val[3] = s.val[3];
+ return *this;
+}
+
+template<typename _Tp> inline
+Scalar_<_Tp>& Scalar_<_Tp>::operator=(Scalar_<_Tp>&& s) noexcept {
+ this->val[0] = std::move(s.val[0]);
+ this->val[1] = std::move(s.val[1]);
+ this->val[2] = std::move(s.val[2]);
+ this->val[3] = std::move(s.val[3]);
+ return *this;
+}
+
template<typename _Tp> template<typename _Tp2, int cn> inline
Scalar_<_Tp>::Scalar_(const Vec<_Tp2, cn>& v)
{
#include "opencv2/core.hpp"
#include <ostream>
-#ifdef CV_CXX11
#include <functional>
-#endif
+
+#include <mutex> // std::mutex, std::lock_guard
namespace cv
{
*/
CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.);
-#ifdef CV_CXX11
class ParallelLoopBodyLambdaWrapper : public ParallelLoopBody
{
private:
{
parallel_for_(range, ParallelLoopBodyLambdaWrapper(functor), nstripes);
}
-#endif
/////////////////////////////// forEach method of cv::Mat ////////////////////////////
template<typename _Tp, typename Functor> inline
/////////////////////////// Synchronization Primitives ///////////////////////////////
-class CV_EXPORTS Mutex
-{
-public:
- Mutex();
- ~Mutex();
- Mutex(const Mutex& m);
- Mutex& operator = (const Mutex& m);
-
- void lock();
- bool trylock();
- void unlock();
-
- struct Impl;
-protected:
- Impl* impl;
-};
-
-class CV_EXPORTS AutoLock
-{
-public:
- AutoLock(Mutex& m) : mutex(&m) { mutex->lock(); }
- ~AutoLock() { mutex->unlock(); }
-protected:
- Mutex* mutex;
-private:
- AutoLock(const AutoLock&);
- AutoLock& operator = (const AutoLock&);
-};
+typedef std::recursive_mutex Mutex;
+typedef std::lock_guard<cv::Mutex> AutoLock;
// TLS interface
class CV_EXPORTS TLSDataContainer
virtual ~TLSDataContainer();
void gatherData(std::vector<void*> &data) const;
-#if OPENCV_ABI_COMPATIBILITY > 300
void* getData() const;
void release();
private:
-#else
- void release();
-
-public:
- void* getData() const;
-#endif
virtual void* createDataInstance() const = 0;
virtual void deleteDataInstance(void* pData) const = 0;
} //namespace cv
-#ifndef DISABLE_OPENCV_24_COMPATIBILITY
-#include "opencv2/core/core_c.h"
-#endif
-
#endif //OPENCV_CORE_UTILITY_H
#ifndef OPENCV_VERSION_HPP
#define OPENCV_VERSION_HPP
-#define CV_VERSION_MAJOR 3
-#define CV_VERSION_MINOR 4
-#define CV_VERSION_REVISION 2
-#define CV_VERSION_STATUS "-dev"
+#define CV_VERSION_MAJOR 4
+#define CV_VERSION_MINOR 0
+#define CV_VERSION_REVISION 0
+#define CV_VERSION_STATUS "-pre"
#define CVAUX_STR_EXP(__A) #__A
#define CVAUX_STR(__A) CVAUX_STR_EXP(__A)
{
CV_INSTRUMENT_REGION()
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra())
- {
- int kind1 = _src1.kind(), kind2 = _src2.kind();
- Mat src1 = _src1.getMat(), src2 = _src2.getMat();
- bool src1Scalar = checkScalar(src1, _src2.type(), kind1, kind2);
- bool src2Scalar = checkScalar(src2, _src1.type(), kind2, kind1);
-
- if (!src1Scalar && !src2Scalar &&
- src1.depth() == CV_8U && src2.type() == src1.type() &&
- src1.dims == 2 && src2.size() == src1.size() &&
- mask.empty())
- {
- if (dtype < 0)
- {
- if (_dst.fixedType())
- {
- dtype = _dst.depth();
- }
- else
- {
- dtype = src1.depth();
- }
- }
-
- dtype = CV_MAT_DEPTH(dtype);
-
- if (!_dst.fixedType() || dtype == _dst.depth())
- {
- _dst.create(src1.size(), CV_MAKE_TYPE(dtype, src1.channels()));
-
- if (dtype == CV_16S)
- {
- Mat dst = _dst.getMat();
- if(tegra::subtract_8u8u16s(src1, src2, dst))
- return;
- }
- else if (dtype == CV_32F)
- {
- Mat dst = _dst.getMat();
- if(tegra::subtract_8u8u32f(src1, src2, dst))
- return;
- }
- else if (dtype == CV_8S)
- {
- Mat dst = _dst.getMat();
- if(tegra::subtract_8u8u8s(src1, src2, dst))
- return;
- }
- }
- }
- }
-#endif
arithm_op(_src1, _src2, _dst, mask, dtype, getSubTab(), false, 0, OCL_OP_SUB );
}
ss << "must be " << getTestOpPhraseStr(ctx.testOp) << std::endl;
}
ss << " '" << ctx.p2_str << "' is " << v2;
- cv::errorNoReturn(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
+ cv::error(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
}
void check_failed_MatDepth(const int v1, const int v2, const CheckContext& ctx)
{
ss << "must be " << getTestOpPhraseStr(ctx.testOp) << std::endl;
}
ss << " '" << ctx.p2_str << "' is " << v2 << " (" << depthToString(v2) << ")";
- cv::errorNoReturn(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
+ cv::error(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
}
void check_failed_MatType(const int v1, const int v2, const CheckContext& ctx)
{
ss << "must be " << getTestOpPhraseStr(ctx.testOp) << std::endl;
}
ss << " '" << ctx.p2_str << "' is " << v2 << " (" << typeToString(v2) << ")";
- cv::errorNoReturn(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
+ cv::error(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
}
void check_failed_MatChannels(const int v1, const int v2, const CheckContext& ctx)
{
<< " '" << ctx.p2_str << "'" << std::endl
<< "where" << std::endl
<< " '" << ctx.p1_str << "' is " << v;
- cv::errorNoReturn(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
+ cv::error(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
}
void check_failed_MatDepth(const int v, const CheckContext& ctx)
{
<< " '" << ctx.p2_str << "'" << std::endl
<< "where" << std::endl
<< " '" << ctx.p1_str << "' is " << v << " (" << depthToString(v) << ")";
- cv::errorNoReturn(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
+ cv::error(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
}
void check_failed_MatType(const int v, const CheckContext& ctx)
{
<< " '" << ctx.p2_str << "'" << std::endl
<< "where" << std::endl
<< " '" << ctx.p1_str << "' is " << v << " (" << typeToString(v) << ")";
- cv::errorNoReturn(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
+ cv::error(cv::Error::StsError, ss.str(), ctx.func, ctx.file, ctx.line);
}
void check_failed_MatChannels(const int v, const CheckContext& ctx)
{
CV_INSTRUMENT_REGION()
Mat src = _src.getMat();
- CV_Assert( src.type() == CV_8UC1 );
- int n = countNonZero(src);
- if( n == 0 )
+ CV_Assert( src.channels() == 1 && src.dims == 2 );
+
+ int depth = src.depth();
+ std::vector<Point> idxvec;
+ int rows = src.rows, cols = src.cols;
+ AutoBuffer<int> buf_(cols + 1);
+ int* buf = buf_.data();
+
+ for( int i = 0; i < rows; i++ )
{
- _idx.release();
- return;
+ int j, k = 0;
+ const uchar* ptr8 = src.ptr(i);
+ if( depth == CV_8U || depth == CV_8S )
+ {
+ for( j = 0; j < cols; j++ )
+ if( ptr8[j] != 0 ) buf[k++] = j;
+ }
+ else if( depth == CV_16U || depth == CV_16S )
+ {
+ const ushort* ptr16 = (const ushort*)ptr8;
+ for( j = 0; j < cols; j++ )
+ if( ptr16[j] != 0 ) buf[k++] = j;
+ }
+ else if( depth == CV_32S )
+ {
+ const int* ptr32s = (const int*)ptr8;
+ for( j = 0; j < cols; j++ )
+ if( ptr32s[j] != 0 ) buf[k++] = j;
+ }
+ else if( depth == CV_32F )
+ {
+ const float* ptr32f = (const float*)ptr8;
+ for( j = 0; j < cols; j++ )
+ if( ptr32f[j] != 0 ) buf[k++] = j;
+ }
+ else
+ {
+ const double* ptr64f = (const double*)ptr8;
+ for( j = 0; j < cols; j++ )
+ if( ptr64f[j] != 0 ) buf[k++] = j;
+ }
+
+ if( k > 0 )
+ {
+ size_t sz = idxvec.size();
+ idxvec.resize(sz + k);
+ for( j = 0; j < k; j++ )
+ idxvec[sz + j] = Point(buf[j], i);
+ }
}
- if( _idx.kind() == _InputArray::MAT && !_idx.getMatRef().isContinuous() )
+
+ if( idxvec.empty() || (_idx.kind() == _InputArray::MAT && !_idx.getMatRef().isContinuous()) )
_idx.release();
- _idx.create(n, 1, CV_32SC2);
- Mat idx = _idx.getMat();
- CV_Assert(idx.isContinuous());
- Point* idx_ptr = idx.ptr<Point>();
- for( int i = 0; i < src.rows; i++ )
- {
- const uchar* bin_ptr = src.ptr(i);
- for( int j = 0; j < src.cols; j++ )
- if( bin_ptr[j] )
- *idx_ptr++ = Point(j, i);
- }
+ if( !idxvec.empty() )
+ Mat(idxvec).copyTo(_idx);
}
// Linear Discriminant Analysis implementation
//------------------------------------------------------------------------------
-LDA::LDA(int num_components) : _dataAsRow(true), _num_components(num_components) { }
+LDA::LDA(int num_components) : _num_components(num_components) { }
-LDA::LDA(InputArrayOfArrays src, InputArray labels, int num_components) : _dataAsRow(true), _num_components(num_components)
+LDA::LDA(InputArrayOfArrays src, InputArray labels, int num_components) : _num_components(num_components)
{
this->compute(src, labels); //! compute eigenvectors and eigenvalues
}
return cv::hal::normHamming(a, b, size);
}
-double cv::PSNR(InputArray _src1, InputArray _src2)
+double cv::PSNR(InputArray _src1, InputArray _src2, double R)
{
CV_INSTRUMENT_REGION()
//Input arrays must have depth CV_8U
- CV_Assert( _src1.depth() == CV_8U && _src2.depth() == CV_8U );
+ CV_Assert( _src1.type() == _src2.type() );
double diff = std::sqrt(norm(_src1, _src2, NORM_L2SQR)/(_src1.total()*_src1.channels()));
- return 20*log10(255./(diff+DBL_EPSILON));
+ return 20*log10(R/(diff+DBL_EPSILON));
}
default:
msg = "Unknown error";
};
- cv::errorNoReturn(Error::OpenGlApiCallError, func, msg, file, line);
+ cv::error(Error::OpenGlApiCallError, func, msg, file, line);
}
return true;
}
#endif
/* IMPORTANT: always use the same order of defines
- 1. HAVE_TBB - 3rdparty library, should be explicitly enabled
- 2. HAVE_CSTRIPES - 3rdparty library, should be explicitly enabled
- 3. HAVE_OPENMP - integrated to compiler, should be explicitly enabled
- 4. HAVE_GCD - system wide, used automatically (APPLE only)
- 5. WINRT - system wide, used automatically (Windows RT only)
- 6. HAVE_CONCURRENCY - part of runtime, used automatically (Windows only - MSVS 10, MSVS 11)
- 7. HAVE_PTHREADS_PF - pthreads if available
+ - HAVE_TBB - 3rdparty library, should be explicitly enabled
+ - HAVE_OPENMP - integrated to compiler, should be explicitly enabled
+ - HAVE_GCD - system wide, used automatically (APPLE only)
+ - WINRT - system wide, used automatically (Windows RT only)
+ - HAVE_CONCURRENCY - part of runtime, used automatically (Windows only - MSVS 10, MSVS 11)
+ - HAVE_PTHREADS_PF - pthreads if available
*/
#if defined HAVE_TBB
#endif
#undef min
#undef max
-#elif defined HAVE_CSTRIPES
- #include "C=.h"
- #undef shared
#elif defined HAVE_OPENMP
#include <omp.h>
#elif defined HAVE_GCD
#if defined HAVE_TBB
# define CV_PARALLEL_FRAMEWORK "tbb"
-#elif defined HAVE_CSTRIPES
-# define CV_PARALLEL_FRAMEWORK "cstripes"
#elif defined HAVE_OPENMP
# define CV_PARALLEL_FRAMEWORK "openmp"
#elif defined HAVE_GCD
tbb::parallel_for(tbb::blocked_range<int>(range.start, range.end), *this);
}
};
-#elif defined HAVE_CSTRIPES || defined HAVE_OPENMP
- typedef ParallelLoopBodyWrapper ProxyLoopBody;
#elif defined HAVE_GCD
typedef ParallelLoopBodyWrapper ProxyLoopBody;
static void block_function(void* context, size_t index)
#else
static tbb::task_scheduler_init tbbScheduler(tbb::task_scheduler_init::deferred);
#endif
-#elif defined HAVE_CSTRIPES
-// nothing for C=
#elif defined HAVE_OPENMP
static int numThreadsMax = omp_get_max_threads();
#elif defined HAVE_GCD
pbody();
#endif
-#elif defined HAVE_CSTRIPES
-
- parallel(MAX(0, numThreads))
- {
- int offset = stripeRange.start;
- int len = stripeRange.end - offset;
- Range r(offset + CPX_RANGE_START(len), offset + CPX_RANGE_END(len));
- pbody(r);
- barrier();
- }
-
#elif defined HAVE_OPENMP
#pragma omp parallel for schedule(dynamic) num_threads(numThreads > 0 ? numThreads : numThreadsMax)
: tbb::task_scheduler_init::default_num_threads();
#endif
-#elif defined HAVE_CSTRIPES
-
- return numThreads > 0
- ? numThreads
- : cv::getNumberOfCPUs();
-
#elif defined HAVE_OPENMP
return numThreads > 0
if(threads > 0) tbbScheduler.initialize(threads);
#endif
-#elif defined HAVE_CSTRIPES
-
- return; // nothing needed
-
#elif defined HAVE_OPENMP
return; // nothing needed as num_threads clause is used in #pragma omp parallel for
#else
return 0;
#endif
-#elif defined HAVE_CSTRIPES
- return pix();
#elif defined HAVE_OPENMP
return omp_get_thread_num();
#elif defined HAVE_GCD
//#define CV_USE_GLOBAL_WORKERS_COND_VAR // not effective on many-core systems (10+)
-#ifdef CV_CXX11
#include <atomic>
-#else
-#include <unistd.h> // _POSIX_PRIORITY_SCHEDULING
-#endif
// Spin lock's OS-level yield
#ifdef DECLARE_CV_YIELD
DECLARE_CV_YIELD
#endif
#ifndef CV_YIELD
-# ifdef CV_CXX11
-# include <thread>
-# define CV_YIELD() std::this_thread::yield()
-# elif defined(_POSIX_PRIORITY_SCHEDULING)
-# include <sched.h>
-# define CV_YIELD() sched_yield()
-# else
-# warning "Can't detect sched_yield() on the target platform. Specify CV_YIELD() definition via compiler flags."
-# define CV_YIELD() /* no-op: works, but not effective */
-# endif
+# include <thread>
+# define CV_YIELD() std::this_thread::yield()
#endif // CV_YIELD
// Spin lock's CPU-level yield (required for Hyper-Threading)
is_completed(false)
{
CV_LOG_VERBOSE(NULL, 5, "ParallelJob::ParallelJob(" << (void*)this << ")");
-#ifdef CV_CXX11
current_task.store(0, std::memory_order_relaxed);
active_thread_count.store(0, std::memory_order_relaxed);
completed_thread_count.store(0, std::memory_order_relaxed);
-#else
- current_task = 0;
- active_thread_count = 0;
- completed_thread_count = 0;
-#endif
dummy0_[0] = 0, dummy1_[0] = 0, dummy2_[0] = 0; // compiler warning
}
for (;;)
{
int chunk_size = std::max(1, (task_count - current_task) / remaining_multiplier);
-#ifdef CV_CXX11
int id = current_task.fetch_add(chunk_size, std::memory_order_seq_cst);
-#else
- int id = (int)CV_XADD(¤t_task, chunk_size);
-#endif
if (id >= task_count)
break; // no more free tasks
const ParallelLoopBody& body;
const Range range;
const unsigned nstripes;
-#ifdef CV_CXX11
+
std::atomic<int> current_task; // next free part of job
int64 dummy0_[8]; // avoid cache-line reusing for the same atomics
std::atomic<int> completed_thread_count; // number of threads completed any activities on this job
int64 dummy2_[8]; // avoid cache-line reusing for the same atomics
-#else
- /*CV_DECL_ALIGNED(64)*/ volatile int current_task; // next free part of job
- int64 dummy0_[8]; // avoid cache-line reusing for the same atomics
-
- /*CV_DECL_ALIGNED(64)*/ volatile int active_thread_count; // number of threads worked on this job
- int64 dummy1_[8]; // avoid cache-line reusing for the same atomics
-
- /*CV_DECL_ALIGNED(64)*/ volatile int completed_thread_count; // number of threads completed any activities on this job
- int64 dummy2_[8]; // avoid cache-line reusing for the same atomics
-#endif
volatile bool is_completed; // std::atomic_flag ?
CV_LOG_VERBOSE(NULL, 5, "Thread: job size=" << j->range.size() << " done=" << j->current_task);
if (j->current_task < j->range.size())
{
-#ifdef CV_CXX11
int other = j->active_thread_count.fetch_add(1, std::memory_order_seq_cst);
-#else
- int other = CV_XADD(&j->active_thread_count, 1);
-#endif
CV_LOG_VERBOSE(NULL, 5, "Thread: processing new job (with " << other << " other threads)"); CV_UNUSED(other);
#ifdef CV_PROFILE_THREADS
stat.threadExecuteStart = getTickCount();
#else
j->execute(true);
#endif
-#ifdef CV_CXX11
int completed = j->completed_thread_count.fetch_add(1, std::memory_order_seq_cst) + 1;
int active = j->active_thread_count.load(std::memory_order_acquire);
-#else
- int completed = (int)CV_XADD(&j->completed_thread_count, 1) + 1;
- int active = j->active_thread_count;
-#endif
if (CV_WORKER_ACTIVE_WAIT_THREADS_LIMIT > 0)
{
allow_active_wait = true;
const char* err_msg, const char* source_file, int source_line )
{
cv::String msg = cv::format("%s(%d): %s", fs->filename, fs->lineno, err_msg);
- cv::errorNoReturn(cv::Error::StsParseError, func_name, msg.c_str(), source_file, source_line );
+ cv::error(cv::Error::StsParseError, func_name, msg.c_str(), source_file, source_line );
}
void icvFSCreateCollection( CvFileStorage* fs, int tag, CvFileNode* collection )
*this << name << val;
}
-void FileStorage::write( const String& name, InputArray val )
+void FileStorage::write( const String& name, const Mat& val )
{
- *this << name << val.getMat();
+ *this << name << val;
+}
+
+void FileStorage::write( const String& name, const std::vector<String>& val )
+{
+ *this << name << val;
}
void FileStorage::writeComment( const String& comment, bool append )
#include "arithm_core.hpp"
#include "hal_replacement.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/core/core_tegra.hpp"
-#else
#define GET_OPTIMIZED(func) (func)
-#endif
namespace cv
{
//#endif
useIPP(-1),
useIPP_NE(-1)
-#ifdef HAVE_TEGRA_OPTIMIZATION
- ,useTegra(-1)
-#endif
#ifdef HAVE_OPENVX
,useOpenVX(-1)
#endif
//#endif
int useIPP; // 1 - use, 0 - do not use, -1 - auto/not initialized
int useIPP_NE; // 1 - use, 0 - do not use, -1 - auto/not initialized
-#ifdef HAVE_TEGRA_OPTIMIZATION
- int useTegra; // 1 - use, 0 - do not use, -1 - auto/not initialized
-#endif
#ifdef HAVE_OPENVX
int useOpenVX; // 1 - use, 0 - do not use, -1 - auto/not initialized
#endif
Mutex* __initialization_mutex_initializer = &getInitializationMutex();
static bool param_dumpErrors = utils::getConfigurationParameterBool("OPENCV_DUMP_ERRORS",
-#if defined(_DEBUG) || defined(__ANDROID__) || (defined(__GNUC__) && !defined(__EXCEPTIONS))
+#if defined(_DEBUG) || defined(__ANDROID__)
true
#else
false
#include <cstdlib> // std::abort
#endif
-#if defined __ANDROID__ || defined __linux__ || defined __FreeBSD__ || defined __HAIKU__
+#if defined __ANDROID__ || defined __linux__ || defined __FreeBSD__ || defined __HAIKU__ || defined __Fuchsia__
# include <unistd.h>
# include <fcntl.h>
# include <elf.h>
#ifdef HAVE_OPENCL
ocl::setUseOpenCL(flag);
#endif
-#ifdef HAVE_TEGRA_OPTIMIZATION
- ::tegra::setUseTegra(flag);
-#endif
}
bool useOptimized(void)
#endif
+#ifdef __GNUC__
+# if defined __clang__ || defined __APPLE__
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Winvalid-noreturn"
+# endif
+#endif
+
void error( const Exception& exc )
{
#ifdef CV_ERROR_SET_TERMINATE_HANDLER
}
CV_THROW(exc);
+#ifdef __GNUC__
+# if !defined __clang__ && !defined __APPLE__
+    // suppresses the GCC warning: "noreturn" function does return [enabled by default]
+ __builtin_trap();
+ // or use infinite loop: for (;;) {}
+# endif
+#endif
}
void error(int _code, const String& _err, const char* _func, const char* _file, int _line)
{
error(cv::Exception(_code, _err, _func, _file, _line));
+#ifdef __GNUC__
+# if !defined __clang__ && !defined __APPLE__
+    // suppresses the GCC warning: "noreturn" function does return [enabled by default]
+ __builtin_trap();
+ // or use infinite loop: for (;;) {}
+# endif
+#endif
}
+#ifdef __GNUC__
+# if defined __clang__ || defined __APPLE__
+# pragma GCC diagnostic pop
+# endif
+#endif
+
ErrorCallback
redirectError( ErrorCallback errCallback, void* userdata, void** prevUserdata)
namespace cv {
bool __termination = false;
-}
-
-namespace cv
-{
-
-#if defined _WIN32 || defined WINCE
-
-struct Mutex::Impl
-{
- Impl()
- {
-#if (_WIN32_WINNT >= 0x0600)
- ::InitializeCriticalSectionEx(&cs, 1000, 0);
-#else
- ::InitializeCriticalSection(&cs);
-#endif
- refcount = 1;
- }
- ~Impl() { DeleteCriticalSection(&cs); }
-
- void lock() { EnterCriticalSection(&cs); }
- bool trylock() { return TryEnterCriticalSection(&cs) != 0; }
- void unlock() { LeaveCriticalSection(&cs); }
-
- CRITICAL_SECTION cs;
- int refcount;
-};
-
-#else
-
-struct Mutex::Impl
-{
- Impl()
- {
- pthread_mutexattr_t attr;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&mt, &attr);
- pthread_mutexattr_destroy(&attr);
-
- refcount = 1;
- }
- ~Impl() { pthread_mutex_destroy(&mt); }
-
- void lock() { pthread_mutex_lock(&mt); }
- bool trylock() { return pthread_mutex_trylock(&mt) == 0; }
- void unlock() { pthread_mutex_unlock(&mt); }
-
- pthread_mutex_t mt;
- int refcount;
-};
-
-#endif
-
-Mutex::Mutex()
-{
- impl = new Mutex::Impl;
-}
-
-Mutex::~Mutex()
-{
- if( CV_XADD(&impl->refcount, -1) == 1 )
- delete impl;
- impl = 0;
-}
-
-Mutex::Mutex(const Mutex& m)
-{
- impl = m.impl;
- CV_XADD(&impl->refcount, 1);
-}
-
-Mutex& Mutex::operator = (const Mutex& m)
-{
- if (this != &m)
- {
- CV_XADD(&m.impl->refcount, 1);
- if( CV_XADD(&impl->refcount, -1) == 1 )
- delete impl;
- impl = m.impl;
- }
- return *this;
-}
-
-void Mutex::lock() { impl->lock(); }
-void Mutex::unlock() { impl->unlock(); }
-bool Mutex::trylock() { return impl->trylock(); }
//////////////////////////////// thread-local storage ////////////////////////////////
}
#endif
-#if OPENCV_ABI_COMPATIBILITY > 300
unsigned long long getIppFeatures()
-#else
-int getIppFeatures()
-#endif
{
#ifdef HAVE_IPP
-#if OPENCV_ABI_COMPATIBILITY > 300
return getIPPSingleton().ippFeatures;
#else
- return (int)getIPPSingleton().ippFeatures;
-#endif
-#else
return 0;
#endif
}
} // namespace cv
-#ifdef HAVE_TEGRA_OPTIMIZATION
-
-namespace tegra {
-
-bool useTegra()
-{
- cv::CoreTLSData* data = cv::getCoreTlsData().get();
-
- if (data->useTegra < 0)
- {
- const char* pTegraEnv = getenv("OPENCV_TEGRA");
- if (pTegraEnv && (cv::String(pTegraEnv) == "disabled"))
- data->useTegra = false;
- else
- data->useTegra = true;
- }
-
- return (data->useTegra > 0);
-}
-
-void setUseTegra(bool flag)
-{
- cv::CoreTLSData* data = cv::getCoreTlsData().get();
- data->useTegra = flag;
-}
-
-} // namespace tegra
-
-#endif
-
/* End of file. */
testing::Values(-1, CV_16S, CV_32S, CV_32F),
testing::Bool()));
-TEST(Core_FindNonZero, singular)
+TEST(Core_FindNonZero, regression)
{
Mat img(10, 10, CV_8U, Scalar::all(0));
- vector<Point> pts, pts2(10);
+ vector<Point> pts, pts2(5);
findNonZero(img, pts);
findNonZero(img, pts2);
ASSERT_TRUE(pts.empty() && pts2.empty());
+
+ RNG rng((uint64)-1);
+ size_t nz = 0;
+ for( int i = 0; i < 10; i++ )
+ {
+ int idx = rng.uniform(0, img.rows*img.cols);
+ if( !img.data[idx] ) nz++;
+ img.data[idx] = (uchar)rng.uniform(1, 256);
+ }
+ findNonZero(img, pts);
+ ASSERT_TRUE(pts.size() == nz);
+
+ img.convertTo( img, CV_8S );
+ pts.clear();
+ findNonZero(img, pts);
+ ASSERT_TRUE(pts.size() == nz);
+
+ img.convertTo( img, CV_16U );
+ pts.resize(pts.size()*2);
+ findNonZero(img, pts);
+ ASSERT_TRUE(pts.size() == nz);
+
+ img.convertTo( img, CV_16S );
+ pts.resize(pts.size()*3);
+ findNonZero(img, pts);
+ ASSERT_TRUE(pts.size() == nz);
+
+ img.convertTo( img, CV_32S );
+ pts.resize(pts.size()*4);
+ findNonZero(img, pts);
+ ASSERT_TRUE(pts.size() == nz);
+
+ img.convertTo( img, CV_32F );
+ pts.resize(pts.size()*5);
+ findNonZero(img, pts);
+ ASSERT_TRUE(pts.size() == nz);
+
+ img.convertTo( img, CV_64F );
+ pts.clear();
+ findNonZero(img, pts);
+ ASSERT_TRUE(pts.size() == nz);
}
TEST(Core_BoolVector, support)
ASSERT_EQ( cvtest::norm(a, b, NORM_INF), 0.);
}
-#ifdef CV_CXX11
-
TEST(Core_Matx, from_initializer_list)
{
Mat_<double> a = (Mat_<double>(2,2) << 10, 11, 12, 13);
EXPECT_EQ(25u, m2.total());
}
-#endif // CXX11
-
TEST(Core_InputArray, empty)
{
vector<vector<Point> > data;
}
}
-#ifdef CV_CXX_STD_ARRAY
TEST(Core_Mat_array, outputArray_create_getMat)
{
cv::Mat_<uchar> src_base(5, 1);
EXPECT_EQ(0, cvtest::norm(src[i], dst[i], NORM_INF));
}
}
-#endif
TEST(Mat, regression_8680)
{
ASSERT_EQ(mat.channels(), 2);
}
-#ifdef CV_CXX11
-
TEST(Mat_, range_based_for)
{
Mat_<uchar> img = Mat_<uchar>::zeros(3, 3);
ASSERT_FLOAT_EQ(66.0f, *(mat.ptr<float>(idx)));
}
-#endif
-
BIGDATA_TEST(Mat, push_back_regression_4158) // memory usage: ~10.6 Gb
{
if (compactResult)
{
-#ifdef CV_CXX11
std::vector< std::vector<DMatch> >::iterator new_end = std::remove_if(matches.begin(), matches.end(),
[](const std::vector<DMatch>& e)->bool { return e.empty(); });
-#else
- std::vector< std::vector<DMatch> >::iterator new_end = std::remove_if(matches.begin(), matches.end(), std::mem_fun_ref(&std::vector<DMatch>::empty));
-#endif
matches.erase(new_end, matches.end());
}
}
ocv_cuda_filter_options()
- CUDA_ADD_EXECUTABLE(${the_target} ${OPENCV_TEST_${the_module}_SOURCES})
+ CUDA_ADD_EXECUTABLE(${the_target} ${OPENCV_TEST_${the_module}_SOURCES} OPTIONS -std=c++11)
ocv_target_link_libraries(${the_target} LINK_PRIVATE
${test_deps} ${OPENCV_LINKER_LIBS} ${CUDA_LIBRARIES}
)
case Param::REAL:
return (int)pd->size();
}
-#ifdef __OPENCV_BUILD
CV_Error(Error::StsInternal, "");
-#else
- CV_ErrorNoReturn(Error::StsInternal, "");
-#endif
}
inline std::ostream &operator<<(std::ostream &stream, const DictValue &dictv)
parser.add_argument("--val_names", help="path to file with validation set image names, download it here: "
"https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/data/pascal/seg11valid.txt")
parser.add_argument("--cls_file", help="path to file with colors for classes, download it here: "
- "https://github.com/opencv/opencv/blob/3.4/samples/data/dnn/pascal-classes.txt")
+ "https://github.com/opencv/opencv/blob/master/samples/data/dnn/pascal-classes.txt")
parser.add_argument("--prototxt", help="path to caffe prototxt, download it here: "
- "https://github.com/opencv/opencv/blob/3.4/samples/data/dnn/fcn8s-heavy-pascal.prototxt")
+ "https://github.com/opencv/opencv/blob/master/samples/data/dnn/fcn8s-heavy-pascal.prototxt")
parser.add_argument("--caffemodel", help="path to caffemodel file, download it here: "
"http://dl.caffe.berkeleyvision.org/fcn8s-heavy-pascal.caffemodel")
parser.add_argument("--log", help="path to logging file")
-misc/java/src/cpp/features2d_manual.hpp
include/opencv2/features2d.hpp
"class_ignore_list" : [
"SimpleBlobDetector"
],
- "const_private_list" : [
- "OPPONENTEXTRACTOR",
- "GRIDDETECTOR",
- "PYRAMIDDETECTOR",
- "DYNAMICDETECTOR"
- ],
"type_dict" : {
"Feature2D": {
"j_type": "Feature2D",
+++ /dev/null
-#ifndef __OPENCV_FEATURES_2D_MANUAL_HPP__
-#define __OPENCV_FEATURES_2D_MANUAL_HPP__
-
-#include "opencv2/opencv_modules.hpp"
-
-#ifdef HAVE_OPENCV_FEATURES2D
-#include "opencv2/features2d.hpp"
-#include "features2d_converters.hpp"
-
-#undef SIMPLEBLOB // to solve conflict with wincrypt.h on windows
-
-namespace cv
-{
-
-/**
- * @deprecated Please use direct instantiation of Feature2D classes
- */
-class CV_EXPORTS_AS(FeatureDetector) javaFeatureDetector
-{
-public:
- CV_WRAP void detect( const Mat& image, CV_OUT std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const
- { return wrapped->detect(image, keypoints, mask); }
-
- CV_WRAP void detect( const std::vector<Mat>& images, CV_OUT std::vector<std::vector<KeyPoint> >& keypoints, const std::vector<Mat>& masks=std::vector<Mat>() ) const
- { return wrapped->detect(images, keypoints, masks); }
-
- CV_WRAP bool empty() const
- { return wrapped->empty(); }
-
- enum
- {
- FAST = 1,
- STAR = 2,
- SIFT = 3,
- SURF = 4,
- ORB = 5,
- MSER = 6,
- GFTT = 7,
- HARRIS = 8,
- SIMPLEBLOB = 9,
- DENSE = 10,
- BRISK = 11,
- AKAZE = 12,
-
-
- GRIDDETECTOR = 1000,
-
- GRID_FAST = GRIDDETECTOR + FAST,
- GRID_STAR = GRIDDETECTOR + STAR,
- GRID_SIFT = GRIDDETECTOR + SIFT,
- GRID_SURF = GRIDDETECTOR + SURF,
- GRID_ORB = GRIDDETECTOR + ORB,
- GRID_MSER = GRIDDETECTOR + MSER,
- GRID_GFTT = GRIDDETECTOR + GFTT,
- GRID_HARRIS = GRIDDETECTOR + HARRIS,
- GRID_SIMPLEBLOB = GRIDDETECTOR + SIMPLEBLOB,
- GRID_DENSE = GRIDDETECTOR + DENSE,
- GRID_BRISK = GRIDDETECTOR + BRISK,
- GRID_AKAZE = GRIDDETECTOR + AKAZE,
-
-
- PYRAMIDDETECTOR = 2000,
-
- PYRAMID_FAST = PYRAMIDDETECTOR + FAST,
- PYRAMID_STAR = PYRAMIDDETECTOR + STAR,
- PYRAMID_SIFT = PYRAMIDDETECTOR + SIFT,
- PYRAMID_SURF = PYRAMIDDETECTOR + SURF,
- PYRAMID_ORB = PYRAMIDDETECTOR + ORB,
- PYRAMID_MSER = PYRAMIDDETECTOR + MSER,
- PYRAMID_GFTT = PYRAMIDDETECTOR + GFTT,
- PYRAMID_HARRIS = PYRAMIDDETECTOR + HARRIS,
- PYRAMID_SIMPLEBLOB = PYRAMIDDETECTOR + SIMPLEBLOB,
- PYRAMID_DENSE = PYRAMIDDETECTOR + DENSE,
- PYRAMID_BRISK = PYRAMIDDETECTOR + BRISK,
- PYRAMID_AKAZE = PYRAMIDDETECTOR + AKAZE,
-
- DYNAMICDETECTOR = 3000,
-
- DYNAMIC_FAST = DYNAMICDETECTOR + FAST,
- DYNAMIC_STAR = DYNAMICDETECTOR + STAR,
- DYNAMIC_SIFT = DYNAMICDETECTOR + SIFT,
- DYNAMIC_SURF = DYNAMICDETECTOR + SURF,
- DYNAMIC_ORB = DYNAMICDETECTOR + ORB,
- DYNAMIC_MSER = DYNAMICDETECTOR + MSER,
- DYNAMIC_GFTT = DYNAMICDETECTOR + GFTT,
- DYNAMIC_HARRIS = DYNAMICDETECTOR + HARRIS,
- DYNAMIC_SIMPLEBLOB = DYNAMICDETECTOR + SIMPLEBLOB,
- DYNAMIC_DENSE = DYNAMICDETECTOR + DENSE,
- DYNAMIC_BRISK = DYNAMICDETECTOR + BRISK,
- DYNAMIC_AKAZE = DYNAMICDETECTOR + AKAZE
- };
-
- /**
- * supported: FAST STAR SIFT SURF ORB MSER GFTT HARRIS BRISK AKAZE Grid(XXXX) Pyramid(XXXX) Dynamic(XXXX)
- * not supported: SimpleBlob, Dense
- * @deprecated
- */
- CV_WRAP static Ptr<javaFeatureDetector> create( int detectorType )
- {
- //String name;
- if (detectorType > DYNAMICDETECTOR)
- {
- //name = "Dynamic";
- detectorType -= DYNAMICDETECTOR;
- }
- if (detectorType > PYRAMIDDETECTOR)
- {
- //name = "Pyramid";
- detectorType -= PYRAMIDDETECTOR;
- }
- if (detectorType > GRIDDETECTOR)
- {
- //name = "Grid";
- detectorType -= GRIDDETECTOR;
- }
-
- Ptr<FeatureDetector> fd;
- switch(detectorType)
- {
- case FAST:
- fd = FastFeatureDetector::create();
- break;
- //case STAR:
- // fd = xfeatures2d::StarDetector::create();
- // break;
- //case SIFT:
- // name = name + "SIFT";
- // break;
- //case SURF:
- // name = name + "SURF";
- // break;
- case ORB:
- fd = ORB::create();
- break;
- case MSER:
- fd = MSER::create();
- break;
- case GFTT:
- fd = GFTTDetector::create();
- break;
- case HARRIS:
- {
- Ptr<GFTTDetector> gftt = GFTTDetector::create();
- gftt->setHarrisDetector(true);
- fd = gftt;
- }
- break;
- case SIMPLEBLOB:
- fd = SimpleBlobDetector::create();
- break;
- //case DENSE:
- // name = name + "Dense";
- // break;
- case BRISK:
- fd = BRISK::create();
- break;
- case AKAZE:
- fd = AKAZE::create();
- break;
- default:
- CV_Error( Error::StsBadArg, "Specified feature detector type is not supported." );
- break;
- }
-
- return makePtr<javaFeatureDetector>(fd);
- }
-
- CV_WRAP void write( const String& fileName ) const
- {
- FileStorage fs(fileName, FileStorage::WRITE);
- wrapped->write(fs);
- }
-
- CV_WRAP void read( const String& fileName )
- {
- FileStorage fs(fileName, FileStorage::READ);
- wrapped->read(fs.root());
- }
-
- javaFeatureDetector(Ptr<FeatureDetector> _wrapped) : wrapped(_wrapped)
- {}
-
-private:
-
- Ptr<FeatureDetector> wrapped;
-};
-
-/**
- * @deprecated
- */
-class CV_EXPORTS_AS(DescriptorExtractor) javaDescriptorExtractor
-{
-public:
- CV_WRAP void compute( const Mat& image, CV_IN_OUT std::vector<KeyPoint>& keypoints, Mat& descriptors ) const
- { return wrapped->compute(image, keypoints, descriptors); }
-
- CV_WRAP void compute( const std::vector<Mat>& images, CV_IN_OUT std::vector<std::vector<KeyPoint> >& keypoints, CV_OUT std::vector<Mat>& descriptors ) const
- { return wrapped->compute(images, keypoints, descriptors); }
-
- CV_WRAP int descriptorSize() const
- { return wrapped->descriptorSize(); }
-
- CV_WRAP int descriptorType() const
- { return wrapped->descriptorType(); }
-
- CV_WRAP bool empty() const
- { return wrapped->empty(); }
-
- enum
- {
- SIFT = 1,
- SURF = 2,
- ORB = 3,
- BRIEF = 4,
- BRISK = 5,
- FREAK = 6,
- AKAZE = 7,
-
-
- OPPONENTEXTRACTOR = 1000,
-
-
-
- OPPONENT_SIFT = OPPONENTEXTRACTOR + SIFT,
- OPPONENT_SURF = OPPONENTEXTRACTOR + SURF,
- OPPONENT_ORB = OPPONENTEXTRACTOR + ORB,
- OPPONENT_BRIEF = OPPONENTEXTRACTOR + BRIEF,
- OPPONENT_BRISK = OPPONENTEXTRACTOR + BRISK,
- OPPONENT_FREAK = OPPONENTEXTRACTOR + FREAK,
- OPPONENT_AKAZE = OPPONENTEXTRACTOR + AKAZE
- };
-
- //supported SIFT, SURF, ORB, BRIEF, BRISK, FREAK, AKAZE, Opponent(XXXX)
- //not supported: Calonder
- CV_WRAP static Ptr<javaDescriptorExtractor> create( int extractorType )
- {
- //String name;
-
- if (extractorType > OPPONENTEXTRACTOR)
- {
- //name = "Opponent";
- extractorType -= OPPONENTEXTRACTOR;
- }
-
- Ptr<DescriptorExtractor> de;
- switch(extractorType)
- {
- //case SIFT:
- // name = name + "SIFT";
- // break;
- //case SURF:
- // name = name + "SURF";
- // break;
- case ORB:
- de = ORB::create();
- break;
- //case BRIEF:
- // name = name + "BRIEF";
- // break;
- case BRISK:
- de = BRISK::create();
- break;
- //case FREAK:
- // name = name + "FREAK";
- // break;
- case AKAZE:
- de = AKAZE::create();
- break;
- default:
- CV_Error( Error::StsBadArg, "Specified descriptor extractor type is not supported." );
- break;
- }
-
- return makePtr<javaDescriptorExtractor>(de);
- }
-
- CV_WRAP void write( const String& fileName ) const
- {
- FileStorage fs(fileName, FileStorage::WRITE);
- wrapped->write(fs);
- }
-
- CV_WRAP void read( const String& fileName )
- {
- FileStorage fs(fileName, FileStorage::READ);
- wrapped->read(fs.root());
- }
-
- javaDescriptorExtractor(Ptr<DescriptorExtractor> _wrapped) : wrapped(_wrapped)
- {}
-
-private:
-
- Ptr<DescriptorExtractor> wrapped;
-};
-
-#if 0
-//DO NOT REMOVE! The block is required for sources parser
-enum
-{
- DRAW_OVER_OUTIMG = 1, // Output image matrix will not be created (Mat::create).
- // Matches will be drawn on existing content of output image.
- NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn.
- DRAW_RICH_KEYPOINTS = 4 // For each keypoint the circle around keypoint with keypoint size and
- // orientation will be drawn.
-};
-
-CV_EXPORTS_AS(drawMatches2) void drawMatches( const Mat& img1, const std::vector<KeyPoint>& keypoints1,
- const Mat& img2, const std::vector<KeyPoint>& keypoints2,
- const std::vector<std::vector<DMatch> >& matches1to2, Mat& outImg,
- const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
- const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), int flags=0);
-
-#endif
-
-} //cv
-
-#endif // HAVE_OPENCV_FEATURES2D
-
-#endif // __OPENCV_FEATURES_2D_MANUAL_HPP__
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
-import org.opencv.features2d.FeatureDetector;
+import org.opencv.features2d.FastFeatureDetector;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat descriptors = new Mat();
- FeatureDetector detector = FeatureDetector.create(FeatureDetector.FAST);
+ Feature2D detector = FastFeatureDetector.create();
Feature2D extractor = createClassInstance(XFEATURES2D+"BriefDescriptorExtractor", DEFAULT_FACTORY, null, null);
detector.detect(img, keypoints);
import org.opencv.core.Scalar;
import org.opencv.core.DMatch;
import org.opencv.features2d.DescriptorMatcher;
-import org.opencv.features2d.FeatureDetector;
+import org.opencv.features2d.FastFeatureDetector;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import org.opencv.imgproc.Imgproc;
MatOfKeyPoint keypoints = new MatOfKeyPoint();
Mat descriptors = new Mat();
- FeatureDetector detector = FeatureDetector.create(FeatureDetector.FAST);
+ Feature2D detector = FastFeatureDetector.create();
Feature2D extractor = createClassInstance(XFEATURES2D+"BriefDescriptorExtractor", DEFAULT_FACTORY, null, null);
detector.detect(img, keypoints);
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
-import org.opencv.features2d.FeatureDetector;
+import org.opencv.features2d.Feature2D;
+import org.opencv.features2d.FastFeatureDetector;
import org.opencv.core.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
public class FASTFeatureDetectorTest extends OpenCVTestCase {
- FeatureDetector detector;
+ Feature2D detector;
KeyPoint[] truth;
private Mat getMaskImg() {
@Override
protected void setUp() throws Exception {
super.setUp();
- detector = FeatureDetector.create(FeatureDetector.FAST);
+ detector = FastFeatureDetector.create();
truth = new KeyPoint[] { new KeyPoint(32, 27, 7, -1, 254, 0, -1), new KeyPoint(27, 32, 7, -1, 254, 0, -1), new KeyPoint(73, 68, 7, -1, 254, 0, -1),
new KeyPoint(68, 73, 7, -1, 254, 0, -1) };
}
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
-import org.opencv.features2d.DescriptorExtractor;
import org.opencv.core.KeyPoint;
import org.opencv.features2d.ORB;
import org.opencv.test.OpenCVTestCase;
FAST_t<12>(_img, keypoints, threshold, nonmax_suppression);
break;
case FastFeatureDetector::TYPE_9_16:
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(tegra::useTegra() && tegra::FAST(_img, keypoints, threshold, nonmax_suppression))
- break;
-#endif
FAST_t<16>(_img, keypoints, threshold, nonmax_suppression);
break;
}
#include <algorithm>
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/features2d/features2d_tegra.hpp"
-#endif
-
#endif
@param winname Name of the window.
@param onMouse Mouse callback. See OpenCV samples, such as
-<https://github.com/opencv/opencv/tree/3.4/samples/cpp/ffilldemo.cpp>, on how to specify and
+<https://github.com/opencv/opencv/tree/master/samples/cpp/ffilldemo.cpp>, on how to specify and
use the callback.
@param userdata The optional parameter passed to the callback.
*/
} // cv
-#ifndef DISABLE_OPENCV_24_COMPATIBILITY
-#include "opencv2/highgui/highgui_c.h"
-#endif
-
#endif
#undef abs
#endif
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/highgui/highgui_tegra.hpp"
-#endif
-
/* Errors */
#define HG_OK 0 /* Don't bet on it! */
#define HG_BADNAME -1 /* Bad window or file name */
}
#define CV_NO_GUI_ERROR(funcname) \
- cv::errorNoReturn(cv::Error::StsError, \
+ cv::error(cv::Error::StsError, \
"The function is not implemented. " \
"Rebuild the library with Windows, GTK+ 2.x or Carbon support. "\
"If you are on Ubuntu or Debian, install libgtk2.0-dev and pkg-config, then re-run cmake or configure script", \
use `rec` parameter as alternative specification of the drawn rectangle: `r.tl() and
r.br()-Point(1,1)` are opposite corners
*/
-CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec,
+CV_EXPORTS_W void rectangle(InputOutputArray img, Rect rec,
const Scalar& color, int thickness = 1,
int lineType = LINE_8, int shift = 0);
@param line_type Type of the line, See #LineTypes
@param markerSize The length of the marker axis [default = 20 pixels]
*/
-CV_EXPORTS_W void drawMarker(CV_IN_OUT Mat& img, Point position, const Scalar& color,
+CV_EXPORTS_W void drawMarker(InputOutputArray img, Point position, const Scalar& color,
int markerType = MARKER_CROSS, int markerSize=20, int thickness=1,
int line_type=8);
} // cv
-#ifndef DISABLE_OPENCV_24_COMPATIBILITY
-#include "opencv2/imgproc/imgproc_c.h"
-#endif
-
#endif
aperture_size,
L2gradient ) )
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::canny(src, dst, low_thresh, high_thresh, aperture_size, L2gradient))
- return;
-#endif
-
CV_IPP_RUN_FAST(ipp_Canny(src, Mat(), Mat(), dst, (float)low_thresh, (float)high_thresh, L2gradient, aperture_size))
if (L2gradient)
int aperture_size, int op_type, double k=0.,
int borderType=BORDER_DEFAULT )
{
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::cornerEigenValsVecs(src, eigenv, block_size, aperture_size, op_type, k, borderType))
- return;
-#endif
#if CV_TRY_AVX
bool haveAvx = CV_CPU_HAS_SUPPORT_AVX;
#endif
CV_IPP_RUN(!(cv::ocl::isOpenCLActivated() && _dst.isUMat()), ipp_Laplacian(_src, _dst, ksize, scale, delta, borderType));
-
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && scale == 1.0 && delta == 0)
- {
- Mat src = _src.getMat(), dst = _dst.getMat();
- if (ksize == 1 && tegra::laplace1(src, dst, borderType))
- return;
- if (ksize == 3 && tegra::laplace3(src, dst, borderType))
- return;
- if (ksize == 5 && tegra::laplace5(src, dst, borderType))
- return;
- }
-#endif
-
if( ksize == 1 || ksize == 3 )
{
float K[2][9] =
/* ADDING A SET OF PREDEFINED MARKERS WHICH COULD BE USED TO HIGHLIGHT POSITIONS IN AN IMAGE */
/* ----------------------------------------------------------------------------------------- */
-void drawMarker(Mat& img, Point position, const Scalar& color, int markerType, int markerSize, int thickness, int line_type)
+void drawMarker(InputOutputArray img, Point position, const Scalar& color, int markerType, int markerSize, int thickness, int line_type)
{
switch(markerType)
{
}
-void rectangle( Mat& img, Rect rec,
+void rectangle( InputOutputArray img, Rect rec,
const Scalar& color, int thickness,
int lineType, int shift )
{
CV_INSTRUMENT_REGION()
- CV_Assert( 0 <= shift && shift <= XY_SHIFT );
if( rec.area() > 0 )
rectangle( img, rec.tl(), rec.br() - Point(1<<shift,1<<shift),
color, thickness, lineType, shift );
namespace cv
{
-#ifdef CV_CXX11
struct greaterThanPtr
-#else
-struct greaterThanPtr : public std::binary_function<const float *, const float *, bool>
-#endif
{
bool operator () (const float * a, const float * b) const
// Ensure a fully deterministic result of the sort
const double thetaScale = levels_ / 360.0;
r_table_.resize(levels_ + 1);
-#ifdef CV_CXX11
std::for_each(r_table_.begin(), r_table_.end(), [](std::vector<Point>& e)->void { e.clear(); });
-#else
- std::for_each(r_table_.begin(), r_table_.end(), std::mem_fun_ref(&std::vector<Point>::clear));
-#endif
for (int y = 0; y < templSize_.height; ++y)
{
getContourPoints(edges, dx, dy, points);
features.resize(levels_ + 1);
-#ifdef CV_CXX11
std::for_each(features.begin(), features.end(), [=](std::vector<Feature>& e) { e.clear(); e.reserve(maxBufferSize_); });
-#else
- std::for_each(features.begin(), features.end(), std::mem_fun_ref(&std::vector<Feature>::clear));
- std::for_each(features.begin(), features.end(), std::bind2nd(std::mem_fun_ref(&std::vector<Feature>::reserve), maxBufferSize_));
-#endif
for (size_t i = 0; i < points.size(); ++i)
{
#include <limits.h>
#include <float.h>
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/imgproc/imgproc_tegra.hpp"
-#else
#define GET_OPTIMIZED(func) (func)
-#endif
/* helper tables */
extern const uchar icvSaturate8u_cv[];
CALL_HAL(pyrDown, cv_hal_pyrdown, src.data, src.step, src.cols, src.rows, dst.data, dst.step, dst.cols, dst.rows, depth, src.channels(), borderType);
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(borderType == BORDER_DEFAULT && tegra::useTegra() && tegra::pyrDown(src, dst))
- return;
-#endif
-
#ifdef HAVE_IPP
bool isolated = (borderType & BORDER_ISOLATED) != 0;
int borderTypeNI = borderType & ~BORDER_ISOLATED;
Mat dst = _dst.getMat();
int depth = src.depth();
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(borderType == BORDER_DEFAULT && tegra::useTegra() && tegra::pyrUp(src, dst))
- return;
-#endif
-
#ifdef HAVE_IPP
bool isolated = (borderType & BORDER_ISOLATED) != 0;
int borderTypeNI = borderType & ~BORDER_ISOLATED;
CV_IPP_RUN_FAST(ipp_medianFilter(src0, dst, ksize));
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::medianBlur(src0, dst, ksize))
- return;
-#endif
-
bool useSortNet = ksize == 3 || (ksize == 5
#if !(CV_SIMD128)
&& ( src0.depth() > CV_8U || src0.channels() == 2 || src0.channels() > 4 )
_result.create(corrSize, CV_32F);
Mat result = _result.getMat();
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::matchTemplate(img, templ, result, method))
- return;
-#endif
-
CV_IPP_RUN_FAST(ipp_matchTemplate(img, templ, result, method))
crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0);
src_step = dst_step = roi.width;
}
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::thresh_8u(_src, _dst, roi.width, roi.height, thresh, maxval, type))
- return;
-#endif
-
#if defined(HAVE_IPP)
CV_IPP_CHECK()
{
src_step = dst_step = roi.width;
}
- // HAVE_TEGRA_OPTIMIZATION not supported
-
// HAVE_IPP not supported
const ushort* src = _src.ptr<ushort>();
src_step = dst_step = roi.width;
}
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::thresh_16s(_src, _dst, roi.width, roi.height, thresh, maxval, type))
- return;
-#endif
-
#if defined(HAVE_IPP)
CV_IPP_CHECK()
{
roi.height = 1;
}
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::thresh_32f(_src, _dst, roi.width, roi.height, thresh, maxval, type))
- return;
-#endif
-
#if defined(HAVE_IPP)
CV_IPP_CHECK()
{
/////////////////////ref//////////////////////
-#ifdef CV_CXX11
struct greaterThanPtr
-#else
-struct greaterThanPtr : public std::binary_function<const float *, const float *, bool>
-#endif
{
bool operator () (const float * a, const float * b) const
{ return *a > *b; }
// - avoid using of "OpenCVLoader.initAsync()" approach - it is deprecated
// It may load library with different version (from OpenCV Android Manager, which is installed separatelly on device)
//
-// - use "System.loadLibrary("opencv_java3")" or "OpenCVLoader.initDebug()"
+// - use "System.loadLibrary("opencv_java4")" or "OpenCVLoader.initDebug()"
// TODO: Add accurate API to load OpenCV native library
//
//
else
{
// If the dependencies list is not defined or empty.
- String AbsLibraryPath = Path + File.separator + "libopencv_java3.so";
+ String AbsLibraryPath = Path + File.separator + "libopencv_java4.so";
result = loadLibrary(AbsLibraryPath);
}
else
{
// If dependencies list is not defined or empty.
- result = loadLibrary("opencv_java3");
+ result = loadLibrary("opencv_java4");
}
return result;
CV_WRAP virtual Mat getTestSampleWeights() const = 0;
CV_WRAP virtual Mat getVarIdx() const = 0;
CV_WRAP virtual Mat getVarType() const = 0;
- CV_WRAP Mat getVarSymbolFlags() const;
+ CV_WRAP virtual Mat getVarSymbolFlags() const = 0;
CV_WRAP virtual int getResponseType() const = 0;
CV_WRAP virtual Mat getTrainSampleIdx() const = 0;
CV_WRAP virtual Mat getTestSampleIdx() const = 0;
CV_WRAP virtual void shuffleTrainTest() = 0;
/** @brief Returns matrix of test samples */
- CV_WRAP Mat getTestSamples() const;
+ CV_WRAP virtual Mat getTestSamples() const = 0;
/** @brief Returns vector of symbolic names captured in loadFromCSV() */
- CV_WRAP void getNames(std::vector<String>& names) const;
+ CV_WRAP virtual void getNames(std::vector<String>& names) const = 0;
CV_WRAP static Mat getSubVector(const Mat& vec, const Mat& idx);
regression (SVM::EPS_SVR or SVM::NU_SVR). If it is SVM::ONE_CLASS, no optimization is made and
the usual %SVM with parameters specified in params is executed.
*/
- CV_WRAP bool trainAuto(InputArray samples,
+ CV_WRAP virtual bool trainAuto(InputArray samples,
int layout,
InputArray responses,
int kFold = 10,
Ptr<ParamGrid> nuGrid = SVM::getDefaultGridPtr(SVM::NU),
Ptr<ParamGrid> coeffGrid = SVM::getDefaultGridPtr(SVM::COEF),
Ptr<ParamGrid> degreeGrid = SVM::getDefaultGridPtr(SVM::DEGREE),
- bool balanced=false);
+ bool balanced=false) = 0;
/** @brief Retrieves all the support vectors
support vector, used for prediction, was derived from. They are returned in a floating-point
matrix, where the support vectors are stored as matrix rows.
*/
- CV_WRAP Mat getUncompressedSupportVectors() const;
+ CV_WRAP virtual Mat getUncompressedSupportVectors() const = 0;
/** @brief Retrieves the decision function
@param results Array where the result of the calculation will be written.
@param flags Flags for defining the type of RTrees.
*/
- CV_WRAP void getVotes(InputArray samples, OutputArray results, int flags) const;
+ CV_WRAP virtual void getVotes(InputArray samples, OutputArray results, int flags) const = 0;
/** Creates the empty model.
Use StatModel::train to train the model, StatModel::train to create and train the model,
/** ANNEAL: Update initial temperature.
It must be \>=0. Default value is 10.*/
/** @see setAnnealInitialT */
- CV_WRAP double getAnnealInitialT() const;
+ CV_WRAP virtual double getAnnealInitialT() const = 0;
/** @copybrief getAnnealInitialT @see getAnnealInitialT */
- CV_WRAP void setAnnealInitialT(double val);
+ CV_WRAP virtual void setAnnealInitialT(double val) = 0;
/** ANNEAL: Update final temperature.
It must be \>=0 and less than initialT. Default value is 0.1.*/
/** @see setAnnealFinalT */
- CV_WRAP double getAnnealFinalT() const;
+ CV_WRAP virtual double getAnnealFinalT() const = 0;
/** @copybrief getAnnealFinalT @see getAnnealFinalT */
- CV_WRAP void setAnnealFinalT(double val);
+ CV_WRAP virtual void setAnnealFinalT(double val) = 0;
/** ANNEAL: Update cooling ratio.
It must be \>0 and less than 1. Default value is 0.95.*/
/** @see setAnnealCoolingRatio */
- CV_WRAP double getAnnealCoolingRatio() const;
+ CV_WRAP virtual double getAnnealCoolingRatio() const = 0;
/** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */
- CV_WRAP void setAnnealCoolingRatio(double val);
+ CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0;
/** ANNEAL: Update iteration per step.
It must be \>0 . Default value is 10.*/
/** @see setAnnealItePerStep */
- CV_WRAP int getAnnealItePerStep() const;
+ CV_WRAP virtual int getAnnealItePerStep() const = 0;
/** @copybrief getAnnealItePerStep @see getAnnealItePerStep */
- CV_WRAP void setAnnealItePerStep(int val);
+ CV_WRAP virtual void setAnnealItePerStep(int val) = 0;
/** @brief Set/initialize anneal RNG */
- void setAnnealEnergyRNG(const RNG& rng);
+ virtual void setAnnealEnergyRNG(const RNG& rng) = 0;
/** possible activation functions */
enum ActivationFunctions {
};
+#ifndef DISABLE_OPENCV_3_COMPATIBILITY
+typedef ANN_MLP ANN_MLP_ANNEAL;
+#endif
+
/****************************************************************************************\
* Logistic Regression *
\****************************************************************************************/
CV_EXPORTS void createConcentricSpheresTestSet( int nsamples, int nfeatures, int nclasses,
OutputArray samples, OutputArray responses);
-/** @brief Artificial Neural Networks - Multi-Layer Perceptrons.
-
-@sa @ref ml_intro_ann
-*/
-class CV_EXPORTS_W ANN_MLP_ANNEAL : public ANN_MLP
-{
-public:
- /** @see setAnnealInitialT */
- CV_WRAP virtual double getAnnealInitialT() const = 0;
- /** @copybrief getAnnealInitialT @see getAnnealInitialT */
- CV_WRAP virtual void setAnnealInitialT(double val) = 0;
-
- /** ANNEAL: Update final temperature.
- It must be \>=0 and less than initialT. Default value is 0.1.*/
- /** @see setAnnealFinalT */
- CV_WRAP virtual double getAnnealFinalT() const = 0;
- /** @copybrief getAnnealFinalT @see getAnnealFinalT */
- CV_WRAP virtual void setAnnealFinalT(double val) = 0;
-
- /** ANNEAL: Update cooling ratio.
- It must be \>0 and less than 1. Default value is 0.95.*/
- /** @see setAnnealCoolingRatio */
- CV_WRAP virtual double getAnnealCoolingRatio() const = 0;
- /** @copybrief getAnnealCoolingRatio @see getAnnealCoolingRatio */
- CV_WRAP virtual void setAnnealCoolingRatio(double val) = 0;
-
- /** ANNEAL: Update iteration per step.
- It must be \>0 . Default value is 10.*/
- /** @see setAnnealItePerStep */
- CV_WRAP virtual int getAnnealItePerStep() const = 0;
- /** @copybrief getAnnealItePerStep @see getAnnealItePerStep */
- CV_WRAP virtual void setAnnealItePerStep(int val) = 0;
-
- /** @brief Set/initialize anneal RNG */
- virtual void setAnnealEnergyRNG(const RNG& rng) = 0;
-};
-
/****************************************************************************************\
* Simulated annealing solver *
};
-double ANN_MLP::getAnnealInitialT() const
-{
- const ANN_MLP_ANNEAL* this_ = dynamic_cast<const ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- return this_->getAnnealInitialT();
-}
-
-void ANN_MLP::setAnnealInitialT(double val)
-{
- ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- this_->setAnnealInitialT(val);
-}
-
-double ANN_MLP::getAnnealFinalT() const
-{
- const ANN_MLP_ANNEAL* this_ = dynamic_cast<const ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- return this_->getAnnealFinalT();
-}
-
-void ANN_MLP::setAnnealFinalT(double val)
-{
- ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- this_->setAnnealFinalT(val);
-}
-
-double ANN_MLP::getAnnealCoolingRatio() const
-{
- const ANN_MLP_ANNEAL* this_ = dynamic_cast<const ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- return this_->getAnnealCoolingRatio();
-}
-
-void ANN_MLP::setAnnealCoolingRatio(double val)
-{
- ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- this_->setAnnealCoolingRatio(val);
-}
-
-int ANN_MLP::getAnnealItePerStep() const
-{
- const ANN_MLP_ANNEAL* this_ = dynamic_cast<const ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- return this_->getAnnealItePerStep();
-}
-
-void ANN_MLP::setAnnealItePerStep(int val)
-{
- ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- this_->setAnnealItePerStep(val);
-}
-
-void ANN_MLP::setAnnealEnergyRNG(const RNG& rng)
-{
- ANN_MLP_ANNEAL* this_ = dynamic_cast<ANN_MLP_ANNEAL*>(this);
- if (!this_)
- CV_Error(Error::StsNotImplemented, "the class is not ANN_MLP_ANNEAL");
- this_->setAnnealEnergyRNG(rng);
-}
-
-class ANN_MLPImpl CV_FINAL : public ANN_MLP_ANNEAL
+class ANN_MLPImpl CV_FINAL : public ANN_MLP
{
public:
ANN_MLPImpl()
setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
}
- virtual ~ANN_MLPImpl() {}
+ virtual ~ANN_MLPImpl() CV_OVERRIDE {}
inline TermCriteria getTermCriteria() const CV_OVERRIDE { return params.termCrit; }
inline void setTermCriteria(TermCriteria val) CV_OVERRIDE { params.termCrit = val; }
TrainData::~TrainData() {}
-Mat TrainData::getTestSamples() const
-{
- Mat idx = getTestSampleIdx();
- Mat samples = getSamples();
- return idx.empty() ? Mat() : getSubVector(samples, idx);
-}
-
Mat TrainData::getSubVector(const Mat& vec, const Mat& idx)
{
if( idx.empty() )
return subvec;
}
+
class TrainDataImpl CV_FINAL : public TrainData
{
public:
return layout == ROW_SAMPLE ? samples.cols : samples.rows;
}
+ Mat getTestSamples() const CV_OVERRIDE
+ {
+ Mat idx = getTestSampleIdx();
+ return idx.empty() ? Mat() : getSubVector(samples, idx);
+ }
+
Mat getSamples() const CV_OVERRIDE { return samples; }
Mat getResponses() const CV_OVERRIDE { return responses; }
Mat getMissing() const CV_OVERRIDE { return missing; }
}
}
+ void getNames(std::vector<String>& names) const CV_OVERRIDE
+ {
+ size_t n = nameMap.size();
+ TrainDataImpl::MapType::const_iterator it = nameMap.begin(),
+ it_end = nameMap.end();
+ names.resize(n+1);
+ names[0] = "?";
+ for( ; it != it_end; ++it )
+ {
+ String s = it->first;
+ int label = it->second;
+ CV_Assert( label > 0 && label <= (int)n );
+ names[label] = s;
+ }
+ }
+
+ Mat getVarSymbolFlags() const CV_OVERRIDE
+ {
+ return varSymbolFlags;
+ }
+
FILE* file;
int layout;
Mat samples, missing, varType, varIdx, varSymbolFlags, responses, missingSubst;
MapType nameMap;
};
-void TrainData::getNames(std::vector<String>& names) const
-{
- const TrainDataImpl* impl = dynamic_cast<const TrainDataImpl*>(this);
- CV_Assert(impl != 0);
- size_t n = impl->nameMap.size();
- TrainDataImpl::MapType::const_iterator it = impl->nameMap.begin(),
- it_end = impl->nameMap.end();
- names.resize(n+1);
- names[0] = "?";
- for( ; it != it_end; ++it )
- {
- String s = it->first;
- int label = it->second;
- CV_Assert( label > 0 && label <= (int)n );
- names[label] = s;
- }
-}
-
-Mat TrainData::getVarSymbolFlags() const
-{
- const TrainDataImpl* impl = dynamic_cast<const TrainDataImpl*>(this);
- CV_Assert(impl != 0);
- return impl->varSymbolFlags;
-}
Ptr<TrainData> TrainData::loadFromCSV(const String& filename,
int headerLines,
inline void setRegressionAccuracy(float val) CV_OVERRIDE { impl.params.setRegressionAccuracy(val); }
inline cv::Mat getPriors() const CV_OVERRIDE { return impl.params.getPriors(); }
inline void setPriors(const cv::Mat& val) CV_OVERRIDE { impl.params.setPriors(val); }
+ inline void getVotes(InputArray input, OutputArray output, int flags) const CV_OVERRIDE {return impl.getVotes(input,output,flags);}
RTreesImpl() {}
virtual ~RTreesImpl() CV_OVERRIDE {}
impl.read(fn);
}
- void getVotes_( InputArray samples, OutputArray results, int flags ) const
- {
- CV_TRACE_FUNCTION();
- impl.getVotes(samples, results, flags);
- }
-
Mat getVarImportance() const CV_OVERRIDE { return Mat_<float>(impl.varImportance, true); }
int getVarCount() const CV_OVERRIDE { return impl.getVarCount(); }
return Algorithm::load<RTrees>(filepath, nodeName);
}
-void RTrees::getVotes(InputArray input, OutputArray output, int flags) const
-{
- CV_TRACE_FUNCTION();
- const RTreesImpl* this_ = dynamic_cast<const RTreesImpl*>(this);
- if(!this_)
- CV_Error(Error::StsNotImplemented, "the class is not RTreesImpl");
- return this_->getVotes_(input, output, flags);
-}
-
}}
// End of file.
uncompressed_sv.release();
}
- Mat getUncompressedSupportVectors_() const
+ Mat getUncompressedSupportVectors() const CV_OVERRIDE
{
return uncompressed_sv;
}
bool returnDFVal;
};
- bool trainAuto_(InputArray samples, int layout,
+ bool trainAuto(InputArray samples, int layout,
InputArray responses, int kfold, Ptr<ParamGrid> Cgrid,
Ptr<ParamGrid> gammaGrid, Ptr<ParamGrid> pGrid, Ptr<ParamGrid> nuGrid,
- Ptr<ParamGrid> coeffGrid, Ptr<ParamGrid> degreeGrid, bool balanced)
+ Ptr<ParamGrid> coeffGrid, Ptr<ParamGrid> degreeGrid, bool balanced) CV_OVERRIDE
{
Ptr<TrainData> data = TrainData::create(samples, layout, responses);
return this->trainAuto(
return svm;
}
-Mat SVM::getUncompressedSupportVectors() const
-{
- const SVMImpl* this_ = dynamic_cast<const SVMImpl*>(this);
- if(!this_)
- CV_Error(Error::StsNotImplemented, "the class is not SVMImpl");
- return this_->getUncompressedSupportVectors_();
-}
-
-bool SVM::trainAuto(InputArray samples, int layout,
- InputArray responses, int kfold, Ptr<ParamGrid> Cgrid,
- Ptr<ParamGrid> gammaGrid, Ptr<ParamGrid> pGrid, Ptr<ParamGrid> nuGrid,
- Ptr<ParamGrid> coeffGrid, Ptr<ParamGrid> degreeGrid, bool balanced)
-{
- SVMImpl* this_ = dynamic_cast<SVMImpl*>(this);
- if (!this_) {
- CV_Error(Error::StsNotImplemented, "the class is not SVMImpl");
- }
- return this_->trainAuto_(samples, layout, responses,
- kfold, Cgrid, gammaGrid, pGrid, nuGrid, coeffGrid, degreeGrid, balanced);
-}
}
}
#ifdef GENERATE_TESTDATA
{
- Ptr<ml::ANN_MLP> xx = ml::ANN_MLP_ANNEAL::create();
+ Ptr<ml::ANN_MLP> xx = ml::ANN_MLP::create();
Mat_<int> layerSizesXX(1, 4);
layerSizesXX(0, 0) = tdata->getNVars();
layerSizesXX(0, 1) = 30;
{
FileStorage fs;
fs.open(dataname + "_init_weight.yml.gz", FileStorage::READ);
- Ptr<ml::ANN_MLP> x = ml::ANN_MLP_ANNEAL::create();
+ Ptr<ml::ANN_MLP> x = ml::ANN_MLP::create();
x->read(fs.root());
x->setTrainMethod(methodType);
if (methodType == ml::ANN_MLP::ANNEAL)
regions are calculated rapidly using integral images (see below and the integral description).
To see the object detector at work, have a look at the facedetect demo:
-<https://github.com/opencv/opencv/tree/3.4/samples/cpp/dbt_face_detection.cpp>
+<https://github.com/opencv/opencv/tree/master/samples/cpp/dbt_face_detection.cpp>
The following reference is for the detection part only. There is a separate application called
opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
#include "opencv2/objdetect/detection_based_tracker.hpp"
-#ifndef DISABLE_OPENCV_24_COMPATIBILITY
-#include "opencv2/objdetect/objdetect_c.h"
-#endif
-
#endif
#include <opencv2/core.hpp>
-// After this condition removal update blacklist for bindings: modules/python/common.cmake
-#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(__ANDROID__) || \
- defined(CV_CXX11)
-
#include <vector>
namespace cv
//! @} objdetect
} //end of cv namespace
-#endif
#endif
namespace ocl {
///////////// HOG////////////////////////
-#ifdef CV_CXX11
struct RectLess
-#else
-struct RectLess : public std::binary_function<cv::Rect, cv::Rect, bool>
-#endif
{
bool operator()(const cv::Rect& a,
const cv::Rect& b) const
Ptr<BaseCascadeClassifier::MaskGenerator> createFaceDetectionMaskGenerator()
{
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra())
- return tegra::getCascadeClassifierMaskGenerator();
-#endif
return Ptr<BaseCascadeClassifier::MaskGenerator>();
}
//M*/
#include "precomp.hpp"
-#include <cassert>
-
-#ifdef CV_CXX11
-#define USE_STD_THREADS
-#endif
-
-#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(__ANDROID__) || defined(USE_STD_THREADS)
#include "opencv2/core/utility.hpp"
-#ifdef USE_STD_THREADS
#include <thread>
#include <mutex>
#include <condition_variable>
-#else //USE_STD_THREADS
-#include <pthread.h>
-#endif //USE_STD_THREADS
#if defined(DEBUG) || defined(_DEBUG)
#undef DEBUGLOGS
}
void setParameters(const cv::DetectionBasedTracker::Parameters& params)
{
-#ifdef USE_STD_THREADS
std::unique_lock<std::mutex> mtx_lock(mtx);
-#else
- pthread_mutex_lock(&mutex);
-#endif
parameters = params;
-#ifndef USE_STD_THREADS
- pthread_mutex_unlock(&mutex);
-#endif
}
inline void init()
{
-#ifdef USE_STD_THREADS
std::unique_lock<std::mutex> mtx_lock(mtx);
-#else
- pthread_mutex_lock(&mutex);
-#endif
stateThread = STATE_THREAD_STOPPED;
isObjectDetectingReady = false;
shouldObjectDetectingResultsBeForgot = false;
-#ifdef USE_STD_THREADS
objectDetectorThreadStartStop.notify_one();
-#else
- pthread_cond_signal(&(objectDetectorThreadStartStop));
- pthread_mutex_unlock(&mutex);
-#endif
}
protected:
DetectionBasedTracker& detectionBasedTracker;
cv::Ptr<DetectionBasedTracker::IDetector> cascadeInThread;
-#ifdef USE_STD_THREADS
std::thread second_workthread;
std::mutex mtx;
std::condition_variable objectDetectorRun;
std::condition_variable objectDetectorThreadStartStop;
-#else
- pthread_t second_workthread;
- pthread_mutex_t mutex;
- pthread_cond_t objectDetectorRun;
- pthread_cond_t objectDetectorThreadStartStop;
-#endif
std::vector<cv::Rect> resultDetect;
volatile bool isObjectDetectingReady;
volatile bool shouldObjectDetectingResultsBeForgot;
CV_Assert(_detector);
cascadeInThread = _detector;
-#ifndef USE_STD_THREADS
- second_workthread = 0;
- int res=0;
- res=pthread_mutex_init(&mutex, NULL);//TODO: should be attributes?
- if (res) {
- LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_mutex_init(&mutex, NULL) is %d", res);
- throw(std::exception());
- }
- res=pthread_cond_init (&objectDetectorRun, NULL);
- if (res) {
- LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_cond_init(&objectDetectorRun,, NULL) is %d", res);
- pthread_mutex_destroy(&mutex);
- throw(std::exception());
- }
- res=pthread_cond_init (&objectDetectorThreadStartStop, NULL);
- if (res) {
- LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_cond_init(&objectDetectorThreadStartStop,, NULL) is %d", res);
- pthread_cond_destroy(&objectDetectorRun);
- pthread_mutex_destroy(&mutex);
- throw(std::exception());
- }
-#endif
}
cv::DetectionBasedTracker::SeparateDetectionWork::~SeparateDetectionWork()
if(stateThread!=STATE_THREAD_STOPPED) {
LOGE("\n\n\nATTENTION!!! dangerous algorithm error: destructor DetectionBasedTracker::DetectionBasedTracker::~SeparateDetectionWork is called before stopping the workthread");
}
-#ifndef USE_STD_THREADS
- pthread_cond_destroy(&objectDetectorThreadStartStop);
- pthread_cond_destroy(&objectDetectorRun);
- pthread_mutex_destroy(&mutex);
-#else
second_workthread.join();
-#endif
}
bool cv::DetectionBasedTracker::SeparateDetectionWork::run()
{
LOGD("DetectionBasedTracker::SeparateDetectionWork::run() --- start");
-#ifdef USE_STD_THREADS
std::unique_lock<std::mutex> mtx_lock(mtx);
// unlocked when leaving scope
-#else
- pthread_mutex_lock(&mutex);
-#endif
if (stateThread != STATE_THREAD_STOPPED) {
LOGE("DetectionBasedTracker::SeparateDetectionWork::run is called while the previous run is not stopped");
-#ifndef USE_STD_THREADS
- pthread_mutex_unlock(&mutex);
-#endif
return false;
}
stateThread=STATE_THREAD_WORKING_SLEEPING;
-#ifdef USE_STD_THREADS
second_workthread = std::thread(workcycleObjectDetectorFunction, (void*)this); //TODO: add attributes?
objectDetectorThreadStartStop.wait(mtx_lock);
-#else
- pthread_create(&second_workthread, NULL, workcycleObjectDetectorFunction, (void*)this); //TODO: add attributes?
- pthread_cond_wait(&objectDetectorThreadStartStop, &mutex);
- pthread_mutex_unlock(&mutex);
-#endif
LOGD("DetectionBasedTracker::SeparateDetectionWork::run --- end");
return true;
}
std::vector<Rect> objects;
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
-#ifdef USE_STD_THREADS
std::unique_lock<std::mutex> mtx_lock(mtx);
-#else
- pthread_mutex_lock(&mutex);
-#endif
{
-#ifdef USE_STD_THREADS
objectDetectorThreadStartStop.notify_one();
-#else
- pthread_cond_signal(&objectDetectorThreadStartStop);
-#endif
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- before waiting");
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
-#ifdef USE_STD_THREADS
objectDetectorRun.wait(mtx_lock);
-#else
- pthread_cond_wait(&objectDetectorRun, &mutex);
-#endif
if (isWorking()) {
stateThread=STATE_THREAD_WORKING_WITH_IMAGE;
}
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- after waiting");
}
-#ifdef USE_STD_THREADS
mtx_lock.unlock();
-#else
- pthread_mutex_unlock(&mutex);
-#endif
bool isFirstStep=true;
if (! isFirstStep) {
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- before waiting");
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
-#ifdef USE_STD_THREADS
mtx_lock.lock();
-#else
- pthread_mutex_lock(&mutex);
-#endif
if (!isWorking()) {//it is a rare case, but may cause a crash
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- go out from the workcycle from inner part of lock just before waiting");
-#ifdef USE_STD_THREADS
mtx_lock.unlock();
-#else
- pthread_mutex_unlock(&mutex);
-#endif
break;
}
CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
-#ifdef USE_STD_THREADS
objectDetectorRun.wait(mtx_lock);
-#else
- pthread_cond_wait(&objectDetectorRun, &mutex);
-#endif
if (isWorking()) {
stateThread=STATE_THREAD_WORKING_WITH_IMAGE;
}
-#ifdef USE_STD_THREADS
mtx_lock.unlock();
-#else
- pthread_mutex_unlock(&mutex);
-#endif
LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- after waiting");
} else {
(void)(dt_detect_ms);
LOGI("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- objects num==%d, t_ms=%.4f", (int)objects.size(), dt_detect_ms);
-#ifdef USE_STD_THREADS
mtx_lock.lock();
-#else
- pthread_mutex_lock(&mutex);
-#endif
if (!shouldObjectDetectingResultsBeForgot) {
resultDetect=objects;
isObjectDetectingReady=true;
if(isWorking()) {
stateThread=STATE_THREAD_WORKING_SLEEPING;
}
-#ifdef USE_STD_THREADS
mtx_lock.unlock();
-#else
- pthread_mutex_unlock(&mutex);
-#endif
objects.clear();
}// while(isWorking())
void cv::DetectionBasedTracker::SeparateDetectionWork::stop()
{
//FIXME: TODO: should add quickStop functionality
-#ifdef USE_STD_THREADS
std::unique_lock<std::mutex> mtx_lock(mtx);
-#else
- pthread_mutex_lock(&mutex);
-#endif
if (!isWorking()) {
-#ifdef USE_STD_THREADS
mtx_lock.unlock();
-#else
- pthread_mutex_unlock(&mutex);
-#endif
LOGE("SimpleHighguiDemoCore::stop is called but the SimpleHighguiDemoCore pthread is not active");
stateThread = STATE_THREAD_STOPPING;
return;
}
stateThread=STATE_THREAD_STOPPING;
LOGD("DetectionBasedTracker::SeparateDetectionWork::stop: before going to sleep to wait for the signal from the workthread");
-#ifdef USE_STD_THREADS
objectDetectorRun.notify_one();
objectDetectorThreadStartStop.wait(mtx_lock);
LOGD("DetectionBasedTracker::SeparateDetectionWork::stop: after receiving the signal from the workthread, stateThread=%d", (int)stateThread);
mtx_lock.unlock();
-#else
- pthread_cond_signal(&objectDetectorRun);
- pthread_cond_wait(&objectDetectorThreadStartStop, &mutex);
- LOGD("DetectionBasedTracker::SeparateDetectionWork::stop: after receiving the signal from the workthread, stateThread=%d", (int)stateThread);
- pthread_mutex_unlock(&mutex);
-#endif
}
void cv::DetectionBasedTracker::SeparateDetectionWork::resetTracking()
{
LOGD("DetectionBasedTracker::SeparateDetectionWork::resetTracking");
-#ifdef USE_STD_THREADS
std::unique_lock<std::mutex> mtx_lock(mtx);
-#else
- pthread_mutex_lock(&mutex);
-#endif
if (stateThread == STATE_THREAD_WORKING_WITH_IMAGE) {
LOGD("DetectionBasedTracker::SeparateDetectionWork::resetTracking: since workthread is detecting objects at the moment, we should make cascadeInThread stop detecting and forget the detecting results");
resultDetect.clear();
isObjectDetectingReady=false;
-#ifdef USE_STD_THREADS
mtx_lock.unlock();
-#else
- pthread_mutex_unlock(&mutex);
-#endif
-
}
bool cv::DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread(const Mat& imageGray, std::vector<Rect>& rectsWhereRegions)
bool shouldHandleResult = false;
-#ifdef USE_STD_THREADS
std::unique_lock<std::mutex> mtx_lock(mtx);
-#else
- pthread_mutex_lock(&mutex);
-#endif
if (isObjectDetectingReady) {
shouldHandleResult=true;
timeWhenDetectingThreadStartedWork = getTickCount() ;
-#ifdef USE_STD_THREADS
objectDetectorRun.notify_one();
-#else
- pthread_cond_signal(&objectDetectorRun);
-#endif
}
-#ifdef USE_STD_THREADS
mtx_lock.unlock();
-#else
- pthread_mutex_unlock(&mutex);
-#endif
LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: result: shouldHandleResult=%d", (shouldHandleResult?1:0));
return shouldHandleResult;
{
return parameters;
}
-
-#endif //defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(__ANDROID__) || defined(USE_STD_THREADS)
#include "opencv2/core/ocl.hpp"
#include "opencv2/core/private.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/objdetect/objdetect_tegra.hpp"
-#endif
-
#endif
} // cv
-#ifndef DISABLE_OPENCV_24_COMPATIBILITY
-#include "opencv2/photo/photo_c.h"
-#endif
-
#endif
switch (normType) {
case NORM_L2:
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if(hn == 1 && tegra::useTegra() &&
- tegra::fastNlMeansDenoising(src, dst, h[0], templateWindowSize, searchWindowSize))
- return;
-#endif
switch (depth) {
case CV_8U:
fastNlMeansDenoising_<uchar, int, unsigned, DistSquared>(src, dst, h,
#include "opencv2/core/ocl.hpp"
#include "opencv2/imgproc.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/photo/photo_tegra.hpp"
-#endif
-
#endif
# Writing ...
expected = np.array([[[0, 1, 2, 3, 4]]])
+ expected_str = ("Hello", "World", "!")
fs = cv.FileStorage(fname, cv.FILE_STORAGE_WRITE)
fs.write("test", expected)
+ fs.write("strings", expected_str)
fs.release()
# Reading ...
fs = cv.FileStorage(fname, cv.FILE_STORAGE_READ)
root = fs.getFirstTopLevelNode()
self.assertEqual(root.name(), "test")
+
test = fs.getNode("test")
self.assertEqual(test.empty(), False)
self.assertEqual(test.name(), "test")
actual = test.mat()
self.assertEqual(actual.shape, expected.shape)
self.assertEqual(np.array_equal(expected, actual), True)
+
+ strings = fs.getNode("strings")
+ self.assertEqual(strings.isSeq(), True)
+ self.assertEqual(strings.size(), len(expected_str))
+ self.assertEqual(all(strings.at(i).isString() for i in range(strings.size())), True)
+ self.assertSequenceEqual([strings.at(i).string() for i in range(strings.size())], expected_str)
fs.release()
os.remove(fname)
{
Mat src;
Mat weight;
-#ifdef HAVE_TEGRA_OPTIMIZATION
- src = _src.getMat();
- weight = _weight.getMat();
- if(tegra::useTegra() && tegra::normalizeUsingWeightMap(weight, src))
- return;
-#endif
#ifdef HAVE_OPENCL
if ( !cv::ocl::isOpenCLActivated() ||
void createLaplacePyr(InputArray img, int num_levels, std::vector<UMat> &pyr)
{
-#ifdef HAVE_TEGRA_OPTIMIZATION
- cv::Mat imgMat = img.getMat();
- if(tegra::useTegra() && tegra::createLaplacePyr(imgMat, num_levels, pyr))
- return;
-#endif
-
pyr.resize(num_levels + 1);
if(img.depth() == CV_8U)
CV_Assert(features1.descriptors.type() == features2.descriptors.type());
CV_Assert(features2.descriptors.depth() == CV_8U || features2.descriptors.depth() == CV_32F);
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::match2nearest(features1, features2, matches_info, match_conf_))
- return;
-#endif
-
matches_info.matches.clear();
Ptr<cv::DescriptorMatcher> matcher;
#include "opencv2/core/private.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-# include "opencv2/stitching/stitching_tegra.hpp"
-#endif
-
#include "util_log.hpp"
#endif
{
template<typename T>
-#ifdef CV_CXX11
struct RectLess_
-#else
-struct RectLess_ : public std::binary_function<cv::Rect_<T>, cv::Rect_<T>, bool>
-#endif
{
bool operator()(const cv::Rect_<T>& r1, const cv::Rect_<T>& r2) const
{
typedef RectLess_<int> RectLess;
-#ifdef CV_CXX11
struct KeypointGreater
-#else
-struct KeypointGreater : public std::binary_function<cv::KeyPoint, cv::KeyPoint, bool>
-#endif
{
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
{
return false;
}
-#ifdef CV_CXX11
struct KeyPointLess
-#else
- struct KeyPointLess : std::binary_function<cv::KeyPoint, cv::KeyPoint, bool>
-#endif
{
bool operator()(const cv::KeyPoint& kp1, const cv::KeyPoint& kp2) const
{
#include <limits.h>
#include "opencv2/imgproc/types_c.h"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "tegra.hpp"
-#endif
-
using namespace cv;
namespace cvtest
::testing::Test::RecordProperty("cv_cpu_features", cpu_features);
if (useStdOut) std::cout << "CPU features: " << cpu_features << std::endl;
-#ifdef HAVE_TEGRA_OPTIMIZATION
- const char * tegra_optimization = tegra::useTegra() && tegra::isDeviceSupported() ? "enabled" : "disabled";
- ::testing::Test::RecordProperty("cv_tegra_optimization", tegra_optimization);
- if (useStdOut) std::cout << "Tegra optimization: " << tegra_optimization << std::endl;
-#endif
-
#ifdef HAVE_IPP
const char * ipp_optimization = cv::ipp::useIPP()? "enabled" : "disabled";
::testing::Test::RecordProperty("cv_ipp_optimization", ipp_optimization);
#include "opencv2/video/tracking.hpp"
#include "opencv2/video/background_segm.hpp"
-#ifndef DISABLE_OPENCV_24_COMPATIBILITY
-#include "opencv2/video/tracking_c.h"
-#endif
-
#endif //OPENCV_VIDEO_HPP
CV_Assert(depth == CV_8U);
dst.create(rows, cols, CV_MAKETYPE(DataType<deriv_type>::depth, cn*2));
-#ifdef HAVE_TEGRA_OPTIMIZATION
- if (tegra::useTegra() && tegra::calcSharrDeriv(src, dst))
- return;
-#endif
-
int x, y, delta = (int)alignSize((cols + 2)*cn, 16);
AutoBuffer<deriv_type> _tempBuf(delta*2 + 64);
deriv_type *trow0 = alignPtr(_tempBuf.data() + cn, 16), *trow1 = alignPtr(trow0 + delta, 16);
CV_Assert(prevPyr[level * lvlStep1].size() == nextPyr[level * lvlStep2].size());
CV_Assert(prevPyr[level * lvlStep1].type() == nextPyr[level * lvlStep2].type());
-#ifdef HAVE_TEGRA_OPTIMIZATION
- typedef tegra::LKTrackerInvoker<cv::detail::LKTrackerInvoker> LKTrackerInvoker;
-#else
typedef cv::detail::LKTrackerInvoker LKTrackerInvoker;
-#endif
-
parallel_for_(Range(0, npoints), LKTrackerInvoker(prevPyr[level * lvlStep1], derivI,
nextPyr[level * lvlStep2], prevPts, nextPts,
status, err,
#include "opencv2/core/ocl.hpp"
#include "opencv2/core.hpp"
-#ifdef HAVE_TEGRA_OPTIMIZATION
-#include "opencv2/video/video_tegra.hpp"
-#endif
-
#endif
list(APPEND VIDEOIO_LIBRARIES ${INTELPERC_LIBRARIES})
endif(HAVE_INTELPERC)
+if(HAVE_LIBREALSENSE)
+ list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_librealsense.cpp)
+ ocv_include_directories(${LIBREALSENSE_INCLUDE_DIR})
+ list(APPEND VIDEOIO_LIBRARIES ${LIBREALSENSE_LIBRARIES})
+endif(HAVE_LIBREALSENSE)
+
if(HAVE_GPHOTO2)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_gphoto2.cpp)
endif(HAVE_GPHOTO2)
--- /dev/null
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "precomp.hpp"
+
+#ifdef HAVE_LIBREALSENSE
+#include "cap_librealsense.hpp"
+
+namespace cv
+{
+
+VideoCapture_LibRealsense::VideoCapture_LibRealsense(int) : mAlign(RS2_STREAM_COLOR)
+{
+    try
+    {
+        rs2::config config;
+        // Configure all streams to run at VGA resolution at default fps
+        config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16);
+        config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_BGR8);
+        config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8);
+        // Pass the configuration to the pipeline. Calling start() without it
+        // (as before) silently discarded the stream settings built above.
+        mPipe.start(config);
+    }
+    catch (const rs2::error&)
+    {
+        // Swallow the failure: isOpened() will report the unstarted pipeline.
+    }
+}
+VideoCapture_LibRealsense::~VideoCapture_LibRealsense(){}
+
+double VideoCapture_LibRealsense::getProperty(int prop) const
+{
+    // Only the depth-scale query is implemented; every other property reads as 0.
+    if (prop == CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE)
+    {
+        return mPipe.get_active_profile().get_device().first<rs2::depth_sensor>().get_depth_scale();
+    }
+    return 0.0;
+}
+bool VideoCapture_LibRealsense::setProperty(int, double)
+{
+    // No properties are adjustable through this backend yet.
+    return false;
+}
+
+bool VideoCapture_LibRealsense::grabFrame()
+{
+    if (!isOpened())
+        return false;
+
+    try
+    {
+        // Block until a full frameset arrives, aligned to the color stream
+        // (mAlign was constructed with RS2_STREAM_COLOR).
+        mData = mAlign.process(mPipe.wait_for_frames());
+        return true;
+    }
+    catch (const rs2::error&)
+    {
+        return false;
+    }
+}
+bool VideoCapture_LibRealsense::retrieveFrame(int outputType, cv::OutputArray frame)
+{
+    // Pick the requested stream from the last grabbed frameset and the
+    // matching OpenCV element type.
+    rs2::video_frame rsFrame(nullptr);
+    int matType;
+    switch (outputType)
+    {
+    case CAP_INTELPERC_DEPTH_MAP:
+        rsFrame = mData.get_depth_frame().as<rs2::video_frame>();
+        matType = CV_16UC1;
+        break;
+    case CAP_INTELPERC_IR_MAP:
+        rsFrame = mData.get_infrared_frame();
+        matType = CV_8UC1;
+        break;
+    case CAP_INTELPERC_IMAGE:
+        rsFrame = mData.get_color_frame();
+        matType = CV_8UC3;
+        break;
+    default:
+        return false;
+    }
+
+    try
+    {
+        // The pixels are copied immediately, so dropping const here is safe.
+        void* data = const_cast<void*>(rsFrame.get_data());
+        Mat(rsFrame.get_height(), rsFrame.get_width(), matType, data, rsFrame.get_stride_in_bytes()).copyTo(frame);
+
+        // The device may deliver RGB even though BGR was requested; OpenCV
+        // convention is BGR, so convert when needed.
+        if (rsFrame.get_profile().format() == RS2_FORMAT_RGB8)
+            cvtColor(frame, frame, COLOR_RGB2BGR);
+    }
+    catch (const rs2::error&)
+    {
+        return false;
+    }
+
+    return true;
+}
+int VideoCapture_LibRealsense::getCaptureDomain()
+{
+    // RealSense devices are exposed through the Intel PerC capture domain.
+    return CAP_INTELPERC;
+}
+
+bool VideoCapture_LibRealsense::isOpened() const
+{
+    // rs2::pipeline converts to its underlying shared_ptr handle; a null
+    // handle here means the pipeline never started (constructor caught an
+    // rs2::error) — presumably no device was connected. TODO confirm.
+    return bool(std::shared_ptr<rs2_pipeline>(mPipe));
+}
+
+}
+
+#endif
--- /dev/null
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef _CAP_LIBREALSENSE_HPP_
+#define _CAP_LIBREALSENSE_HPP_
+
+#ifdef HAVE_LIBREALSENSE
+
+#include <librealsense2/rs.hpp>
+
+namespace cv
+{
+
+class VideoCapture_LibRealsense : public IVideoCapture
+{
+public:
+    // index is currently ignored; the default RealSense device is opened.
+    VideoCapture_LibRealsense(int index);
+    virtual ~VideoCapture_LibRealsense();
+
+    // Returns 0 for everything except CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE,
+    // which reports the device depth scale.
+    virtual double getProperty(int propIdx) const CV_OVERRIDE;
+    // No settable properties yet; always returns false.
+    virtual bool setProperty(int propIdx, double propVal) CV_OVERRIDE;
+
+    // Waits for the next frameset, aligned to the color stream.
+    virtual bool grabFrame() CV_OVERRIDE;
+    // outputType selects depth / IR / color via the CAP_INTELPERC_* constants.
+    virtual bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE;
+    virtual int getCaptureDomain() CV_OVERRIDE;
+    virtual bool isOpened() const CV_OVERRIDE;
+protected:
+    rs2::pipeline mPipe;   // librealsense streaming pipeline
+    rs2::frameset mData;   // last grabbed (aligned) frameset
+    rs2::align    mAlign;  // aligns depth/IR frames to the color stream
+};
+
+}
+
+#endif
+#endif
V4L2_PIX_FMT_MJPEG,
V4L2_PIX_FMT_JPEG,
#endif
- V4L2_PIX_FMT_Y16
+ V4L2_PIX_FMT_Y16,
+ V4L2_PIX_FMT_GREY
};
for (size_t i = 0; i < sizeof(try_order) / sizeof(__u32); i++) {
case V4L2_PIX_FMT_MJPEG:
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_Y16:
+ case V4L2_PIX_FMT_GREY:
return 1;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
cvtColor(gray8,Mat(height, width, CV_8UC3, dst),COLOR_GRAY2BGR);
}
+static inline void
+y8_to_rgb24 (int width, int height, unsigned char* src, unsigned char* dst)
+{
+ Mat gray8(height, width, CV_8UC1, src);
+ cvtColor(gray8,Mat(height, width, CV_8UC3, dst),COLOR_GRAY2BGR);
+}
+
#ifdef HAVE_JPEG
/* convert from mjpeg to rgb24 */
capture->frame.imageSize);
}
break;
+ case V4L2_PIX_FMT_GREY:
+ if(capture->convert_rgb){
+ y8_to_rgb24(capture->form.fmt.pix.width,
+ capture->form.fmt.pix.height,
+ (unsigned char*)capture->buffers[capture->bufferIndex].start,
+ (unsigned char*)capture->frame.imageData);
+ }else{
+ memcpy((char *)capture->frame.imageData,
+ (char *)capture->buffers[capture->bufferIndex].start,
+ capture->frame.imageSize);
+ }
+ break;
}
if (capture->returnFrame)
#include "videoio_registry.hpp"
#include "cap_intelperc.hpp"
+#include "cap_librealsense.hpp"
#include "cap_dshow.hpp"
#ifdef HAVE_MFX
#endif
#ifdef HAVE_INTELPERC
DECLARE_BACKEND(CAP_INTELPERC, "INTEL_PERC", MODE_CAPTURE_BY_INDEX),
+#elif defined(HAVE_LIBREALSENSE)
+ DECLARE_BACKEND(CAP_INTELPERC, "INTEL_REALSENSE", MODE_CAPTURE_BY_INDEX),
#endif
// OpenCV file-based only
case CAP_INTELPERC:
TRY_OPEN(makePtr<VideoCapture_IntelPerC>());
break;
+#elif defined(HAVE_LIBREALSENSE)
+ case CAP_INTELPERC:
+ TRY_OPEN(makePtr<VideoCapture_LibRealsense>(index));
+ break;
#endif
#ifdef WINRT_VIDEO
case CAP_WINRT:
/** @brief Returns the current pose of the viewer.
*/
- Affine3d getViewerPose();
+ Affine3d getViewerPose() const;
/** @brief Sets pose of the viewer.
Widget();
Widget(const Widget& other);
Widget& operator=(const Widget& other);
- ~Widget();
+ virtual ~Widget();
/** @brief Creates a widget from ply file.
void cv::viz::Viz3d::setCamera(const Camera &camera) { impl_->setCamera(camera); }
cv::viz::Camera cv::viz::Viz3d::getCamera() const { return impl_->getCamera(); }
void cv::viz::Viz3d::setViewerPose(const Affine3d &pose) { impl_->setViewerPose(pose); }
-cv::Affine3d cv::viz::Viz3d::getViewerPose() { return impl_->getViewerPose(); }
+cv::Affine3d cv::viz::Viz3d::getViewerPose() const { return impl_->getViewerPose(); }
void cv::viz::Viz3d::resetCameraViewpoint(const String &id) { impl_->resetCameraViewpoint(id); }
void cv::viz::Viz3d::resetCamera() { impl_->resetCamera(); }
CMAKE_TEMPLATE='''\
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+
+# Enable C++11
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
+
SET(PROJECT_NAME hello-android)
PROJECT(${PROJECT_NAME})
+
FIND_PACKAGE(OpenCV REQUIRED %(libset)s)
-INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
-INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})
FILE(GLOB srcs "*.cpp")
+
ADD_EXECUTABLE(${PROJECT_NAME} ${srcs})
TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${OpenCV_LIBS} dl z)
'''
{
(void)argc; (void)argv;
printf("%s\\n", message);
- Size textsize = getTextSize(message, CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
+ Size textsize = getTextSize(message, FONT_HERSHEY_COMPLEX, 3, 5, 0);
Mat img(textsize.height + 20, textsize.width + 20, CV_32FC1, Scalar(230,230,230));
- putText(img, message, Point(10, img.rows - 10), CV_FONT_HERSHEY_COMPLEX, 3, Scalar(0, 0, 0), 5);
+ putText(img, message, Point(10, img.rows - 10), FONT_HERSHEY_COMPLEX, 3, Scalar(0, 0, 0), 5);
imwrite("/mnt/sdcard/HelloAndroid.png", img);
return 0;
}
TEMPLATE_APPLICATION_MK = '''\
APP_STL := gnustl_static
-APP_CPPFLAGS := -frtti -fexceptions
+APP_CPPFLAGS := -frtti -fexceptions -std=c++11
APP_ABI := {abi}
APP_PLATFORM := android-9
'''
# Standalone mode
#
#===================================================================================================
-cmake_minimum_required(VERSION 2.8)
+cmake_minimum_required(VERSION 3.1)
+
+# Enable C++11
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
project(samples C CXX)
option(BUILD_EXAMPLES "Build samples" ON)
public class NativePart {
static
{
- System.loadLibrary("opencv_java3");
+ System.loadLibrary("opencv_java4");
System.loadLibrary("JNIpart");
}
--- /dev/null
+/**
+ @file ela.cpp
+ @author Alessandro de Oliveira Faria (A.K.A. CABELO)
+ @brief Error Level Analysis (ELA) permits identifying areas within an image that are at different compression levels. With JPEG images, the entire picture should be at roughly the same level. If a section of the image is at a significantly different error level, it likely indicates a digital modification. This example makes it possible to visually inspect the changes made in a JPG image based on its compression error analysis. Questions and suggestions email to: Alessandro de Oliveira Faria cabelo[at]opensuse[dot]org or OpenCV Team.
+ @date Jun 24, 2018
+*/
+
+#include <opencv2/highgui/highgui.hpp>
+#include <iostream>
+#include <vector>
+
+const char* keys =
+ "{ help h | | Print help message. }"
+ "{ input i | | Input image to calc ELA algorithm. }";
+
+using namespace cv;
+
+int scale_value = 7;
+int quality = 95;
+Mat image;
+Mat compressed_img;
+const char* decodedwin = "the recompressed image";
+const char* diffwin = "scaled difference between the original and recompressed images";
+
+static void processImage(int , void*)
+{
+    // Re-encode the source image as JPEG at the current trackbar quality.
+    std::vector<int> encodeParams;
+    encodeParams.push_back(IMWRITE_JPEG_QUALITY);
+    encodeParams.push_back(quality);
+
+    std::vector<uchar> jpegBuffer;
+    imencode(".jpg", image, jpegBuffer, encodeParams);
+    compressed_img = imdecode(jpegBuffer, 1);
+
+    // The scaled per-pixel difference between original and recompressed
+    // images exposes regions compressed at different error levels (ELA).
+    Mat difference;
+    absdiff(image, compressed_img, difference);
+    Mat ela;
+    difference.convertTo(ela, CV_8UC3, scale_value);
+
+    // Show both the recompressed image and the amplified difference.
+    imshow(decodedwin, compressed_img);
+    imshow(diffwin, ela);
+}
+
+int main(int argc, char* argv[])
+{
+    CommandLineParser parser(argc, argv, keys);
+    if (argc == 1 || parser.has("help"))
+    {
+        parser.printMessage();
+        std::cout << "\nJpeg Recompression Example:\n\t" << argv[0] << " --input=../../data/ela_modified.jpg\n";
+        return 0;
+    }
+
+    if (parser.has("input"))
+        image = imread(parser.get<String>("input"));
+
+    // Bail out if no image could be loaded.
+    if (image.empty())
+    {
+        std::cout << "> Error in load image\n";
+        return 0;
+    }
+
+    // Run the analysis once, then let the trackbars re-run it on every change.
+    processImage(0, 0);
+    createTrackbar("Scale", diffwin, &scale_value, 100, processImage);
+    createTrackbar("Quality", diffwin, &quality, 100, processImage);
+    waitKey(0);
+
+    return 0;
+}
# cmake needs this line
-cmake_minimum_required(VERSION 2.8)
+cmake_minimum_required(VERSION 3.1)
+
+# Enable C++11
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
# Define project name
project(opencv_example_project)
message(STATUS " libraries: ${OpenCV_LIBS}")
message(STATUS " include path: ${OpenCV_INCLUDE_DIRS}")
-if(CMAKE_VERSION VERSION_LESS "2.8.11")
- # Add OpenCV headers location to your include paths
- include_directories(${OpenCV_INCLUDE_DIRS})
-endif()
-
# Declare the executable target built from your sources
add_executable(opencv_example example.cpp)
"\tThis will detect only the face in image.jpg.\n";
cout << " \n\nThe classifiers for face and eyes can be downloaded from : "
- " \nhttps://github.com/opencv/opencv/tree/3.4/data/haarcascades";
+ " \nhttps://github.com/opencv/opencv/tree/master/data/haarcascades";
cout << "\n\nThe classifiers for nose and mouth can be downloaded from : "
" \nhttps://github.com/opencv/opencv_contrib/tree/master/modules/face/data/cascades\n";
| Model | Scale | Size WxH| Mean subtraction | Channels order |
|---------------|-------|-----------|--------------------|-------|
| [MobileNet-SSD, Caffe](https://github.com/chuanqi305/MobileNet-SSD/) | `0.00784 (2/255)` | `300x300` | `127.5 127.5 127.5` | BGR |
-| [OpenCV face detector](https://github.com/opencv/opencv/tree/3.4/samples/dnn/face_detector) | `1.0` | `300x300` | `104 177 123` | BGR |
+| [OpenCV face detector](https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector) | `1.0` | `300x300` | `104 177 123` | BGR |
| [SSDs from TensorFlow](https://github.com/tensorflow/models/tree/master/research/object_detection/) | `0.00784 (2/255)` | `300x300` | `127.5 127.5 127.5` | RGB |
| [YOLO](https://pjreddie.com/darknet/yolo/) | `0.00392 (1/255)` | `416x416` | `0 0 0` | RGB |
| [VGG16-SSD](https://github.com/weiliu89/caffe/tree/ssd) | `1.0` | `300x300` | `104 117 123` | BGR |
| [Faster-RCNN, InceptionV2 backbone](https://github.com/tensorflow/models/tree/master/research/object_detection/) | `0.00784 (2/255)` | `300x300` | `127.5 127.5 127.5` | RGB |
#### Face detection
-[An origin model](https://github.com/opencv/opencv/tree/3.4/samples/dnn/face_detector)
+[An origin model](https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector)
with single precision floating point weights has been quantized using [TensorFlow framework](https://www.tensorflow.org/).
To achieve the best accuracy run the model on BGR images resized to `300x300` applying mean subtraction
of values `(104, 177, 123)` for each blue, green and red channels correspondingly.
The following are accuracy metrics obtained using [COCO object detection evaluation
tool](http://cocodataset.org/#detections-eval) on [FDDB dataset](http://vis-www.cs.umass.edu/fddb/)
-(see [script](https://github.com/opencv/opencv/blob/3.4/modules/dnn/misc/face_detector_accuracy.py))
+(see [script](https://github.com/opencv/opencv/blob/master/modules/dnn/misc/face_detector_accuracy.py))
applying resize to `300x300` and keeping an origin images' sizes.
```
AP - Average Precision | FP32/FP16 | UINT8 | FP32/FP16 | UINT8 |
* [Models downloading script](https://github.com/opencv/opencv_extra/blob/master/testdata/dnn/download_models.py)
* [Configuration files adopted for OpenCV](https://github.com/opencv/opencv_extra/tree/master/testdata/dnn)
* [How to import models from TensorFlow Object Detection API](https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API)
-* [Names of classes from different datasets](https://github.com/opencv/opencv/tree/3.4/samples/data/dnn)
+* [Names of classes from different datasets](https://github.com/opencv/opencv/tree/master/samples/data/dnn)
function loadModels(callback) {
var utils = new Utils('');
- var proto = 'https://raw.githubusercontent.com/opencv/opencv/3.4/samples/dnn/face_detector/deploy.prototxt';
+ var proto = 'https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt';
var weights = 'https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel';
var recognModel = 'https://raw.githubusercontent.com/pyannote/pyannote-data/master/openface.nn4.small2.v1.t7';
utils.createFileFromUrl('face_detector.prototxt', proto, () => {