set(ENABLE_DYNAMIC_CUDA OFF)
endif()
+if(HAVE_CUDA AND NOT ENABLE_DYNAMIC_CUDA)
+ set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
+ if(HAVE_CUBLAS)
+ set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_cublas_LIBRARY})
+ endif()
+ if(HAVE_CUFFT)
+ set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_cufft_LIBRARY})
+ endif()
+endif()
+
# ----------------------------------------------------------------------------
# Solution folders:
# ----------------------------------------------------------------------------
# remove CUDA runtime and NPP from regular deps
# they can be added separately if needed.
- ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "libcu")
- ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "libnpp")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cusparse")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cufft")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cublas")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "npp")
+ ocv_list_filterout(OPENCV_EXTRA_COMPONENTS_CONFIGMAKE "cudart")
if(HAVE_CUDA)
  # CUDA runtime libraries are always required
OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_ALREADY_INCLUDED:=on
endif
-ifeq ($(OPENCV_USE_GPU_MODULE),on)
- include $(CLEAR_VARS)
- LOCAL_MODULE:=opencv_gpu
- LOCAL_SRC_FILES:=$(OPENCV_LIBS_DIR)/libopencv_gpu.a
- include $(PREBUILT_STATIC_LIBRARY)
+ifeq ($(OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_GPU_ALREADY_INCLUDED),)
+ ifeq ($(OPENCV_USE_GPU_MODULE),on)
+ include $(CLEAR_VARS)
+ LOCAL_MODULE:=opencv_gpu
+ LOCAL_SRC_FILES:=$(OPENCV_LIBS_DIR)/libopencv_gpu.a
+ include $(PREBUILT_STATIC_LIBRARY)
+ endif
+ OPENCV_MK_$(OPENCV_TARGET_ARCH_ABI)_GPU_ALREADY_INCLUDED:=on
endif
ifeq ($(OPENCV_LOCAL_CFLAGS),)
SET(OpenCV_EXTRA_LIBS_${__opttype} "")
# CUDA
- if(OpenCV_CUDA_VERSION AND (CMAKE_CROSSCOMPILING OR (WIN32 AND NOT OpenCV_SHARED)))
+ if(OpenCV_CUDA_VERSION)
if(NOT CUDA_FOUND)
find_package(CUDA ${OpenCV_CUDA_VERSION} EXACT REQUIRED)
else()
endif()
endif()
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_LIBRARIES})
+ set(OpenCV_CUDA_LIBS_ABSPATH ${CUDA_LIBRARIES})
if(${CUDA_VERSION} VERSION_LESS "5.5")
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_npp_LIBRARY})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_npp_LIBRARY})
else()
find_cuda_helper_libs(nppc)
find_cuda_helper_libs(nppi)
find_cuda_helper_libs(npps)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nppc_LIBRARY} ${CUDA_nppi_LIBRARY} ${CUDA_npps_LIBRARY})
endif()
if(OpenCV_USE_CUBLAS)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUBLAS_LIBRARIES})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_CUBLAS_LIBRARIES})
endif()
if(OpenCV_USE_CUFFT)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_CUFFT_LIBRARIES})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_CUFFT_LIBRARIES})
endif()
if(OpenCV_USE_NVCUVID)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nvcuvid_LIBRARIES})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nvcuvid_LIBRARIES})
endif()
if(WIN32)
- list(APPEND OpenCV_EXTRA_LIBS_${__opttype} ${CUDA_nvcuvenc_LIBRARIES})
+ list(APPEND OpenCV_CUDA_LIBS_ABSPATH ${CUDA_nvcuvenc_LIBRARIES})
endif()
+
+ set(OpenCV_CUDA_LIBS_RELPATH "")
+ foreach(l ${OpenCV_CUDA_LIBS_ABSPATH})
+ get_filename_component(_tmp ${l} PATH)
+ list(APPEND OpenCV_CUDA_LIBS_RELPATH ${_tmp})
+ endforeach()
+
+ list(REMOVE_DUPLICATES OpenCV_CUDA_LIBS_RELPATH)
+ link_directories(${OpenCV_CUDA_LIBS_RELPATH})
endif()
endforeach()
'oldbasicstructures' : ('http://docs.opencv.org/modules/core/doc/old_basic_structures.html#%s', None),
'readwriteimagevideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None),
'operationsonarrays' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#%s', None),
- 'utilitysystemfunctions':('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None),
- 'imgprocfilter':('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
- 'svms':('http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#%s', None),
- 'drawingfunc':('http://docs.opencv.org/modules/core/doc/drawing_functions.html#%s', None),
- 'xmlymlpers':('http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html#%s', None),
+ 'utilitysystemfunctions' : ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html#%s', None),
+ 'imgprocfilter' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
+ 'svms' : ('http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#%s', None),
+ 'drawingfunc' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#%s', None),
+ 'xmlymlpers' : ('http://docs.opencv.org/modules/core/doc/xml_yaml_persistence.html#%s', None),
'hgvideo' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html#%s', None),
'gpuinit' : ('http://docs.opencv.org/modules/gpu/doc/initalization_and_information.html#%s', None),
'gpudatastructure' : ('http://docs.opencv.org/modules/gpu/doc/data_structures.html#%s', None),
'gpuperelement' : ('http://docs.opencv.org/modules/gpu/doc/per_element_operations.html#%s', None),
'gpuimgproc' : ('http://docs.opencv.org/modules/gpu/doc/image_processing.html#%s', None),
'gpumatrixreduct' : ('http://docs.opencv.org/modules/gpu/doc/matrix_reductions.html#%s', None),
- 'filtering':('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
+ 'filtering' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html#%s', None),
'flann' : ('http://docs.opencv.org/modules/flann/doc/flann_fast_approximate_nearest_neighbor_search.html#%s', None ),
'calib3d' : ('http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#%s', None ),
'feature2d' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html#%s', None ),
'imgproc_geometric' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#%s', None ),
+ 'miscellaneous_transformations' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html#%s', None),
+ 'user_interface' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html#%s', None),
# 'opencv_group' : ('http://answers.opencv.org/%s', None),
'opencv_qa' : ('http://answers.opencv.org/%s', None),
'how_to_contribute' : ('http://code.opencv.org/projects/opencv/wiki/How_to_contribute/%s', None),
- 'cvt_color': ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None),
- 'imread': ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None),
- 'imwrite': ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None),
- 'imshow': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None),
- 'named_window': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None),
- 'wait_key': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None),
- 'add_weighted': ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=addweighted#addweighted%s', None),
- 'saturate_cast': ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html?highlight=saturate_cast#saturate-cast%s', None),
- 'mat_zeros': ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=zeros#mat-zeros%s', None),
- 'convert_to': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto%s', None),
- 'create_trackbar': ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=createtrackbar#createtrackbar%s', None),
- 'point': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#point%s', None),
- 'scalar': ('http://docs.opencv.org/modules/core/doc/basic_structures.html#scalar%s', None),
- 'line': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#line%s', None),
- 'ellipse': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#ellipse%s', None),
- 'rectangle': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#rectangle%s', None),
- 'circle': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#circle%s', None),
- 'fill_poly': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#fillpoly%s', None),
- 'rng': ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=rng#rng%s', None),
- 'put_text': ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#puttext%s', None),
- 'gaussian_blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur%s', None),
- 'blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=blur#blur%s', None),
- 'median_blur': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=medianblur#medianblur%s', None),
- 'bilateral_filter': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=bilateralfilter#bilateralfilter%s', None),
- 'erode': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=erode#erode%s', None),
- 'dilate': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=dilate#dilate%s', None),
- 'get_structuring_element': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=getstructuringelement#getstructuringelement%s', None),
- 'flood_fill': ( 'http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=floodfill#floodfill%s', None),
- 'morphology_ex': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=morphologyex#morphologyex%s', None),
- 'pyr_down': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrdown#pyrdown%s', None),
- 'pyr_up': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrup#pyrup%s', None),
- 'resize': ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#resize%s', None),
- 'threshold': ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold%s', None),
- 'filter2d': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=filter2d#filter2d%s', None),
- 'copy_make_border': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder%s', None),
- 'sobel': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=sobel#sobel%s', None),
- 'scharr': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=scharr#scharr%s', None),
- 'laplacian': ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian%s', None),
- 'canny': ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=canny#canny%s', None),
- 'copy_to': ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=copyto#mat-copyto%s', None),
+ 'cvt_color' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=cvtcolor#cvtcolor%s', None),
+ 'imread' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#imread%s', None),
+ 'imwrite' : ('http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imwrite#imwrite%s', None),
+ 'imshow' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=imshow#imshow%s', None),
+ 'named_window' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=namedwindow#namedwindow%s', None),
+ 'wait_key' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=waitkey#waitkey%s', None),
+ 'add_weighted' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=addweighted#addweighted%s', None),
+ 'saturate_cast' : ('http://docs.opencv.org/modules/core/doc/utility_and_system_functions_and_macros.html?highlight=saturate_cast#saturate-cast%s', None),
+ 'mat_zeros' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=zeros#mat-zeros%s', None),
+ 'convert_to' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#mat-convertto%s', None),
+ 'create_trackbar' : ('http://docs.opencv.org/modules/highgui/doc/user_interface.html?highlight=createtrackbar#createtrackbar%s', None),
+ 'point' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#point%s', None),
+ 'scalar' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html#scalar%s', None),
+ 'line' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#line%s', None),
+ 'ellipse' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#ellipse%s', None),
+ 'rectangle' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#rectangle%s', None),
+ 'circle' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#circle%s', None),
+ 'fill_poly' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#fillpoly%s', None),
+ 'rng' : ('http://docs.opencv.org/modules/core/doc/operations_on_arrays.html?highlight=rng#rng%s', None),
+ 'put_text' : ('http://docs.opencv.org/modules/core/doc/drawing_functions.html#puttext%s', None),
+ 'gaussian_blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur%s', None),
+ 'blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=blur#blur%s', None),
+ 'median_blur' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=medianblur#medianblur%s', None),
+ 'bilateral_filter' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=bilateralfilter#bilateralfilter%s', None),
+ 'erode' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=erode#erode%s', None),
+ 'dilate' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=dilate#dilate%s', None),
+ 'get_structuring_element' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=getstructuringelement#getstructuringelement%s', None),
+ 'flood_fill' : ( 'http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=floodfill#floodfill%s', None),
+ 'morphology_ex' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=morphologyex#morphologyex%s', None),
+ 'pyr_down' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrdown#pyrdown%s', None),
+ 'pyr_up' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=pyrup#pyrup%s', None),
+ 'resize' : ('http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#resize%s', None),
+ 'threshold' : ('http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold%s', None),
+ 'filter2d' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=filter2d#filter2d%s', None),
+ 'copy_make_border' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder%s', None),
+ 'sobel' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=sobel#sobel%s', None),
+ 'scharr' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=scharr#scharr%s', None),
+ 'laplacian' : ('http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian%s', None),
+ 'canny' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=canny#canny%s', None),
+ 'copy_to' : ('http://docs.opencv.org/modules/core/doc/basic_structures.html?highlight=copyto#mat-copyto%s', None),
'hough_lines' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghlines#houghlines%s', None),
'hough_lines_p' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghlinesp#houghlinesp%s', None),
'hough_circles' : ('http://docs.opencv.org/modules/imgproc/doc/feature_detection.html?highlight=houghcircles#houghcircles%s', None),
Goal
=====
-In this tutorial you will learn how to:
+In this tutorial you will learn:
.. container:: enumeratevisibleitemswithsquare
- * What is *linear blending* and why it is useful.
- * Add two images using :add_weighted:`addWeighted <>`
+ * what is *linear blending* and why it is useful;
+ * how to add two images using :add_weighted:`addWeighted <>`
Theory
=======
Our test case
=============
-Let us consider a simple color reduction method. Using the unsigned char C and C++ type for matrix item storing a channel of pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result.
+Let us consider a simple color reduction method. By using the unsigned char C and C++ type for matrix item storage, a channel of a pixel may have up to 256 different values. For a three channel image this can allow the formation of way too many colors (16 million to be exact). Working with so many color shades may give a heavy blow to our algorithm performance. However, sometimes it is enough to work with a lot less of them to get the same final result.
In these cases it's common that we make a *color space reduction*. This means that we divide the color space's current value by a new input value to end up with fewer colors. For instance, every value between zero and nine takes the new value zero, every value between ten and nineteen the value ten, and so on.
.. code-block:: cpp
- void Sharpen(const Mat& myImage,Mat& Result)
+ void Sharpen(const Mat& myImage, Mat& Result)
{
CV_Assert(myImage.depth() == CV_8U); // accept only uchar images
- Result.create(myImage.size(),myImage.type());
+ Result.create(myImage.size(), myImage.type());
const int nChannels = myImage.channels();
- for(int j = 1 ; j < myImage.rows-1; ++j)
+ for(int j = 1; j < myImage.rows - 1; ++j)
{
const uchar* previous = myImage.ptr<uchar>(j - 1);
const uchar* current = myImage.ptr<uchar>(j );
uchar* output = Result.ptr<uchar>(j);
- for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
+ for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
{
- *output++ = saturate_cast<uchar>(5*current[i]
- -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
+ *output++ = saturate_cast<uchar>(5 * current[i]
+ -current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
}
}
Result.row(0).setTo(Scalar(0));
- Result.row(Result.rows-1).setTo(Scalar(0));
+ Result.row(Result.rows - 1).setTo(Scalar(0));
Result.col(0).setTo(Scalar(0));
- Result.col(Result.cols-1).setTo(Scalar(0));
+ Result.col(Result.cols - 1).setTo(Scalar(0));
}
At first we make sure that the input image's data is in unsigned char format. For this we use the :utilitysystemfunctions:`CV_Assert <cv-assert>` function that throws an error when the expression inside it is false.
.. code-block:: cpp
- Result.create(myImage.size(),myImage.type());
+ Result.create(myImage.size(), myImage.type());
const int nChannels = myImage.channels();
We'll use the plain C [] operator to access pixels. Because we need to access multiple rows at the same time we'll acquire the pointers for each of them (a previous, a current and a next line). We need another pointer to where we're going to save the calculation. Then simply access the right items with the [] operator. For moving the output pointer ahead we simply increase this (with one byte) after each operation:
.. code-block:: cpp
- for(int j = 1 ; j < myImage.rows-1; ++j)
+ for(int j = 1; j < myImage.rows - 1; ++j)
{
const uchar* previous = myImage.ptr<uchar>(j - 1);
const uchar* current = myImage.ptr<uchar>(j );
uchar* output = Result.ptr<uchar>(j);
- for(int i= nChannels;i < nChannels*(myImage.cols-1); ++i)
+ for(int i = nChannels; i < nChannels * (myImage.cols - 1); ++i)
{
- *output++ = saturate_cast<uchar>(5*current[i]
- -current[i-nChannels] - current[i+nChannels] - previous[i] - next[i]);
+ *output++ = saturate_cast<uchar>(5 * current[i]
+ -current[i - nChannels] - current[i + nChannels] - previous[i] - next[i]);
}
}
-On the borders of the image the upper notation results inexistent pixel locations (like minus one - minus one). In these points our formula is undefined. A simple solution is to not apply the mask in these points and, for example, set the pixels on the borders to zeros:
+On the borders of the image the upper notation results in nonexistent pixel locations (like minus one - minus one). At these points our formula is undefined. A simple solution is to not apply the kernel in these points and, for example, set the pixels on the borders to zeros:
.. code-block:: cpp
- Result.row(0).setTo(Scalar(0)); // The top row
- Result.row(Result.rows-1).setTo(Scalar(0)); // The bottom row
- Result.col(0).setTo(Scalar(0)); // The left column
- Result.col(Result.cols-1).setTo(Scalar(0)); // The right column
+ Result.row(0).setTo(Scalar(0)); // The top row
+ Result.row(Result.rows - 1).setTo(Scalar(0)); // The bottom row
+ Result.col(0).setTo(Scalar(0)); // The left column
+ Result.col(Result.cols - 1).setTo(Scalar(0)); // The right column
The filter2D function
=====================
.. code-block:: cpp
- filter2D(I, K, I.depth(), kern );
+ filter2D(I, K, I.depth(), kern);
The function even has a fifth optional argument to specify the center of the kernel, and a sixth one for determining what to do in the regions where the operation is undefined (borders). Using this function has the advantage that it's shorter, less verbose and because there are some optimization techniques implemented it is usually faster than the *hand-coded method*. For example in my test while the second one took only 13 milliseconds the first took around 31 milliseconds. Quite some difference.
:linenos:
Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle
- Mat E = A(Range:all(), Range(1,3)); // using row and column boundaries
+ Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries
Now you may ask: if the matrix itself may belong to multiple *Mat* objects, who takes responsibility for cleaning it up when it's no longer needed? The short answer is: the last object that used it. This is handled by using a reference counting mechanism. Whenever somebody copies a header of a *Mat* object, a counter is increased for the matrix. Whenever a header is cleaned this counter is decreased. When the counter reaches zero the matrix too is freed. Sometimes you will want to copy the matrix itself too, so OpenCV provides the :basicstructures:`clone() <mat-clone>` and :basicstructures:`copyTo() <mat-copyto>` functions.
Creating a *Mat* object explicitly
==================================
-In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readWriteImageVideo:` imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
+In the :ref:`Load_Save_Image` tutorial you have already learned how to write a matrix to an image file by using the :readwriteimagevideo:`imwrite() <imwrite>` function. However, for debugging purposes it's much more convenient to see the actual values. You can do this using the << operator of *Mat*. Be aware that this only works for two dimensional matrices.
Although *Mat* works really well as an image container, it is also a general matrix class. Therefore, it is possible to create and manipulate multidimensional matrices. You can create a Mat object in multiple ways:
::
- OpenCV-2.4.8-android-sdk
+ OpenCV-2.4.9-android-sdk
|_ apk
- | |_ OpenCV_2.4.8_binary_pack_armv7a.apk
- | |_ OpenCV_2.4.8_Manager_2.16_XXX.apk
+ | |_ OpenCV_2.4.9_binary_pack_armv7a.apk
+ | |_ OpenCV_2.4.9_Manager_2.18_XXX.apk
|
|_ doc
|_ samples
.. code-block:: bash
- unzip ~/Downloads/OpenCV-2.4.8-android-sdk.zip
+ unzip ~/Downloads/OpenCV-2.4.9-android-sdk.zip
-.. |opencv_android_bin_pack| replace:: :file:`OpenCV-2.4.8-android-sdk.zip`
-.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.8/OpenCV-2.4.8-android-sdk.zip/download
+.. |opencv_android_bin_pack| replace:: :file:`OpenCV-2.4.9-android-sdk.zip`
+.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.9/OpenCV-2.4.9-android-sdk.zip/download
.. |opencv_android_bin_pack_url| replace:: |opencv_android_bin_pack|
.. |seven_zip| replace:: 7-Zip
.. _seven_zip: http://www.7-zip.org/
.. code-block:: sh
:linenos:
- <Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.8_Manager_2.16_armv7a-neon.apk
+ <Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.9_Manager_2.18_armv7a-neon.apk
.. note:: ``armeabi``, ``armv7a-neon``, ``arm7a-neon-android8``, ``mips`` and ``x86`` stand for
platform targets:
:guilabel:`File -> Import -> Existing project in your workspace`.
Press :guilabel:`Browse` button and locate OpenCV4Android SDK
- (:file:`OpenCV-2.4.8-android-sdk/sdk`).
+ (:file:`OpenCV-2.4.9-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
:alt: Add dependency from OpenCV library
:align: center
#. In application project add a reference to the OpenCV Java SDK in
- :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.8``.
+ :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.9``.
.. image:: images/eclipse_opencv_dependency1.png
:alt: Add dependency from OpenCV library
#. Add the OpenCV library project to your workspace the same way as for the async initialization
above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
press :guilabel:`Browse` button and select OpenCV SDK path
- (:file:`OpenCV-2.4.8-android-sdk/sdk`).
+ (:file:`OpenCV-2.4.9-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
:alt: Add dependency from OpenCV library
:align: center
#. In the application project add a reference to the OpenCV4Android SDK in
- :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.8``;
+ :guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.9``;
.. image:: images/eclipse_opencv_dependency1.png
:alt: Add dependency from OpenCV library
:align: center
#. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV
- native libs from :file:`<OpenCV-2.4.8-android-sdk>/sdk/native/libs/<target_arch>` to your
+ native libs from :file:`<OpenCV-2.4.9-android-sdk>/sdk/native/libs/<target_arch>` to your
project directory to folder :file:`libs/<target_arch>`.
In case of the application project **with a JNI part**, instead of manual libraries copying you
need to modify your ``Android.mk`` file:
add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before
- ``"include path_to_OpenCV-2.4.8-android-sdk/sdk/native/jni/OpenCV.mk"``
+ ``"include path_to_OpenCV-2.4.9-android-sdk/sdk/native/jni/OpenCV.mk"``
.. code-block:: make
:linenos:
.. code-block:: make
- include C:\Work\OpenCV4Android\OpenCV-2.4.8-android-sdk\sdk\native\jni\OpenCV.mk
+ include C:\Work\OpenCV4Android\OpenCV-2.4.9-android-sdk\sdk\native\jni\OpenCV.mk
Should be inserted into the :file:`jni/Android.mk` file **after** this line:
.. note::
- We assume that by now you know how to load an image using :imread:`imread <>` and to display it in a window (using :imshow:`imshow <>`). Read the :ref:`Display_Image` tutorial otherwise.
+ We assume that by now you know how to load an image using :readwriteimagevideo:`imread <imread>` and to display it in a window (using :user_interface:`imshow <imshow>`). Read the :ref:`Display_Image` tutorial otherwise.
Goals
======
.. container:: enumeratevisibleitemswithsquare
- * Load an image using :imread:`imread <>`
- * Transform an image from BGR to Grayscale format by using :cvt_color:`cvtColor <>`
- * Save your transformed image in a file on disk (using :imwrite:`imwrite <>`)
+ * Load an image using :readwriteimagevideo:`imread <imread>`
+ * Transform an image from BGR to Grayscale format by using :miscellaneous_transformations:`cvtColor <cvtcolor>`
+ * Save your transformed image in a file on disk (using :readwriteimagevideo:`imwrite <imwrite>`)
Code
======
Explanation
============
-#. We begin by:
-
- * Creating a Mat object to store the image information
- * Load an image using :imread:`imread <>`, located in the path given by *imageName*. Fort this example, assume you are loading a RGB image.
+#. We begin by loading an image using :readwriteimagevideo:`imread <imread>`, located in the path given by *imageName*. For this example, assume you are loading a RGB image.
#. Now we are going to convert our image from BGR to Grayscale format. OpenCV has a really nice function to do this kind of transformations:
cvtColor( image, gray_image, CV_BGR2GRAY );
- As you can see, :cvt_color:`cvtColor <>` takes as arguments:
+ As you can see, :miscellaneous_transformations:`cvtColor <cvtcolor>` takes as arguments:
.. container:: enumeratevisibleitemswithsquare
* a source image (*image*)
* a destination image (*gray_image*), in which we will save the converted image.
- * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :imread:`imread <>` has BGR default channel order in case of color images).
+ * an additional parameter that indicates what kind of transformation will be performed. In this case we use **CV_BGR2GRAY** (because of :readwriteimagevideo:`imread <imread>` has BGR default channel order in case of color images).
-#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analagous to :imread:`imread <>`: :imwrite:`imwrite <>`
+#. So now we have our new *gray_image* and want to save it on disk (otherwise it will get lost after the program ends). To save it, we will use a function analogous to :readwriteimagevideo:`imread <imread>`: :readwriteimagevideo:`imwrite <imwrite>`
.. code-block:: cpp
.. cssclass:: toctableopencv
=========== =======================================================
- |Video| Look here in order to find use on your video stream algoritms like: motion extraction, feature tracking and foreground extractions.
+ |Video| Look here in order to find use on your video stream algorithms like: motion extraction, feature tracking and foreground extractions.
=========== =======================================================
*video* module. Video analysis
-----------------------------------------------------------
-Look here in order to find use on your video stream algoritms like: motion extraction, feature tracking and foreground extractions.
+Look here in order to find use on your video stream algorithms like: motion extraction, feature tracking and foreground extractions.
.. include:: ../../definitions/noContent.rst
RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
)
-if (NOT (CMAKE_BUILD_TYPE MATCHES "debug"))
+if (NOT (CMAKE_BUILD_TYPE MATCHES "Debug"))
ADD_CUSTOM_COMMAND( TARGET ${the_target} POST_BUILD COMMAND ${CMAKE_STRIP} --strip-unneeded "${LIBRARY_OUTPUT_PATH}/lib${the_target}.so" )
endif()
using namespace android;
+// non-public camera related classes are not binary compatible
+// objects of these classes have different sizeof on different platforms
+// additional memory tail to all system objects to overcome sizeof issue
+#define MAGIC_TAIL 4096
+
+
void debugShowFPS();
#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
};
#endif
+
std::string getProcessName()
{
std::string result;
protected:
int cameraId;
sp<Camera> camera;
- CameraParameters params;
+#if defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
+ sp<SurfaceTexture> surface;
+#endif
+#if defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
+ sp<BufferQueue> queue;
+ sp<ConsumerListenerStub> listener;
+#endif
+ CameraParameters* params;
CameraCallback cameraCallback;
void* userData;
int emptyCameraCallbackReported;
+ int width;
+ int height;
+
static const char* flashModesNames[ANDROID_CAMERA_FLASH_MODES_NUM];
static const char* focusModesNames[ANDROID_CAMERA_FOCUS_MODES_NUM];
static const char* whiteBalanceModesNames[ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM];
int is_supported(const char* supp_modes_key, const char* mode)
{
- const char* supported_modes = params.get(supp_modes_key);
+ const char* supported_modes = params->get(supp_modes_key);
return (supported_modes && mode && (strstr(supported_modes, mode) > 0));
}
if (focus_distance_type >= 0 && focus_distance_type < 3)
{
float focus_distances[3];
- const char* output = params.get(CameraParameters::KEY_FOCUS_DISTANCES);
+ const char* output = params->get(CameraParameters::KEY_FOCUS_DISTANCES);
int val_num = CameraHandler::split_float(output, focus_distances, ',', 3);
if(val_num == 3)
{
emptyCameraCallbackReported(0)
{
LOGD("Instantiated new CameraHandler (%p, %p)", callback, _userData);
+ void* params_buffer = operator new(sizeof(CameraParameters) + MAGIC_TAIL);
+ params = new(params_buffer) CameraParameters();
}
virtual ~CameraHandler()
{
+ if (params)
+ params->~CameraParameters();
+ operator delete(params);
LOGD("CameraHandler destructor is called");
}
CameraParameters::FOCUS_MODE_AUTO,
#if !defined(ANDROID_r2_2_0)
CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO,
+#else
+ CameraParameters::FOCUS_MODE_AUTO,
#endif
CameraParameters::FOCUS_MODE_EDOF,
CameraParameters::FOCUS_MODE_FIXED,
- CameraParameters::FOCUS_MODE_INFINITY
+ CameraParameters::FOCUS_MODE_INFINITY,
+ CameraParameters::FOCUS_MODE_MACRO,
+#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
+ CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE
+#else
+ CameraParameters::FOCUS_MODE_AUTO
+#endif
};
const char* CameraHandler::whiteBalanceModesNames[ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM] =
{
LOGI("initCameraConnect: Setting paramers from previous camera handler");
camera->setParameters(prevCameraParameters->flatten());
- handler->params.unflatten(prevCameraParameters->flatten());
+ handler->params->unflatten(prevCameraParameters->flatten());
}
else
{
android::String8 params_str = camera->getParameters();
LOGI("initCameraConnect: [%s]", params_str.string());
- handler->params.unflatten(params_str);
-
- LOGD("Supported Cameras: %s", handler->params.get("camera-indexes"));
- LOGD("Supported Picture Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES));
- LOGD("Supported Picture Formats: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS));
- LOGD("Supported Preview Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES));
- LOGD("Supported Preview Formats: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS));
- LOGD("Supported Preview Frame Rates: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES));
- LOGD("Supported Thumbnail Sizes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES));
- LOGD("Supported Whitebalance Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE));
- LOGD("Supported Effects: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_EFFECTS));
- LOGD("Supported Scene Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_SCENE_MODES));
- LOGD("Supported Focus Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
- LOGD("Supported Antibanding Options: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_ANTIBANDING));
- LOGD("Supported Flash Modes: %s", handler->params.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES));
+ handler->params->unflatten(params_str);
+
+ LOGD("Supported Cameras: %s", handler->params->get("camera-indexes"));
+ LOGD("Supported Picture Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES));
+ LOGD("Supported Picture Formats: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS));
+ LOGD("Supported Preview Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES));
+ LOGD("Supported Preview Formats: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS));
+ LOGD("Supported Preview Frame Rates: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES));
+ LOGD("Supported Thumbnail Sizes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES));
+ LOGD("Supported Whitebalance Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE));
+ LOGD("Supported Effects: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_EFFECTS));
+ LOGD("Supported Scene Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_SCENE_MODES));
+ LOGD("Supported Focus Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
+ LOGD("Supported Antibanding Options: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_ANTIBANDING));
+ LOGD("Supported Flash Modes: %s", handler->params->get(CameraParameters::KEY_SUPPORTED_FLASH_MODES));
#if !defined(ANDROID_r2_2_0)
// Set focus mode to continuous-video if supported
- const char* available_focus_modes = handler->params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES);
+ const char* available_focus_modes = handler->params->get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES);
if (available_focus_modes != 0)
{
if (strstr(available_focus_modes, "continuous-video") != NULL)
{
- handler->params.set(CameraParameters::KEY_FOCUS_MODE, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO);
+ handler->params->set(CameraParameters::KEY_FOCUS_MODE, CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO);
- status_t resParams = handler->camera->setParameters(handler->params.flatten());
+ status_t resParams = handler->camera->setParameters(handler->params->flatten());
if (resParams != 0)
{
#endif
//check if yuv420sp format available. Set this format as preview format.
- const char* available_formats = handler->params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
+ const char* available_formats = handler->params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS);
if (available_formats != 0)
{
const char* format_to_set = 0;
if (0 != format_to_set)
{
- handler->params.setPreviewFormat(format_to_set);
+ handler->params->setPreviewFormat(format_to_set);
- status_t resParams = handler->camera->setParameters(handler->params.flatten());
+ status_t resParams = handler->camera->setParameters(handler->params->flatten());
if (resParams != 0)
LOGE("initCameraConnect: failed to set preview format to %s", format_to_set);
LOGD("initCameraConnect: preview format is set to %s", format_to_set);
}
}
+
+ handler->params->setPreviewSize(640, 480);
+ status_t resParams = handler->camera->setParameters(handler->params->flatten());
+ if (resParams != 0)
+ LOGE("initCameraConnect: failed to set preview resolution to 640x480");
+ else
+ LOGD("initCameraConnect: preview format is set to 640x480");
}
status_t bufferStatus;
#elif defined(ANDROID_r2_3_3)
/* Do nothing in case of 2.3 for now */
#elif defined(ANDROID_r3_0_1) || defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
- sp<SurfaceTexture> surfaceTexture = new SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
- bufferStatus = camera->setPreviewTexture(surfaceTexture);
+ void* surface_texture_obj = operator new(sizeof(SurfaceTexture) + MAGIC_TAIL);
+ handler->surface = new(surface_texture_obj) SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
+ bufferStatus = camera->setPreviewTexture(handler->surface);
if (bufferStatus != 0)
LOGE("initCameraConnect: failed setPreviewTexture call (status %d); camera might not work correctly", bufferStatus);
#elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
- sp<BufferQueue> bufferQueue = new BufferQueue();
- sp<BufferQueue::ConsumerListener> queueListener = new ConsumerListenerStub();
- bufferQueue->consumerConnect(queueListener);
- bufferStatus = camera->setPreviewTexture(bufferQueue);
+ void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+ handler->queue = new(buffer_queue_obj) BufferQueue();
+ void* consumer_listener_obj = operator new(sizeof(ConsumerListenerStub) + MAGIC_TAIL);
+ handler->listener = new(consumer_listener_obj) ConsumerListenerStub();
+ handler->queue->consumerConnect(handler->listener);
+ bufferStatus = camera->setPreviewTexture(handler->queue);
if (bufferStatus != 0)
LOGE("initCameraConnect: failed setPreviewTexture call; camera might not work correctly");
# elif defined(ANDROID_r4_4_0)
- sp<BufferQueue> bufferQueue = new BufferQueue();
- sp<IConsumerListener> queueListener = new ConsumerListenerStub();
- bufferQueue->consumerConnect(queueListener, true);
- bufferStatus = handler->camera->setPreviewTarget(bufferQueue);
+ void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+ handler->queue = new(buffer_queue_obj) BufferQueue();
+ void* consumer_listener_obj = operator new(sizeof(ConsumerListenerStub) + MAGIC_TAIL);
+ handler->listener = new(consumer_listener_obj) ConsumerListenerStub();
+ handler->queue->consumerConnect(handler->listener, true);
+ bufferStatus = handler->camera->setPreviewTarget(handler->queue);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
# endif
case ANDROID_CAMERA_PROPERTY_FRAMEWIDTH:
{
int w,h;
- params.getPreviewSize(&w, &h);
+ params->getPreviewSize(&w, &h);
return w;
}
case ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT:
{
int w,h;
- params.getPreviewSize(&w, &h);
+ params->getPreviewSize(&w, &h);
return h;
}
case ANDROID_CAMERA_PROPERTY_SUPPORTED_PREVIEW_SIZES_STRING:
{
- cameraPropertySupportedPreviewSizesString = params.get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
+ cameraPropertySupportedPreviewSizesString = params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES);
union {const char* str;double res;} u;
memset(&u.res, 0, sizeof(u.res));
u.str = cameraPropertySupportedPreviewSizesString.c_str();
}
case ANDROID_CAMERA_PROPERTY_PREVIEW_FORMAT_STRING:
{
- const char* fmt = params.get(CameraParameters::KEY_PREVIEW_FORMAT);
+ const char* fmt = params->get(CameraParameters::KEY_PREVIEW_FORMAT);
if (fmt == CameraParameters::PIXEL_FORMAT_YUV422SP)
fmt = "yuv422sp";
else if (fmt == CameraParameters::PIXEL_FORMAT_YUV420SP)
}
case ANDROID_CAMERA_PROPERTY_EXPOSURE:
{
- int exposure = params.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
+ int exposure = params->getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
return exposure;
}
case ANDROID_CAMERA_PROPERTY_FPS:
{
- return params.getPreviewFrameRate();
+ return params->getPreviewFrameRate();
}
case ANDROID_CAMERA_PROPERTY_FLASH_MODE:
{
int flash_mode = getModeNum(CameraHandler::flashModesNames,
ANDROID_CAMERA_FLASH_MODES_NUM,
- params.get(CameraParameters::KEY_FLASH_MODE));
+ params->get(CameraParameters::KEY_FLASH_MODE));
return flash_mode;
}
case ANDROID_CAMERA_PROPERTY_FOCUS_MODE:
{
int focus_mode = getModeNum(CameraHandler::focusModesNames,
ANDROID_CAMERA_FOCUS_MODES_NUM,
- params.get(CameraParameters::KEY_FOCUS_MODE));
+ params->get(CameraParameters::KEY_FOCUS_MODE));
return focus_mode;
}
case ANDROID_CAMERA_PROPERTY_WHITE_BALANCE:
{
int white_balance = getModeNum(CameraHandler::whiteBalanceModesNames,
ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM,
- params.get(CameraParameters::KEY_WHITE_BALANCE));
+ params->get(CameraParameters::KEY_WHITE_BALANCE));
return white_balance;
}
case ANDROID_CAMERA_PROPERTY_ANTIBANDING:
{
int antibanding = getModeNum(CameraHandler::antibandingModesNames,
ANDROID_CAMERA_ANTIBANDING_MODES_NUM,
- params.get(CameraParameters::KEY_ANTIBANDING));
+ params->get(CameraParameters::KEY_ANTIBANDING));
return antibanding;
}
case ANDROID_CAMERA_PROPERTY_FOCAL_LENGTH:
{
- float focal_length = params.getFloat(CameraParameters::KEY_FOCAL_LENGTH);
+ float focal_length = params->getFloat(CameraParameters::KEY_FOCAL_LENGTH);
return focal_length;
}
case ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_NEAR:
{
return getFocusDistance(ANDROID_CAMERA_FOCUS_DISTANCE_FAR_INDEX);
}
+#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
+ case ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK:
+ {
+ const char* status = params->get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK);
+ if (status == CameraParameters::TRUE)
+ return 1.;
+ else
+ return 0.;
+ }
+ case ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK:
+ {
+ const char* status = params->get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK);
+ if (status == CameraParameters::TRUE)
+ return 1.;
+ else
+ return 0.;
+ }
+#endif
default:
LOGW("CameraHandler::getProperty - Unsupported property.");
};
{
LOGD("CameraHandler::setProperty(%d, %f)", propIdx, value);
+ android::String8 params_str;
+ params_str = camera->getParameters();
+ LOGI("Params before set: [%s]", params_str.string());
+
switch (propIdx)
{
case ANDROID_CAMERA_PROPERTY_FRAMEWIDTH:
{
int w,h;
- params.getPreviewSize(&w, &h);
- w = (int)value;
- params.setPreviewSize(w, h);
+ params->getPreviewSize(&w, &h);
+ width = (int)value;
}
break;
case ANDROID_CAMERA_PROPERTY_FRAMEHEIGHT:
{
int w,h;
- params.getPreviewSize(&w, &h);
- h = (int)value;
- params.setPreviewSize(w, h);
+ params->getPreviewSize(&w, &h);
+ height = (int)value;
}
break;
case ANDROID_CAMERA_PROPERTY_EXPOSURE:
{
- int max_exposure = params.getInt("max-exposure-compensation");
- int min_exposure = params.getInt("min-exposure-compensation");
- if(max_exposure && min_exposure){
+ int max_exposure = params->getInt("max-exposure-compensation");
+ int min_exposure = params->getInt("min-exposure-compensation");
+ if(max_exposure && min_exposure)
+ {
int exposure = (int)value;
- if(exposure >= min_exposure && exposure <= max_exposure){
- params.set("exposure-compensation", exposure);
- } else {
+ if(exposure >= min_exposure && exposure <= max_exposure)
+ params->set("exposure-compensation", exposure);
+ else
LOGE("Exposure compensation not in valid range (%i,%i).", min_exposure, max_exposure);
- }
- } else {
+ } else
LOGE("Exposure compensation adjust is not supported.");
- }
+
+ camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_FLASH_MODE:
{
int new_val = (int)value;
- if(new_val >= 0 && new_val < ANDROID_CAMERA_FLASH_MODES_NUM){
+ if(new_val >= 0 && new_val < ANDROID_CAMERA_FLASH_MODES_NUM)
+ {
const char* mode_name = flashModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_FLASH_MODES, mode_name))
- params.set(CameraParameters::KEY_FLASH_MODE, mode_name);
+ params->set(CameraParameters::KEY_FLASH_MODE, mode_name);
else
LOGE("Flash mode %s is not supported.", mode_name);
- } else {
- LOGE("Flash mode value not in valid range.");
}
+ else
+ LOGE("Flash mode value not in valid range.");
+
+ camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_FOCUS_MODE:
{
int new_val = (int)value;
- if(new_val >= 0 && new_val < ANDROID_CAMERA_FOCUS_MODES_NUM){
+ if(new_val >= 0 && new_val < ANDROID_CAMERA_FOCUS_MODES_NUM)
+ {
const char* mode_name = focusModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_FOCUS_MODES, mode_name))
- params.set(CameraParameters::KEY_FOCUS_MODE, mode_name);
+ params->set(CameraParameters::KEY_FOCUS_MODE, mode_name);
else
LOGE("Focus mode %s is not supported.", mode_name);
- } else {
- LOGE("Focus mode value not in valid range.");
}
+ else
+ LOGE("Focus mode value not in valid range.");
+
+ camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_WHITE_BALANCE:
{
int new_val = (int)value;
- if(new_val >= 0 && new_val < ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM){
+ if(new_val >= 0 && new_val < ANDROID_CAMERA_WHITE_BALANCE_MODES_NUM)
+ {
const char* mode_name = whiteBalanceModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE, mode_name))
- params.set(CameraParameters::KEY_WHITE_BALANCE, mode_name);
+ params->set(CameraParameters::KEY_WHITE_BALANCE, mode_name);
else
LOGE("White balance mode %s is not supported.", mode_name);
- } else {
- LOGE("White balance mode value not in valid range.");
}
+ else
+ LOGE("White balance mode value not in valid range.");
+
+ camera->setParameters(params->flatten());
}
break;
case ANDROID_CAMERA_PROPERTY_ANTIBANDING:
{
int new_val = (int)value;
- if(new_val >= 0 && new_val < ANDROID_CAMERA_ANTIBANDING_MODES_NUM){
+ if(new_val >= 0 && new_val < ANDROID_CAMERA_ANTIBANDING_MODES_NUM)
+ {
const char* mode_name = antibandingModesNames[new_val];
if(is_supported(CameraParameters::KEY_SUPPORTED_ANTIBANDING, mode_name))
- params.set(CameraParameters::KEY_ANTIBANDING, mode_name);
+ params->set(CameraParameters::KEY_ANTIBANDING, mode_name);
else
LOGE("Antibanding mode %s is not supported.", mode_name);
- } else {
+ }
+ else
LOGE("Antibanding mode value not in valid range.");
+
+ camera->setParameters(params->flatten());
+ }
+ break;
+#if !defined(ANDROID_r2_2_0) && !defined(ANDROID_r2_3_3) && !defined(ANDROID_r3_0_1)
+ case ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK:
+ {
+ if (is_supported(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED, "true"))
+ {
+ if (value != 0)
+ params->set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK, CameraParameters::TRUE);
+ else
+ params->set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK, CameraParameters::FALSE);
+ LOGE("Expose lock is set");
}
+ else
+ LOGE("Expose lock is not supported");
+
+ camera->setParameters(params->flatten());
}
break;
+ case ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK:
+ {
+ if (is_supported(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED, "true"))
+ {
+ if (value != 0)
+ params->set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK, CameraParameters::TRUE);
+ else
+ params->set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK, CameraParameters::FALSE);
+ LOGE("White balance lock is set");
+ }
+ else
+ LOGE("White balance lock is not supported");
+
+ camera->setParameters(params->flatten());
+ }
+ break;
+#endif
default:
LOGW("CameraHandler::setProperty - Unsupported property.");
};
+
+ params_str = camera->getParameters();
+ LOGI("Params after set: [%s]", params_str.string());
}
void CameraHandler::applyProperties(CameraHandler** ppcameraHandler)
return;
}
- CameraParameters curCameraParameters((*ppcameraHandler)->params.flatten());
+ // delayed resolution setup to exclude errors during other parameters setup on the fly
+ // without camera restart
+ if (((*ppcameraHandler)->width != 0) && ((*ppcameraHandler)->height != 0))
+ (*ppcameraHandler)->params->setPreviewSize((*ppcameraHandler)->width, (*ppcameraHandler)->height);
#if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3) || defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) \
|| defined(ANDROID_r4_3_0) || defined(ANDROID_r4_4_0)
return;
}
- handler->camera->setParameters(curCameraParameters.flatten());
- handler->params.unflatten(curCameraParameters.flatten());
+ handler->camera->setParameters((*ppcameraHandler)->params->flatten());
status_t bufferStatus;
# if defined(ANDROID_r4_0_0) || defined(ANDROID_r4_0_3)
- sp<SurfaceTexture> surfaceTexture = new SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
- bufferStatus = handler->camera->setPreviewTexture(surfaceTexture);
+ void* surface_texture_obj = operator new(sizeof(SurfaceTexture) + MAGIC_TAIL);
+ handler->surface = new(surface_texture_obj) SurfaceTexture(MAGIC_OPENCV_TEXTURE_ID);
+ bufferStatus = handler->camera->setPreviewTexture(handler->surface);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call (status %d); camera might not work correctly", bufferStatus);
# elif defined(ANDROID_r4_1_1) || defined(ANDROID_r4_2_0) || defined(ANDROID_r4_3_0)
- sp<BufferQueue> bufferQueue = new BufferQueue();
- sp<BufferQueue::ConsumerListener> queueListener = new ConsumerListenerStub();
- bufferQueue->consumerConnect(queueListener);
- bufferStatus = handler->camera->setPreviewTexture(bufferQueue);
+ void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+ handler->queue = new(buffer_queue_obj) BufferQueue();
+ handler->queue->consumerConnect(handler->listener);
+ bufferStatus = handler->camera->setPreviewTexture(handler->queue);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
# elif defined(ANDROID_r4_4_0)
- sp<BufferQueue> bufferQueue = new BufferQueue();
- sp<IConsumerListener> queueListener = new ConsumerListenerStub();
- bufferQueue->consumerConnect(queueListener, true);
- bufferStatus = handler->camera->setPreviewTarget(bufferQueue);
+ void* buffer_queue_obj = operator new(sizeof(BufferQueue) + MAGIC_TAIL);
+ handler->queue = new(buffer_queue_obj) BufferQueue();
+ handler->queue->consumerConnect(handler->listener, true);
+ bufferStatus = handler->camera->setPreviewTarget(handler->queue);
if (bufferStatus != 0)
LOGE("applyProperties: failed setPreviewTexture call; camera might not work correctly");
# endif
LOGD("CameraHandler::applyProperties(): after previousCameraHandler->closeCameraConnect");
LOGD("CameraHandler::applyProperties(): before initCameraConnect");
- CameraHandler* handler=initCameraConnect(cameraCallback, cameraId, userData, &curCameraParameters);
+ CameraHandler* handler=initCameraConnect(cameraCallback, cameraId, userData, (*ppcameraHandler)->params);
LOGD("CameraHandler::applyProperties(): after initCameraConnect, handler=0x%x", (int)handler);
if (handler == NULL) {
LOGE("ERROR in applyProperties --- cannot reinit camera");
ANDROID_CAMERA_PROPERTY_FOCAL_LENGTH = 105,
ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_NEAR = 106,
ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_OPTIMAL = 107,
- ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR = 108
+ ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR = 108,
+ ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK = 109,
+ ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK = 110
};
enum {
ANDROID_CAMERA_FOCUS_MODE_AUTO = 0,
- ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_PICTURE,
ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_VIDEO,
ANDROID_CAMERA_FOCUS_MODE_EDOF,
ANDROID_CAMERA_FOCUS_MODE_FIXED,
ANDROID_CAMERA_FOCUS_MODE_INFINITY,
ANDROID_CAMERA_FOCUS_MODE_MACRO,
+ ANDROID_CAMERA_FOCUS_MODE_CONTINUOUS_PICTURE,
ANDROID_CAMERA_FOCUS_MODES_NUM
};
Setting the Thresholds
+++++++++++++++++++++++
-Sometimes you run into the situation, when you want to apply a threshold on the prediction. A common scenario in face recognition is to tell, wether a face belongs to the training dataset or if it is unknown. You might wonder, why there's no public API in :ocv:class:`FaceRecognizer` to set the threshold for the prediction, but rest assured: It's supported. It just means there's no generic way in an abstract class to provide an interface for setting/getting the thresholds of *every possible* :ocv:class:`FaceRecognizer` algorithm. The appropriate place to set the thresholds is in the constructor of the specific :ocv:class:`FaceRecognizer` and since every :ocv:class:`FaceRecognizer` is a :ocv:class:`Algorithm` (see above), you can get/set the thresholds at runtime!
+Sometimes you run into the situation, when you want to apply a threshold on the prediction. A common scenario in face recognition is to tell, whether a face belongs to the training dataset or if it is unknown. You might wonder, why there's no public API in :ocv:class:`FaceRecognizer` to set the threshold for the prediction, but rest assured: It's supported. It just means there's no generic way in an abstract class to provide an interface for setting/getting the thresholds of *every possible* :ocv:class:`FaceRecognizer` algorithm. The appropriate place to set the thresholds is in the constructor of the specific :ocv:class:`FaceRecognizer` and since every :ocv:class:`FaceRecognizer` is a :ocv:class:`Algorithm` (see above), you can get/set the thresholds at runtime!
Here is an example of setting a threshold for the Eigenfaces method, when creating the model:
Fisherfaces for Gender Classification
--------------------------------------
-If you want to decide wether a person is *male* or *female*, you have to learn the discriminative features of both classes. The Eigenfaces method is based on the Principal Component Analysis, which is an unsupervised statistical model and not suitable for this task. Please see the Face Recognition tutorial for insights into the algorithms. The Fisherfaces instead yields a class-specific linear projection, so it is much better suited for the gender classification task. `http://www.bytefish.de/blog/gender_classification <http://www.bytefish.de/blog/gender_classification>`_ shows the recognition rate of the Fisherfaces method for gender classification.
+If you want to decide whether a person is *male* or *female*, you have to learn the discriminative features of both classes. The Eigenfaces method is based on the Principal Component Analysis, which is an unsupervised statistical model and not suitable for this task. Please see the Face Recognition tutorial for insights into the algorithms. The Fisherfaces instead yields a class-specific linear projection, so it is much better suited for the gender classification task. `http://www.bytefish.de/blog/gender_classification <http://www.bytefish.de/blog/gender_classification>`_ shows the recognition rate of the Fisherfaces method for gender classification.
The Fisherfaces method achieves a 98% recognition rate in a subject-independent cross-validation. A subject-independent cross-validation means *images of the person under test are never used for learning the model*. And could you believe it: you can simply use the facerec_fisherfaces demo, that's inlcuded in OpenCV.
IplImage* color_img = cvCreateImage(cvSize(320,240), IPL_DEPTH_8U, 3);
IplImage gray_img_hdr, *gray_img;
- gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);
+ gray_img = (IplImage*)cvReshapeMatND(color_img, sizeof(gray_img_hdr), &gray_img_hdr, 1, 0, 0);
...
int size[] = { 2, 2, 2 };
CvMatND* mat = cvCreateMatND(3, size, CV_32F);
CvMat row_header, *row;
+ row = (CvMat*)cvReshapeMatND(mat, sizeof(row_header), &row_header, 0, 1, 0);
+
+..
+
+In C, the header file for this function includes a convenient macro ``cvReshapeND`` that does away with the ``sizeof_header`` parameter. So, the lines containing the call to ``cvReshapeMatND`` in the examples may be replaced as follows:
+
+::
+
+ gray_img = (IplImage*)cvReshapeND(color_img, &gray_img_hdr, 1, 0, 0);
+
+ ...
+
row = (CvMat*)cvReshapeND(mat, &row_header, 0, 1, 0);
..
#define CV_VERSION_EPOCH 2
#define CV_VERSION_MAJOR 4
-#define CV_VERSION_MINOR 8
+#define CV_VERSION_MINOR 9
#define CV_VERSION_REVISION 0
#define CVAUX_STR_EXP(__A) #__A
void write(std::ostream& out, const Mat& m, const int*, int) const
{
out << "[";
- writeMat(out, m, ';', ' ', m.cols == 1);
+ writeMat(out, m, ';', ' ', m.rows == 1);
out << "]";
}
void write(std::ostream& out, const Mat& m, const int*, int) const
{
out << "[";
- writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
+ writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.rows*m.channels() == 1);
out << "]";
}
"uint8", "int8", "uint16", "int16", "int32", "float32", "float64", "uint64"
};
out << "array([";
- writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
+ writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.rows*m.channels() == 1);
out << "], type='" << numpyTypes[m.depth()] << "')";
}
virtual ~CSVFormatter() {}
void write(std::ostream& out, const Mat& m, const int*, int) const
{
- writeMat(out, m, ' ', ' ', m.cols*m.channels() == 1);
+ writeMat(out, m, ' ', ' ', m.rows*m.channels() == 1);
if(m.rows > 1)
out << "\n";
}
void write(std::ostream& out, const Mat& m, const int*, int) const
{
out << "{";
- writeMat(out, m, ',', ' ', m.cols==1);
+ writeMat(out, m, ',', ' ', m.rows==1);
out << "}";
}
:param keypoints: Input collection of keypoints. Keypoints for which a descriptor cannot be computed are removed. Sometimes new keypoints can be added, for example: ``SIFT`` duplicates keypoint with several dominant orientations (for each orientation).
- :param descriptors: Computed descriptors. In the second variant of the method ``descriptors[i]`` are descriptors computed for a ``keypoints[i]`. Row ``j`` is the ``keypoints`` (or ``keypoints[i]``) is the descriptor for keypoint ``j``-th keypoint.
+ :param descriptors: Computed descriptors. In the second variant of the method ``descriptors[i]`` are descriptors computed for a ``keypoints[i]``. Row ``j`` in ``descriptors`` (or ``descriptors[i]``) is the descriptor for the ``j``-th keypoint in ``keypoints`` (or ``keypoints[i]``).
DescriptorExtractor::create
:param normType: One of ``NORM_L1``, ``NORM_L2``, ``NORM_HAMMING``, ``NORM_HAMMING2``. ``L1`` and ``L2`` norms are preferable choices for SIFT and SURF descriptors, ``NORM_HAMMING`` should be used with ORB, BRISK and BRIEF, ``NORM_HAMMING2`` should be used with ORB when ``WTA_K==3`` or ``4`` (see ORB::ORB constructor description).
- :param crossCheck: If it is false, this is will be default BFMatcher behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for ``i-th`` query descriptor the ``j-th`` descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMathcher`` will only return consistent pairs. Such technique usually produces best results with minimal number of outliers when there are enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
+ :param crossCheck: If it is false, this will be the default BFMatcher behaviour when it finds the k nearest neighbors for each query descriptor. If ``crossCheck==true``, then the ``knnMatch()`` method with ``k=1`` will only return pairs ``(i,j)`` such that for ``i-th`` query descriptor the ``j-th`` descriptor in the matcher's collection is the nearest and vice versa, i.e. the ``BFMatcher`` will only return consistent pairs. Such technique usually produces best results with minimal number of outliers when there are enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
FlannBasedMatcher
-----------------------
In the current version, each of the OpenCV GPU algorithms can use only a single GPU. So, to utilize multiple GPUs, you have to manually distribute the work between GPUs.
-Switching active devie can be done using :ocv:func:`gpu::setDevice()` function. For more details please read Cuda C Programing Guide.
+Switching active device can be done using :ocv:func:`gpu::setDevice()` function. For more details please read Cuda C Programming Guide.
While developing algorithms for multiple GPUs, note a data passing overhead. For primitive functions and small images, it can be significant, which may eliminate all the advantages of having multiple GPUs. But for high-level algorithms, consider using multi-GPU acceleration. For example, the Stereo Block Matching algorithm has been successfully parallelized using the following algorithm:
//////////////////////////////////////////////////////////////////////
// GeneralizedHough
+#if !defined(__GNUC__) || (__GNUC__ * 10 + __GNUC_MINOR__ != 47)
+
CV_FLAGS(GHMethod, GHT_POSITION, GHT_SCALE, GHT_ROTATION)
DEF_PARAM_TEST(Method_Sz, GHMethod, cv::Size);
CPU_SANITY_CHECK(positions);
}
}
+
+#endif
//
//M*/
+#if defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ == 47)
+# define CUDA_DISABLER
+#endif
+
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
using namespace cv;
using namespace cv::gpu;
+#if defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ == 47)
+# define CUDA_DISABLER
+#endif
+
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)
Ptr<GeneralizedHough_GPU> cv::gpu::GeneralizedHough_GPU::create(int) { throw_nogpu(); return Ptr<GeneralizedHough_GPU>(); }
cv::Mat background_gold;
mog2_gold.getBackgroundImage(background_gold);
- ASSERT_MAT_NEAR(background_gold, background, 0);
+ ASSERT_MAT_NEAR(background_gold, background, 1);
}
INSTANTIATE_TEST_CASE_P(GPU_Video, MOG2, testing::Combine(
///////////////////////////////////////////////////////////////////////////////////////////////////////
// GeneralizedHough
+#if !defined(__GNUC__) || (__GNUC__ * 10 + __GNUC_MINOR__ != 47)
+
PARAM_TEST_CASE(GeneralizedHough, cv::gpu::DeviceInfo, UseRoi)
{
};
ALL_DEVICES,
WHOLE_SUBMAT));
+#endif
+
#endif // HAVE_CUDA
:param filename: Name of the output video file.
- :param fourcc: 4-character code of codec used to compress the frames. For example, ``CV_FOURCC('P','I','M,'1')`` is a MPEG-1 codec, ``CV_FOURCC('M','J','P','G')`` is a motion-jpeg codec etc. List of codes can be obtained at `Video Codecs by FOURCC <http://www.fourcc.org/codecs.php>`_ page.
+ :param fourcc: 4-character code of codec used to compress the frames. For example, ``CV_FOURCC('P','I','M','1')`` is a MPEG-1 codec, ``CV_FOURCC('M','J','P','G')`` is a motion-jpeg codec etc. List of codes can be obtained at `Video Codecs by FOURCC <http://www.fourcc.org/codecs.php>`_ page.
:param fps: Framerate of the created video stream.
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008,
+ CV_CAP_PROP_ANDROID_EXPOSE_LOCK = 8009,
+ CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK = 8010,
// Properties of cameras available through AVFOUNDATION interface
CV_CAP_PROP_IOS_DEVICE_FOCUS = 9001,
enum
{
CV_CAP_ANDROID_FOCUS_MODE_AUTO = 0,
+ CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_PICTURE,
CV_CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO,
CV_CAP_ANDROID_FOCUS_MODE_EDOF,
CV_CAP_ANDROID_FOCUS_MODE_FIXED,
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_OPTIMAL);
case CV_CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR:
return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_FOCUS_DISTANCE_FAR);
+ case CV_CAP_PROP_ANDROID_EXPOSE_LOCK:
+ return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK);
+ case CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK:
+ return (double)m_activity->getProperty(ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK);
default:
CV_Error( CV_StsOutOfRange, "Failed attempt to GET unsupported camera property." );
break;
case CV_CAP_PROP_ANDROID_ANTIBANDING:
m_activity->setProperty(ANDROID_CAMERA_PROPERTY_ANTIBANDING, propValue);
break;
+ case CV_CAP_PROP_ANDROID_EXPOSE_LOCK:
+ m_activity->setProperty(ANDROID_CAMERA_PROPERTY_EXPOSE_LOCK, propValue);
+ break;
+ case CV_CAP_PROP_ANDROID_WHITEBALANCE_LOCK:
+ m_activity->setProperty(ANDROID_CAMERA_PROPERTY_WHITEBALANCE_LOCK, propValue);
+ break;
default:
CV_Error( CV_StsOutOfRange, "Failed attempt to SET unsupported camera property." );
return false;
}
- if (propIdx != CV_CAP_PROP_AUTOGRAB) {// property for highgui class CvCapture_Android only
+ // Only changes in frame size require camera restart
+ if ((propIdx == CV_CAP_PROP_FRAME_WIDTH) || (propIdx == CV_CAP_PROP_FRAME_HEIGHT))
+ { // property for highgui class CvCapture_Android only
m_CameraParamsChanged = true;
}
+
res = true;
}
}
//cleanup
+ CFRelease(cfData);
+ CVPixelBufferRelease(pixelBuffer);
CGImageRelease(cgImage);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
}
public Mat rgba() {
- Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2BGR_NV12, 4);
+ Imgproc.cvtColor(mYuvFrameData, mRgba, Imgproc.COLOR_YUV2RGBA_NV21, 4);
return mRgba;
}
}
deliverAndDrawFrame(mFrame);
-
} while (!mStopThread);
}
}
public static final String OPENCV_VERSION_2_4_8 = "2.4.8";
/**
+ * OpenCV Library version 2.4.9.
+ */
+ public static final String OPENCV_VERSION_2_4_9 = "2.4.9";
+
+ /**
* Loads and initializes OpenCV library from current application package. Roughly, it's an analog of system.loadLibrary("opencv_java").
* @return Returns true is initialization of OpenCV was successful.
*/
m_pFViVarRes = (int*)cvAlloc(sizeof(int)*m_Dim);
m_Sizes = (int*)cvAlloc(sizeof(int)*m_Dim);
- { /* Create init sparce matrix: */
+ { /* Create init sparse matrix: */
int i;
for(i=0;i<m_Dim;++i)m_Sizes[i] = m_BinNum;
m_HistMat.Realloc(m_Dim,m_Sizes,SPARSE);
m_HistVolumeSaved = 0;
- } /* Create init sparce matrix. */
+ } /* Create init sparse matrix. */
} /* AllocData. */
void FreeData()
CvMat *X0,CvMat *observRes,CvMat *resultX,
int maxIter,double epsilon)
{
- /* This is not sparce method */
+ /* This is not sparse method */
/* Make optimization using */
/* func - function to compute */
/* uses function to compute jacobian */
ocl::SURF_OCL
-------------
-.. ocv:class:: ocl::SURF_OCL
+.. ocv:class:: ocl::SURF_OCL : public Feature2D
Class used for extracting Speeded Up Robust Features (SURF) from an image. ::
//! Speeded up robust features, port from GPU module.
////////////////////////////////// SURF //////////////////////////////////////////
- class CV_EXPORTS SURF_OCL
+ class CV_EXPORTS SURF_OCL : public cv::Feature2D
{
public:
enum KeypointLayout
SURF_OCL();
//! the full constructor taking all the necessary parameters
explicit SURF_OCL(double _hessianThreshold, int _nOctaves = 4,
- int _nOctaveLayers = 2, bool _extended = false, float _keypointsRatio = 0.01f, bool _upright = false);
+ int _nOctaveLayers = 2, bool _extended = true, float _keypointsRatio = 0.01f, bool _upright = false);
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
+
+ int descriptorType() const;
+
//! upload host keypoints to device memory
void uploadKeypoints(const vector<cv::KeyPoint> &keypoints, oclMat &keypointsocl);
//! download keypoints from device to host memory
void operator()(const oclMat &img, const oclMat &mask, std::vector<KeyPoint> &keypoints, std::vector<float> &descriptors,
bool useProvidedKeypoints = false);
+ //! finds the keypoints using fast hessian detector used in SURF
+ void operator()(InputArray img, InputArray mask,
+ CV_OUT vector<KeyPoint>& keypoints) const;
+ //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
+ void operator()(InputArray img, InputArray mask,
+ CV_OUT vector<KeyPoint>& keypoints,
+ OutputArray descriptors,
+ bool useProvidedKeypoints=false) const;
+
+ AlgorithmInfo* info() const;
+
void releaseMemory();
// SURF parameters
oclMat sum, mask1, maskSum, intBuffer;
oclMat det, trace;
oclMat maxPosBuffer;
-
+ protected:
+ void detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const;
+ void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const;
};
}
}
"plain"
};
-CV_PERF_TEST_MAIN_WITH_IMPLS(nonfree, impls, perf::printCudaInfo())
+#ifdef HAVE_OPENCL
+#define DUMP_PROPERTY_XML(propertyName, propertyValue) \
+ do { \
+ std::stringstream ssName, ssValue;\
+ ssName << propertyName;\
+ ssValue << propertyValue; \
+ ::testing::Test::RecordProperty(ssName.str(), ssValue.str()); \
+ } while (false)
+
+#define DUMP_MESSAGE_STDOUT(msg) \
+ do { \
+ std::cout << msg << std::endl; \
+ } while (false)
+
+#include "opencv2/ocl/private/opencl_dumpinfo.hpp"
+#endif
+
+int main(int argc, char **argv)
+{
+ ::perf::TestBase::setPerformanceStrategy(::perf::PERF_STRATEGY_SIMPLE);
+#if defined(HAVE_CUDA) && defined(HAVE_OPENCL)
+ CV_PERF_TEST_MAIN_INTERNALS(nonfree, impls, perf::printCudaInfo(), dumpOpenCLDevice());
+#elif defined(HAVE_CUDA)
+ CV_PERF_TEST_MAIN_INTERNALS(nonfree, impls, perf::printCudaInfo());
+#elif defined(HAVE_OPENCL)
+ CV_PERF_TEST_MAIN_INTERNALS(nonfree, impls, dumpOpenCLDevice());
+#else
+ CV_PERF_TEST_MAIN_INTERNALS(nonfree, impls)
+#endif
+}
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
+#include "cvconfig.h"
+
#include "opencv2/ts/ts.hpp"
#include "opencv2/nonfree/nonfree.hpp"
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
-
- if (frame.empty())
- FAIL() << "Unable to load source image " << filename;
+ ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
-
- if (frame.empty())
- FAIL() << "Unable to load source image " << filename;
+ ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
{
String filename = getDataPath(GetParam());
Mat frame = imread(filename, IMREAD_GRAYSCALE);
-
- if (frame.empty())
- FAIL() << "Unable to load source image " << filename;
+ ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
Mat mask;
declare.in(frame).time(90);
SANITY_CHECK_NOTHING();
}
+
+
+PERF_TEST_P(OCL_SURF, DISABLED_detect, testing::Values(SURF_IMAGES))
+{
+ String filename = getDataPath(GetParam());
+ Mat frame = imread(filename, IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
+
+ declare.in(frame);
+
+ Mat mask;
+ vector<KeyPoint> points;
+ Ptr<Feature2D> detector;
+
+ if (getSelectedImpl() == "plain")
+ {
+ detector = new SURF;
+ TEST_CYCLE() detector->operator()(frame, mask, points, noArray());
+ }
+ else if (getSelectedImpl() == "ocl")
+ {
+ detector = new ocl::SURF_OCL;
+ OCL_TEST_CYCLE() detector->operator()(frame, mask, points, noArray());
+ }
+ else CV_TEST_FAIL_NO_IMPL();
+
+ SANITY_CHECK_KEYPOINTS(points, 1e-3);
+}
+
+PERF_TEST_P(OCL_SURF, DISABLED_extract, testing::Values(SURF_IMAGES))
+{
+ String filename = getDataPath(GetParam());
+ Mat frame = imread(filename, IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
+
+ declare.in(frame);
+
+ Mat mask;
+ Ptr<Feature2D> detector;
+ vector<KeyPoint> points;
+ vector<float> descriptors;
+
+ if (getSelectedImpl() == "plain")
+ {
+ detector = new SURF;
+ detector->operator()(frame, mask, points, noArray());
+ TEST_CYCLE() detector->operator()(frame, mask, points, descriptors, true);
+ }
+ else if (getSelectedImpl() == "ocl")
+ {
+ detector = new ocl::SURF_OCL;
+ detector->operator()(frame, mask, points, noArray());
+ OCL_TEST_CYCLE() detector->operator()(frame, mask, points, descriptors, true);
+ }
+ else CV_TEST_FAIL_NO_IMPL();
+
+ SANITY_CHECK(descriptors, 1e-4);
+}
+
+PERF_TEST_P(OCL_SURF, DISABLED_full, testing::Values(SURF_IMAGES))
+{
+ String filename = getDataPath(GetParam());
+ Mat frame = imread(filename, IMREAD_GRAYSCALE);
+ ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
+
+ declare.in(frame).time(90);
+
+ Mat mask;
+ Ptr<Feature2D> detector;
+ vector<KeyPoint> points;
+ vector<float> descriptors;
+
+ if (getSelectedImpl() == "plain")
+ {
+ detector = new SURF;
+ TEST_CYCLE() detector->operator()(frame, mask, points, descriptors, false);
+ }
+ else if (getSelectedImpl() == "ocl")
+ {
+ detector = new ocl::SURF_OCL;
+ detector->operator()(frame, mask, points, noArray());
+ OCL_TEST_CYCLE() detector->operator()(frame, mask, points, descriptors, false);
+ }
+ else CV_TEST_FAIL_NO_IMPL();
+
+ SANITY_CHECK_KEYPOINTS(points, 1e-3);
+ SANITY_CHECK(descriptors, 1e-4);
+}
+
#endif // HAVE_OPENCV_OCL
obj.info()->addParam(obj, "edgeThreshold", obj.edgeThreshold);
obj.info()->addParam(obj, "sigma", obj.sigma))
+#ifdef HAVE_OPENCV_OCL
+
+namespace ocl {
+CV_INIT_ALGORITHM(SURF_OCL, "Feature2D.SURF_OCL",
+ obj.info()->addParam(obj, "hessianThreshold", obj.hessianThreshold);
+ obj.info()->addParam(obj, "nOctaves", obj.nOctaves);
+ obj.info()->addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
+ obj.info()->addParam(obj, "extended", obj.extended);
+ obj.info()->addParam(obj, "upright", obj.upright))
+}
+
+#endif
+
+
///////////////////////////////////////////////////////////////////////////////////////////////////////////
bool initModule_nonfree(void)
cv::ocl::SURF_OCL::SURF_OCL()
{
hessianThreshold = 100.0f;
- extended = true;
+ extended = false;
nOctaves = 4;
- nOctaveLayers = 2;
+ nOctaveLayers = 3;
keypointsRatio = 0.01f;
upright = false;
}
return extended ? 128 : 64;
}
+int cv::ocl::SURF_OCL::descriptorType() const
+{
+ return CV_32F;
+}
+
void cv::ocl::SURF_OCL::uploadKeypoints(const vector<KeyPoint> &keypoints, oclMat &keypointsGPU)
{
if (keypoints.empty())
downloadDescriptors(descriptorsGPU, descriptors);
}
+
+void cv::ocl::SURF_OCL::operator()(InputArray img, InputArray mask,
+ CV_OUT vector<KeyPoint>& keypoints) const
+{
+ this->operator()(img, mask, keypoints, noArray(), false);
+}
+
+void cv::ocl::SURF_OCL::operator()(InputArray img, InputArray mask, vector<KeyPoint> &keypoints,
+ OutputArray descriptors, bool useProvidedKeypoints) const
+{
+ oclMat _img, _mask;
+ if(img.kind() == _InputArray::OCL_MAT)
+ _img = *(oclMat*)img.obj;
+ else
+ _img.upload(img.getMat());
+ if(_img.channels() != 1)
+ {
+ oclMat temp;
+ cvtColor(_img, temp, COLOR_BGR2GRAY);
+ _img = temp;
+ }
+
+ if( !mask.empty() )
+ {
+ if(mask.kind() == _InputArray::OCL_MAT)
+ _mask = *(oclMat*)mask.obj;
+ else
+ _mask.upload(mask.getMat());
+ }
+
+ SURF_OCL_Invoker surf((SURF_OCL&)*this, _img, _mask);
+ oclMat keypointsGPU;
+
+ if (!useProvidedKeypoints || !upright)
+ ((SURF_OCL*)this)->uploadKeypoints(keypoints, keypointsGPU);
+
+ if (!useProvidedKeypoints)
+ surf.detectKeypoints(keypointsGPU);
+ else if (!upright)
+ surf.findOrientation(keypointsGPU);
+ if(keypointsGPU.cols*keypointsGPU.rows != 0)
+ ((SURF_OCL*)this)->downloadKeypoints(keypointsGPU, keypoints);
+
+ if( descriptors.needed() )
+ {
+ oclMat descriptorsGPU;
+ surf.computeDescriptors(keypointsGPU, descriptorsGPU, descriptorSize());
+ Size sz = descriptorsGPU.size();
+ if( descriptors.kind() == _InputArray::STD_VECTOR )
+ {
+ CV_Assert(descriptors.type() == CV_32F);
+ std::vector<float>* v = (std::vector<float>*)descriptors.obj;
+ v->resize(sz.width*sz.height);
+ Mat m(sz, CV_32F, &v->at(0));
+ descriptorsGPU.download(m);
+ }
+ else
+ {
+ descriptors.create(sz, CV_32F);
+ Mat m = descriptors.getMat();
+ descriptorsGPU.download(m);
+ }
+ }
+}
+
+void cv::ocl::SURF_OCL::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
+{
+ (*this)(image, mask, keypoints, noArray(), false);
+}
+
+void cv::ocl::SURF_OCL::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
+{
+ (*this)(image, Mat(), keypoints, descriptors, true);
+}
+
void cv::ocl::SURF_OCL::releaseMemory()
{
sum.release();
const string DESCRIPTOR_DIR = FEATURES2D_DIR + "/descriptor_extractors";
const string IMAGE_FILENAME = "tsukuba.png";
+#if defined(HAVE_OPENCV_OCL) && 0 // unblock this to see SURF_OCL tests failures
+static Ptr<Feature2D> getSURF()
+{
+ ocl::PlatformsInfo p;
+ if(ocl::getOpenCLPlatforms(p) > 0)
+ return new ocl::SURF_OCL;
+ else
+ return new SURF;
+}
+#else
+static Ptr<Feature2D> getSURF()
+{
+ return new SURF;
+}
+#endif
+
/****************************************************************************************\
* Regression tests for feature detectors comparing keypoints. *
\****************************************************************************************/
TEST( Features2d_Detector_SURF, regression )
{
- CV_FeatureDetectorTest test( "detector-surf", FeatureDetector::create("SURF") );
+ CV_FeatureDetectorTest test( "detector-surf", Ptr<FeatureDetector>(getSURF()) );
test.safe_run();
}
TEST( Features2d_DescriptorExtractor_SURF, regression )
{
CV_DescriptorExtractorTest<L2<float> > test( "descriptor-surf", 0.05f,
- DescriptorExtractor::create("SURF") );
+ Ptr<DescriptorExtractor>(getSURF()) );
test.safe_run();
}
const int sz = 100;
const int k = 3;
- Ptr<DescriptorExtractor> ext = DescriptorExtractor::create("SURF");
+ Ptr<DescriptorExtractor> ext = Ptr<DescriptorExtractor>(getSURF());
ASSERT_TRUE(ext != NULL);
- Ptr<FeatureDetector> det = FeatureDetector::create("SURF");
+ Ptr<FeatureDetector> det = Ptr<FeatureDetector>(getSURF());
//"%YAML:1.0\nhessianThreshold: 8000.\noctaves: 3\noctaveLayers: 4\nupright: 0\n"
ASSERT_TRUE(det != NULL);
protected:
void run(int)
{
- Ptr<Feature2D> f = Algorithm::create<Feature2D>("Feature2D." + fname);
+ Ptr<Feature2D> f;
+ if(fname == "SURF")
+ f = getSURF();
+ else
+ f = Algorithm::create<Feature2D>("Feature2D." + fname);
if(f.empty())
return;
string path = string(ts->get_data_path()) + "detectors_descriptors_evaluation/planar/";
const string IMAGE_TSUKUBA = "/features2d/tsukuba.png";
const string IMAGE_BIKES = "/detectors_descriptors_evaluation/images_datasets/bikes/img1.png";
+#if defined(HAVE_OPENCV_OCL) && 0 // unblock this to see SURF_OCL tests failures
+#define SURF_NAME "Feature2D.SURF_OCL"
+#else
+#define SURF_NAME "Feature2D.SURF"
+#endif
+
#define SHOW_DEBUG_LOG 0
static
*/
TEST(Features2d_RotationInvariance_Detector_SURF, regression)
{
- DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
+ DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>(SURF_NAME),
0.44f,
0.76f);
test.safe_run();
*/
TEST(Features2d_RotationInvariance_Descriptor_SURF, regression)
{
- DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
- Algorithm::create<DescriptorExtractor>("Feature2D.SURF"),
+ DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>(SURF_NAME),
+ Algorithm::create<DescriptorExtractor>(SURF_NAME),
NORM_L1,
0.83f);
test.safe_run();
*/
TEST(Features2d_ScaleInvariance_Detector_SURF, regression)
{
- DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
+ DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>(SURF_NAME),
0.64f,
0.84f);
test.safe_run();
*/
TEST(Features2d_ScaleInvariance_Descriptor_SURF, regression)
{
- DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
- Algorithm::create<DescriptorExtractor>("Feature2D.SURF"),
+ DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>(SURF_NAME),
+ Algorithm::create<DescriptorExtractor>(SURF_NAME),
NORM_L1,
0.61f);
test.safe_run();
-----------------
Computes an integral image.
-.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth=-1)
+.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, oclMat &sqsum)
-.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum, int sdepth=-1)
+.. ocv:function:: void ocl::integral(const oclMat &src, oclMat &sum)
:param src: Source image. Only ``CV_8UC1`` images are supported for now.
- :param sum: Integral image containing 32-bit unsigned integer or 32-bit floating-point .
+ :param sum: Integral image containing 32-bit unsigned integer values packed into ``CV_32SC1`` .
- :param sqsum: Sqsum values is ``CV_32FC1`` or ``CV_64FC1`` type.
+ :param sqsum: Sqsum values of ``CV_32FC1`` type.
.. seealso:: :ocv:func:`integral`
CV_EXPORTS void warpPerspective(const oclMat &src, oclMat &dst, const Mat &M, Size dsize, int flags = INTER_LINEAR);
//! computes the integral image and integral for the squared image
- // sum will support CV_32S, CV_32F, sqsum - support CV32F, CV_64F
+ // sum will have CV_32S type, sqsum - CV_32F type
// supports only CV_8UC1 source type
- CV_EXPORTS void integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth=-1 );
- CV_EXPORTS void integral(const oclMat &src, oclMat &sum, int sdepth=-1 );
+ CV_EXPORTS void integral(const oclMat &src, oclMat &sum, oclMat &sqsum);
+ CV_EXPORTS void integral(const oclMat &src, oclMat &sum);
CV_EXPORTS void cornerHarris(const oclMat &src, oclMat &dst, int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
CV_EXPORTS void cornerHarris_dxdy(const oclMat &src, oclMat &dst, oclMat &Dx, oclMat &Dy,
int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
Size m_maxSize;
vector<CvSize> sizev;
vector<float> scalev;
- oclMat gimg1, gsum, gsqsum, gsqsum_t;
+ oclMat gimg1, gsum, gsqsum;
void * buffers;
};
typedef std::tr1::tuple<std::string, std::string, int> Cascade_Image_MinSize_t;
typedef perf::TestBaseWithParam<Cascade_Image_MinSize_t> Cascade_Image_MinSize;
-OCL_PERF_TEST_P(Cascade_Image_MinSize, DISABLED_CascadeClassifier,
+OCL_PERF_TEST_P(Cascade_Image_MinSize, CascadeClassifier,
testing::Combine(testing::Values( string("cv/cascadeandhog/cascades/haarcascade_frontalface_alt.xml"),
string("cv/cascadeandhog/cascades/haarcascade_frontalface_alt2.xml") ),
- testing::Values(string("cv/shared/lena.png"),
- string("cv/cascadeandhog/images/bttf301.png")/*,
- string("cv/cascadeandhog/images/class57.png")*/ ),
+ testing::Values( string("cv/shared/lena.png"),
+ string("cv/cascadeandhog/images/bttf301.png"),
+ string("cv/cascadeandhog/images/class57.png") ),
testing::Values(30, 64, 90)))
{
const string cascasePath = get<0>(GetParam());
faces.clear();
startTimer();
- cc.detectMultiScale(img, faces, 1.1, 3, 0, minSize);
+ cc.detectMultiScale(img, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, minSize);
stopTimer();
}
}
ocl::finish();
startTimer();
- cc.detectMultiScale(uimg, faces, 1.1, 3, 0, minSize);
+ cc.detectMultiScale(uimg, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, minSize);
stopTimer();
}
}
typedef tuple<Size, MatDepth> IntegralParams;
typedef TestBaseWithParam<IntegralParams> IntegralFixture;
-OCL_PERF_TEST_P(IntegralFixture, Integral1, ::testing::Combine(OCL_TEST_SIZES, OCL_PERF_ENUM(CV_32S, CV_32F)))
+OCL_PERF_TEST_P(IntegralFixture, DISABLED_Integral1, ::testing::Combine(OCL_TEST_SIZES, OCL_PERF_ENUM(CV_32S, CV_32F)))
{
const IntegralParams params = GetParam();
const Size srcSize = get<0>(params);
{
ocl::oclMat oclSrc(src), oclDst;
- OCL_TEST_CYCLE() cv::ocl::integral(oclSrc, oclDst, sdepth);
+// OCL_TEST_CYCLE() cv::ocl::integral(oclSrc, oclDst, sdepth);
oclDst.download(dst);
oclDst.download(dst);
- SANITY_CHECK(dst, 3e-2);
+ SANITY_CHECK(dst, 2e-2);
}
else if (RUN_PLAIN_IMPL)
{
TEST_CYCLE() cv::matchTemplate(src, templ, dst, CV_TM_CCORR_NORMED);
- SANITY_CHECK(dst, 3e-2);
+ SANITY_CHECK(dst, 2e-2);
}
else
OCL_PERF_ELSE
oclMat gsum(totalheight + 4, gimg.cols + 1, CV_32SC1);
oclMat gsqsum(totalheight + 4, gimg.cols + 1, CV_32FC1);
- int sdepth = 0;
- if(Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE))
- sdepth = CV_64FC1;
- else
- sdepth = CV_32FC1;
- sdepth = CV_MAT_DEPTH(sdepth);
- int type = CV_MAKE_TYPE(sdepth, 1);
- oclMat gsqsum_t(totalheight + 4, gimg.cols + 1, type);
-
cl_mem stagebuffer;
cl_mem nodebuffer;
cl_mem candidatebuffer;
cv::Rect roi, roi2;
cv::Mat imgroi, imgroisq;
cv::ocl::oclMat resizeroi, gimgroi, gimgroisq;
-
int grp_per_CU = 12;
size_t blocksize = 8;
roi2 = Rect(0, 0, sz.width - 1, sz.height - 1);
resizeroi = gimg1(roi2);
gimgroi = gsum(roi);
- gimgroisq = gsqsum_t(roi);
+ gimgroisq = gsqsum(roi);
int width = gimgroi.cols - 1 - cascade->orig_window_size.width;
int height = gimgroi.rows - 1 - cascade->orig_window_size.height;
scaleinfo[i].width_height = (width << 16) | height;
scaleinfo[i].factor = factor;
cv::ocl::resize(gimg, resizeroi, Size(sz.width - 1, sz.height - 1), 0, 0, INTER_LINEAR);
cv::ocl::integral(resizeroi, gimgroi, gimgroisq);
-
indexy += sz.height;
}
- if(gsqsum_t.depth() == CV_64F)
- gsqsum_t.convertTo(gsqsum, CV_32FC1);
- else
- gsqsum = gsqsum_t;
gcascade = (GpuHidHaarClassifierCascade *)cascade->hid_cascade;
stage = (GpuHidHaarStageClassifier *)(gcascade + 1);
int n_factors = 0;
oclMat gsum;
oclMat gsqsum;
- oclMat gsqsum_t;
- cv::ocl::integral(gimg, gsum, gsqsum_t);
- if(gsqsum_t.depth() == CV_64F)
- gsqsum_t.convertTo(gsqsum, CV_32FC1);
- else
- gsqsum = gsqsum_t;
+ cv::ocl::integral(gimg, gsum, gsqsum);
CvSize sz;
vector<CvSize> sizev;
vector<float> scalev;
roi2 = Rect(0, 0, sz.width - 1, sz.height - 1);
resizeroi = gimg1(roi2);
gimgroi = gsum(roi);
- gimgroisq = gsqsum_t(roi);
+ gimgroisq = gsqsum(roi);
cv::ocl::resize(gimg, resizeroi, Size(sz.width - 1, sz.height - 1), 0, 0, INTER_LINEAR);
cv::ocl::integral(resizeroi, gimgroi, gimgroisq);
indexy += sz.height;
}
- if(gsqsum_t.depth() == CV_64F)
- gsqsum_t.convertTo(gsqsum, CV_32FC1);
- else
- gsqsum = gsqsum_t;
gcascade = (GpuHidHaarClassifierCascade *)(cascade->hid_cascade);
stage = (GpuHidHaarStageClassifier *)(gcascade + 1);
}
else
{
- cv::ocl::integral(gimg, gsum, gsqsum_t);
- if(gsqsum_t.depth() == CV_64F)
- gsqsum_t.convertTo(gsqsum, CV_32FC1);
- else
- gsqsum = gsqsum_t;
+ cv::ocl::integral(gimg, gsum, gsqsum);
gcascade = (GpuHidHaarClassifierCascade *)cascade->hid_cascade;
gimg1.release();
gsum.release();
gsqsum.release();
- gsqsum_t.release();
}
else if (!(m_flags & CV_HAAR_SCALE_IMAGE) && (flags & CV_HAAR_SCALE_IMAGE))
{
gsum.create(totalheight + 4, cols + 1, CV_32SC1);
gsqsum.create(totalheight + 4, cols + 1, CV_32FC1);
- int sdepth = 0;
- if(Context::getContext()->supportsFeature(FEATURE_CL_DOUBLE))
- sdepth = CV_64FC1;
- else
- sdepth = CV_32FC1;
- sdepth = CV_MAT_DEPTH(sdepth);
- int type = CV_MAKE_TYPE(sdepth, 1);
-
- gsqsum_t.create(totalheight + 4, cols + 1, type);
-
scaleinfo = (detect_piramid_info *)malloc(sizeof(detect_piramid_info) * loopcount);
for( int i = 0; i < loopcount; i++ )
{
////////////////////////////////////////////////////////////////////////
// integral
- void integral(const oclMat &src, oclMat &sum, oclMat &sqsum, int sdepth)
+ void integral(const oclMat &src, oclMat &sum, oclMat &sqsum)
{
CV_Assert(src.type() == CV_8UC1);
if (!src.clCxt->supportsFeature(ocl::FEATURE_CL_DOUBLE) && src.depth() == CV_64F)
return;
}
- if( sdepth <= 0 )
- sdepth = CV_32S;
- sdepth = CV_MAT_DEPTH(sdepth);
- int type = CV_MAKE_TYPE(sdepth, 1);
-
int vlen = 4;
int offset = src.offset / vlen;
int pre_invalid = src.offset % vlen;
oclMat t_sum , t_sqsum;
int w = src.cols + 1, h = src.rows + 1;
-
- char build_option[250];
- if(Context::getContext()->supportsFeature(ocl::FEATURE_CL_DOUBLE))
- {
- t_sqsum.create(src.cols, src.rows, CV_64FC1);
- sqsum.create(h, w, CV_64FC1);
- sprintf(build_option, "-D TYPE=double -D TYPE4=double4 -D convert_TYPE4=convert_double4");
- }
- else
- {
- t_sqsum.create(src.cols, src.rows, CV_32FC1);
- sqsum.create(h, w, CV_32FC1);
- sprintf(build_option, "-D TYPE=float -D TYPE4=float4 -D convert_TYPE4=convert_float4");
- }
+ int depth = src.depth() == CV_8U ? CV_32S : CV_64F;
+ int type = CV_MAKE_TYPE(depth, 1);
t_sum.create(src.cols, src.rows, type);
sum.create(h, w, type);
- int sum_offset = sum.offset / sum.elemSize();
- int sqsum_offset = sqsum.offset / sqsum.elemSize();
+ t_sqsum.create(src.cols, src.rows, CV_32FC1);
+ sqsum.create(h, w, CV_32FC1);
+
+ int sum_offset = sum.offset / vlen;
+ int sqsum_offset = sqsum.offset / vlen;
vector<pair<size_t , const void *> > args;
args.push_back( make_pair( sizeof(cl_mem) , (void *)&src.data ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.cols ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.step ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step));
- args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sqsum.step));
size_t gt[3] = {((vcols + 1) / 2) * 256, 1, 1}, lt[3] = {256, 1, 1};
- openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_cols", gt, lt, args, -1, sdepth, build_option);
+ openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_cols", gt, lt, args, -1, depth);
args.clear();
args.push_back( make_pair( sizeof(cl_mem) , (void *)&t_sum.data ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.rows ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.cols ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step ));
- args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sqsum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sqsum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum_offset));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sqsum_offset));
size_t gt2[3] = {t_sum.cols * 32, 1, 1}, lt2[3] = {256, 1, 1};
- openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_rows", gt2, lt2, args, -1, sdepth, build_option);
+ openCLExecuteKernel(src.clCxt, &imgproc_integral, "integral_rows", gt2, lt2, args, -1, depth);
}
- void integral(const oclMat &src, oclMat &sum, int sdepth)
+ void integral(const oclMat &src, oclMat &sum)
{
CV_Assert(src.type() == CV_8UC1);
int vlen = 4;
int pre_invalid = src.offset % vlen;
int vcols = (pre_invalid + src.cols + vlen - 1) / vlen;
- if( sdepth <= 0 )
- sdepth = CV_32S;
- sdepth = CV_MAT_DEPTH(sdepth);
- int type = CV_MAKE_TYPE(sdepth, 1);
-
oclMat t_sum;
int w = src.cols + 1, h = src.rows + 1;
+ int depth = src.depth() == CV_8U ? CV_32S : CV_32F;
+ int type = CV_MAKE_TYPE(depth, 1);
t_sum.create(src.cols, src.rows, type);
sum.create(h, w, type);
args.push_back( make_pair( sizeof(cl_int) , (void *)&src.step ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&t_sum.step));
size_t gt[3] = {((vcols + 1) / 2) * 256, 1, 1}, lt[3] = {256, 1, 1};
- openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_cols", gt, lt, args, -1, sdepth);
+ openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_cols", gt, lt, args, -1, depth);
args.clear();
args.push_back( make_pair( sizeof(cl_mem) , (void *)&t_sum.data ));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum.step));
args.push_back( make_pair( sizeof(cl_int) , (void *)&sum_offset));
size_t gt2[3] = {t_sum.cols * 32, 1, 1}, lt2[3] = {256, 1, 1};
- openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_rows", gt2, lt2, args, -1, sdepth);
+ openCLExecuteKernel(src.clCxt, &imgproc_integral_sum, "integral_sum_rows", gt2, lt2, args, -1, depth);
}
/////////////////////// corner //////////////////////////////
void matchTemplate_CCORR_NORMED(
const oclMat &image, const oclMat &templ, oclMat &result, MatchTemplateBuf &buf)
{
- cv::ocl::oclMat temp;
matchTemplate_CCORR(image, templ, result, buf);
buf.image_sums.resize(1);
buf.image_sqsums.resize(1);
- integral(image.reshape(1), buf.image_sums[0], temp);
- if(temp.depth() == CV_64F)
- temp.convertTo(buf.image_sqsums[0], CV_32FC1);
- else
- buf.image_sqsums[0] = temp;
+
+ integral(image.reshape(1), buf.image_sums[0], buf.image_sqsums[0]);
+
unsigned long long templ_sqsum = (unsigned long long)sqrSum(templ.reshape(1))[0];
Context *clCxt = image.clCxt;
{
buf.image_sums.resize(1);
buf.image_sqsums.resize(1);
- cv::ocl::oclMat temp;
- integral(image, buf.image_sums[0], temp);
- if(temp.depth() == CV_64F)
- temp.convertTo(buf.image_sqsums[0], CV_32FC1);
- else
- buf.image_sqsums[0] = temp;
+ integral(image, buf.image_sums[0], buf.image_sqsums[0]);
templ_sum[0] = (float)sum(templ)[0];
templ_sum *= scale;
buf.image_sums.resize(buf.images.size());
buf.image_sqsums.resize(buf.images.size());
- cv::ocl::oclMat temp;
+
for(int i = 0; i < image.oclchannels(); i ++)
{
- integral(buf.images[i], buf.image_sums[i], temp);
- if(temp.depth() == CV_64F)
- temp.convertTo(buf.image_sqsums[i], CV_32FC1);
- else
- buf.image_sqsums[i] = temp;
+ integral(buf.images[i], buf.image_sums[i], buf.image_sqsums[i]);
}
switch(image.oclchannels())
GpuHidHaarTreeNode;
-//typedef struct __attribute__((aligned (32))) GpuHidHaarClassifier
-//{
-// int count __attribute__((aligned (4)));
-// GpuHidHaarTreeNode* node __attribute__((aligned (8)));
-// float* alpha __attribute__((aligned (8)));
-//}
-//GpuHidHaarClassifier;
+typedef struct __attribute__((aligned (32))) GpuHidHaarClassifier
+{
+ int count __attribute__((aligned (4)));
+ GpuHidHaarTreeNode* node __attribute__((aligned (8)));
+ float* alpha __attribute__((aligned (8)));
+}
+GpuHidHaarClassifier;
typedef struct __attribute__((aligned (64))) GpuHidHaarStageClassifier
GpuHidHaarStageClassifier;
-//typedef struct __attribute__((aligned (64))) GpuHidHaarClassifierCascade
-//{
-// int count __attribute__((aligned (4)));
-// int is_stump_based __attribute__((aligned (4)));
-// int has_tilted_features __attribute__((aligned (4)));
-// int is_tree __attribute__((aligned (4)));
-// int pq0 __attribute__((aligned (4)));
-// int pq1 __attribute__((aligned (4)));
-// int pq2 __attribute__((aligned (4)));
-// int pq3 __attribute__((aligned (4)));
-// int p0 __attribute__((aligned (4)));
-// int p1 __attribute__((aligned (4)));
-// int p2 __attribute__((aligned (4)));
-// int p3 __attribute__((aligned (4)));
-// float inv_window_area __attribute__((aligned (4)));
-//} GpuHidHaarClassifierCascade;
+typedef struct __attribute__((aligned (64))) GpuHidHaarClassifierCascade
+{
+ int count __attribute__((aligned (4)));
+ int is_stump_based __attribute__((aligned (4)));
+ int has_tilted_features __attribute__((aligned (4)));
+ int is_tree __attribute__((aligned (4)));
+ int pq0 __attribute__((aligned (4)));
+ int pq1 __attribute__((aligned (4)));
+ int pq2 __attribute__((aligned (4)));
+ int pq3 __attribute__((aligned (4)));
+ int p0 __attribute__((aligned (4)));
+ int p1 __attribute__((aligned (4)));
+ int p2 __attribute__((aligned (4)));
+ int p3 __attribute__((aligned (4)));
+ float inv_window_area __attribute__((aligned (4)));
+} GpuHidHaarClassifierCascade;
#ifdef PACKED_CLASSIFIER
for(int stageloop = start_stage; (stageloop < end_stage) && result; stageloop++ )
{// iterate until candidate is valid
float stage_sum = 0.0f;
- __global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
- ((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
- int lcl_off = (yl*DATA_SIZE_X)+(xl);
- int stagecount = stageinfo->count;
- float stagethreshold = stageinfo->threshold;
- for(int nodeloop = 0; nodeloop < stagecount; nodecounter++,nodeloop++ )
+ int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
+ float stagethreshold = as_float(stageinfo.y);
+ int lcl_off = (lid_y*DATA_SIZE_X)+(lid_x);
+ for(int nodeloop = 0; nodeloop < stageinfo.x; nodecounter++,nodeloop++ )
{
// simple macro to extract shorts from int
#define M0(_t) ((_t)&0xFFFF)
variance_norm_factor = variance_norm_factor * correction - mean * mean;
variance_norm_factor = variance_norm_factor >=0.f ? sqrt(variance_norm_factor) : 1.f;
- for(int stageloop = start_stage; (stageloop < split_stage) && result; stageloop++ )
+ for(int stageloop = start_stage; (stageloop < split_stage) && result; stageloop++ )
{
float stage_sum = 0.f;
- __global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
- ((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
- int stagecount = stageinfo->count;
- float stagethreshold = stageinfo->threshold;
- for(int nodeloop = 0; nodeloop < stagecount; )
+ int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
+ float stagethreshold = as_float(stageinfo.y);
+ for(int nodeloop = 0; nodeloop < stageinfo.x; )
{
- __global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
- (((__global uchar*)nodeptr) + nodecounter * sizeof(GpuHidHaarTreeNode));
+ __global GpuHidHaarTreeNode* currentnodeptr = (nodeptr + nodecounter);
int4 info1 = *(__global int4*)(&(currentnodeptr->p[0][0]));
int4 info2 = *(__global int4*)(&(currentnodeptr->p[1][0]));
#endif
}
- result = (stage_sum >= stagethreshold) ? 1 : 0;
+ result = (stage_sum >= stagethreshold);
}
if(factor < 2)
{
lclcount[0]=0;
barrier(CLK_LOCAL_MEM_FENCE);
- //int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
- __global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
- ((__global uchar*)stagecascadeptr+stageloop*sizeof(GpuHidHaarStageClassifier));
- int stagecount = stageinfo->count;
- float stagethreshold = stageinfo->threshold;
+ int2 stageinfo = *(global int2*)(stagecascadeptr+stageloop);
+ float stagethreshold = as_float(stageinfo.y);
int perfscale = queuecount > 4 ? 3 : 2;
int queuecount_loop = (queuecount + (1<<perfscale)-1) >> perfscale;
int lcl_compute_win = lcl_sz >> perfscale;
int lcl_compute_win_id = (lcl_id >>(6-perfscale));
- int lcl_loops = (stagecount + lcl_compute_win -1) >> (6-perfscale);
+ int lcl_loops = (stageinfo.x + lcl_compute_win -1) >> (6-perfscale);
int lcl_compute_id = lcl_id - (lcl_compute_win_id << (6-perfscale));
for(int queueloop=0; queueloop<queuecount_loop; queueloop++)
{
float part_sum = 0.f;
const int stump_factor = STUMP_BASED ? 1 : 2;
int root_offset = 0;
- for(int lcl_loop=0; lcl_loop<lcl_loops && tempnodecounter<stagecount;)
+ for(int lcl_loop=0; lcl_loop<lcl_loops && tempnodecounter<stageinfo.x;)
{
- __global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
- (((__global uchar*)nodeptr) + sizeof(GpuHidHaarTreeNode) * ((nodecounter + tempnodecounter) * stump_factor + root_offset));
+ __global GpuHidHaarTreeNode* currentnodeptr =
+ nodeptr + (nodecounter + tempnodecounter) * stump_factor + root_offset;
int4 info1 = *(__global int4*)(&(currentnodeptr->p[0][0]));
int4 info2 = *(__global int4*)(&(currentnodeptr->p[1][0]));
queuecount = lclcount[0];
barrier(CLK_LOCAL_MEM_FENCE);
- nodecounter += stagecount;
+ nodecounter += stageinfo.x;
}//end for(int stageloop = splitstage; stageloop< endstage && queuecount>0;stageloop++)
if(lcl_id<queuecount)
int right __attribute__((aligned(4)));
}
GpuHidHaarTreeNode;
-//typedef struct __attribute__((aligned(32))) GpuHidHaarClassifier
-//{
-// int count __attribute__((aligned(4)));
-// GpuHidHaarTreeNode *node __attribute__((aligned(8)));
-// float *alpha __attribute__((aligned(8)));
-//}
-//GpuHidHaarClassifier;
+typedef struct __attribute__((aligned(32))) GpuHidHaarClassifier
+{
+ int count __attribute__((aligned(4)));
+ GpuHidHaarTreeNode *node __attribute__((aligned(8)));
+ float *alpha __attribute__((aligned(8)));
+}
+GpuHidHaarClassifier;
typedef struct __attribute__((aligned(64))) GpuHidHaarStageClassifier
{
int count __attribute__((aligned(4)));
int reserved3 __attribute__((aligned(8)));
}
GpuHidHaarStageClassifier;
-//typedef struct __attribute__((aligned(64))) GpuHidHaarClassifierCascade
-//{
-// int count __attribute__((aligned(4)));
-// int is_stump_based __attribute__((aligned(4)));
-// int has_tilted_features __attribute__((aligned(4)));
-// int is_tree __attribute__((aligned(4)));
-// int pq0 __attribute__((aligned(4)));
-// int pq1 __attribute__((aligned(4)));
-// int pq2 __attribute__((aligned(4)));
-// int pq3 __attribute__((aligned(4)));
-// int p0 __attribute__((aligned(4)));
-// int p1 __attribute__((aligned(4)));
-// int p2 __attribute__((aligned(4)));
-// int p3 __attribute__((aligned(4)));
-// float inv_window_area __attribute__((aligned(4)));
-//} GpuHidHaarClassifierCascade;
+typedef struct __attribute__((aligned(64))) GpuHidHaarClassifierCascade
+{
+ int count __attribute__((aligned(4)));
+ int is_stump_based __attribute__((aligned(4)));
+ int has_tilted_features __attribute__((aligned(4)));
+ int is_tree __attribute__((aligned(4)));
+ int pq0 __attribute__((aligned(4)));
+ int pq1 __attribute__((aligned(4)));
+ int pq2 __attribute__((aligned(4)));
+ int pq3 __attribute__((aligned(4)));
+ int p0 __attribute__((aligned(4)));
+ int p1 __attribute__((aligned(4)));
+ int p2 __attribute__((aligned(4)));
+ int p3 __attribute__((aligned(4)));
+ float inv_window_area __attribute__((aligned(4)));
+} GpuHidHaarClassifierCascade;
__kernel void gpuRunHaarClassifierCascade_scaled2(
- global GpuHidHaarStageClassifier *stagecascadeptr_,
+ global GpuHidHaarStageClassifier *stagecascadeptr,
global int4 *info,
- global GpuHidHaarTreeNode *nodeptr_,
+ global GpuHidHaarTreeNode *nodeptr,
global const int *restrict sum,
- global const float *restrict sqsum,
+ global const float *restrict sqsum,
global int4 *candidate,
const int rows,
const int cols,
int max_idx = rows * cols - 1;
for (int scalei = 0; scalei < loopcount; scalei++)
{
- int4 scaleinfo1 = info[scalei];
+ int4 scaleinfo1;
+ scaleinfo1 = info[scalei];
int grpnumperline = (scaleinfo1.y & 0xffff0000) >> 16;
int totalgrp = scaleinfo1.y & 0xffff;
float factor = as_float(scaleinfo1.w);
for (int stageloop = start_stage; (stageloop < end_stage) && result; stageloop++)
{
float stage_sum = 0.f;
- __global GpuHidHaarStageClassifier* stageinfo = (__global GpuHidHaarStageClassifier*)
- (((__global uchar*)stagecascadeptr_)+stageloop*sizeof(GpuHidHaarStageClassifier));
- int stagecount = stageinfo->count;
+ int stagecount = stagecascadeptr[stageloop].count;
for (int nodeloop = 0; nodeloop < stagecount;)
{
- __global GpuHidHaarTreeNode* currentnodeptr = (__global GpuHidHaarTreeNode*)
- (((__global uchar*)nodeptr_) + nodecounter * sizeof(GpuHidHaarTreeNode));
+ __global GpuHidHaarTreeNode *currentnodeptr = (nodeptr + nodecounter);
int4 info1 = *(__global int4 *)(&(currentnodeptr->p[0][0]));
int4 info2 = *(__global int4 *)(&(currentnodeptr->p[1][0]));
int4 info3 = *(__global int4 *)(&(currentnodeptr->p[2][0]));
float4 w = *(__global float4 *)(&(currentnodeptr->weight[0]));
- float3 alpha3 = *(__global float3*)(&(currentnodeptr->alpha[0]));
+ float3 alpha3 = *(__global float3 *)(&(currentnodeptr->alpha[0]));
float nodethreshold = w.w * variance_norm_factor;
info1.x += p_offset;
sum[clamp(mad24(info3.w, step, info3.x), 0, max_idx)]
+ sum[clamp(mad24(info3.w, step, info3.z), 0, max_idx)]) * w.z;
- bool passThres = (classsum >= nodethreshold) ? 1 : 0;
+ bool passThres = classsum >= nodethreshold;
#if STUMP_BASED
stage_sum += passThres ? alpha3.y : alpha3.x;
}
#endif
}
-
- result = (stage_sum >= stageinfo->threshold) ? 1 : 0;
+ result = (int)(stage_sum >= stagecascadeptr[stageloop].threshold);
}
barrier(CLK_LOCAL_MEM_FENCE);
}
}
}
-__kernel void gpuscaleclassifier(global GpuHidHaarTreeNode *orinode, global GpuHidHaarTreeNode *newnode, float scale, float weight_scale, const int nodenum)
+__kernel void gpuscaleclassifier(global GpuHidHaarTreeNode *orinode, global GpuHidHaarTreeNode *newnode, float scale, float weight_scale, int nodenum)
{
- const int counter = get_global_id(0);
+ int counter = get_global_id(0);
int tr_x[3], tr_y[3], tr_h[3], tr_w[3], i = 0;
- GpuHidHaarTreeNode t1 = *(__global GpuHidHaarTreeNode*)
- (((__global uchar*)orinode) + counter * sizeof(GpuHidHaarTreeNode));
- __global GpuHidHaarTreeNode* pNew = (__global GpuHidHaarTreeNode*)
- (((__global uchar*)newnode) + (counter + nodenum) * sizeof(GpuHidHaarTreeNode));
+ GpuHidHaarTreeNode t1 = *(orinode + counter);
#pragma unroll
for (i = 0; i < 3; i++)
}
t1.weight[0] = -(t1.weight[1] * tr_h[1] * tr_w[1] + t1.weight[2] * tr_h[2] * tr_w[2]) / (tr_h[0] * tr_w[0]);
+ counter += nodenum;
#pragma unroll
for (i = 0; i < 3; i++)
{
- pNew->p[i][0] = tr_x[i];
- pNew->p[i][1] = tr_y[i];
- pNew->p[i][2] = tr_x[i] + tr_w[i];
- pNew->p[i][3] = tr_y[i] + tr_h[i];
- pNew->weight[i] = t1.weight[i] * weight_scale;
+ newnode[counter].p[i][0] = tr_x[i];
+ newnode[counter].p[i][1] = tr_y[i];
+ newnode[counter].p[i][2] = tr_x[i] + tr_w[i];
+ newnode[counter].p[i][3] = tr_y[i] + tr_h[i];
+ newnode[counter].weight[i] = t1.weight[i] * weight_scale;
}
- pNew->left = t1.left;
- pNew->right = t1.right;
- pNew->threshold = t1.threshold;
- pNew->alpha[0] = t1.alpha[0];
- pNew->alpha[1] = t1.alpha[1];
- pNew->alpha[2] = t1.alpha[2];
+ newnode[counter].left = t1.left;
+ newnode[counter].right = t1.right;
+ newnode[counter].threshold = t1.threshold;
+ newnode[counter].alpha[0] = t1.alpha[0];
+ newnode[counter].alpha[1] = t1.alpha[1];
+ newnode[counter].alpha[2] = t1.alpha[2];
}
#elif defined (cl_khr_fp64)
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#endif
-#define CONVERT(step) ((step)>>1)
-#else
-#define CONVERT(step) ((step))
#endif
#define LSIZE 256
#define GET_CONFLICT_OFFSET(lid) ((lid) >> LOG_NUM_BANKS)
-kernel void integral_cols_D4(__global uchar4 *src,__global int *sum ,__global TYPE *sqsum,
- int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step,int dst1_step)
+kernel void integral_cols_D4(__global uchar4 *src,__global int *sum ,__global float *sqsum,
+ int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
int4 src_t[2], sum_t[2];
- TYPE4 sqsum_t[2];
+ float4 sqsum_t[2];
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
- __local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
+ __local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local int* sum_p;
- __local TYPE* sqsum_p;
+ __local float* sqsum_p;
src_step = src_step >> 2;
gid = gid << 1;
for(int i = 0; i < rows; i =i + LSIZE_1)
src_t[1] = (i + lid < rows ? convert_int4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : 0);
sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
- sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
+ sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
- sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
+ sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
lm_sum[0][bf_loc] = src_t[0];
- lm_sqsum[0][bf_loc] = convert_TYPE4(src_t[0] * src_t[0]);
+ lm_sqsum[0][bf_loc] = convert_float4(src_t[0] * src_t[0]);
lm_sum[1][bf_loc] = src_t[1];
- lm_sqsum[1][bf_loc] = convert_TYPE4(src_t[1] * src_t[1]);
+ lm_sqsum[1][bf_loc] = convert_float4(src_t[1] * src_t[1]);
int offset = 1;
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
}
}
barrier(CLK_LOCAL_MEM_FENCE);
- int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step /4, loc_s1 = loc_s0 + dst_step ;
- int loc_sq0 = gid * CONVERT(dst1_step) + i + lid - 1 - pre_invalid * dst1_step / sizeof(TYPE),loc_sq1 = loc_sq0 + CONVERT(dst1_step);
+ int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step ;
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
- sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
+ sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
- sqsum[loc_sq0 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
+ sqsum[loc_s0 + k * dst_step / 4] = sqsum_p[k];
}
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
- sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
+ sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
- sqsum[loc_sq1 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
+ sqsum[loc_s1 + k * dst_step / 4] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
}
-kernel void integral_rows_D4(__global int4 *srcsum,__global TYPE4 * srcsqsum,__global int *sum ,
- __global TYPE *sqsum,int rows,int cols,int src_step,int src1_step,int sum_step,
+kernel void integral_rows_D4(__global int4 *srcsum,__global float4 * srcsqsum,__global int *sum ,
+ __global float *sqsum,int rows,int cols,int src_step,int sum_step,
int sqsum_step,int sum_offset,int sqsum_offset)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
int4 src_t[2], sum_t[2];
- TYPE4 sqsrc_t[2],sqsum_t[2];
+ float4 sqsrc_t[2],sqsum_t[2];
__local int4 lm_sum[2][LSIZE + LOG_LSIZE];
- __local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
+ __local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local int *sum_p;
- __local TYPE *sqsum_p;
+ __local float *sqsum_p;
src_step = src_step >> 4;
- src1_step = (src1_step / sizeof(TYPE)) >> 2 ;
- gid <<= 1;
for(int i = 0; i < rows; i =i + LSIZE_1)
{
- src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid ] : (int4)0;
- sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid ] : (TYPE4)0;
- src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid + 1] : (int4)0;
- sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid + 1] : (TYPE4)0;
+ src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2] : (int4)0;
+ sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2] : (float4)0;
+ src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2 + 1] : (int4)0;
+ sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
sum_t[0] = (i == 0 ? 0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
- sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
+ sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? 0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
- sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
+ sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
}
if(i + lid == 0)
{
- int loc0 = gid * sum_step;
- int loc1 = gid * CONVERT(sqsum_step);
+ int loc0 = gid * 2 * sum_step;
+ int loc1 = gid * 2 * sqsum_step;
for(int k = 1; k <= 8; k++)
{
- if(gid * 4 + k > cols) break;
+ if(gid * 8 + k > cols) break;
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
- sqsum[sqsum_offset + loc1 + k * sqsum_step / sizeof(TYPE)] = 0;
+ sqsum[sqsum_offset + loc1 + k * sqsum_step / 4] = 0;
}
}
- int loc_s0 = sum_offset + gid * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
- int loc_sq0 = sqsum_offset + gid * CONVERT(sqsum_step) + sqsum_step / sizeof(TYPE) + i + lid, loc_sq1 = loc_sq0 + CONVERT(sqsum_step) ;
-
+ int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
+ int loc_sq0 = sqsum_offset + gid * 2 * sqsum_step + sqsum_step / 4 + i + lid, loc_sq1 = loc_sq0 + sqsum_step ;
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local int*)(&(lm_sum[0][bf_loc]));
- sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
+ sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
- if(gid * 4 + k >= cols) break;
+ if(gid * 8 + k >= cols) break;
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
- sqsum[loc_sq0 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
+ sqsum[loc_sq0 + k * sqsum_step / 4] = sqsum_p[k];
}
sum_p = (__local int*)(&(lm_sum[1][bf_loc]));
- sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
+ sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
- if(gid * 4 + 4 + k >= cols) break;
+ if(gid * 8 + 4 + k >= cols) break;
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
- sqsum[loc_sq1 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
+ sqsum[loc_sq1 + k * sqsum_step / 4] = sqsum_p[k];
}
- }
+ }
barrier(CLK_LOCAL_MEM_FENCE);
}
}
-kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global TYPE *sqsum,
- int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step, int dst1_step)
+kernel void integral_cols_D5(__global uchar4 *src,__global float *sum ,__global float *sqsum,
+ int src_offset,int pre_invalid,int rows,int cols,int src_step,int dst_step)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
float4 src_t[2], sum_t[2];
- TYPE4 sqsum_t[2];
+ float4 sqsum_t[2];
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
- __local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
+ __local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local float* sum_p;
- __local TYPE* sqsum_p;
+ __local float* sqsum_p;
src_step = src_step >> 2;
gid = gid << 1;
for(int i = 0; i < rows; i =i + LSIZE_1)
src_t[1] = (i + lid < rows ? convert_float4(src[src_offset + (lid+i) * src_step + min(gid + 1, cols - 1)]) : (float4)0);
sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
- sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
+ sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
- sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
+ sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
lm_sum[0][bf_loc] = src_t[0];
- lm_sqsum[0][bf_loc] = convert_TYPE4(src_t[0] * src_t[0]);
+ lm_sqsum[0][bf_loc] = convert_float4(src_t[0] * src_t[0]);
lm_sum[1][bf_loc] = src_t[1];
- lm_sqsum[1][bf_loc] = convert_TYPE4(src_t[1] * src_t[1]);
+ lm_sqsum[1][bf_loc] = convert_float4(src_t[1] * src_t[1]);
int offset = 1;
for(int d = LSIZE >> 1 ; d > 0; d>>=1)
}
barrier(CLK_LOCAL_MEM_FENCE);
int loc_s0 = gid * dst_step + i + lid - 1 - pre_invalid * dst_step / 4, loc_s1 = loc_s0 + dst_step ;
- int loc_sq0 = gid * CONVERT(dst1_step) + i + lid - 1 - pre_invalid * dst1_step / sizeof(TYPE), loc_sq1 = loc_sq0 + CONVERT(dst1_step);
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
- sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
+ sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k >= cols + pre_invalid || gid * 4 + k < pre_invalid) continue;
sum[loc_s0 + k * dst_step / 4] = sum_p[k];
- sqsum[loc_sq0 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
+ sqsum[loc_s0 + k * dst_step / 4] = sqsum_p[k];
}
sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
- sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
+ sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 4 + k + 4 >= cols + pre_invalid) break;
sum[loc_s1 + k * dst_step / 4] = sum_p[k];
- sqsum[loc_sq1 + k * dst1_step / sizeof(TYPE)] = sqsum_p[k];
+ sqsum[loc_s1 + k * dst_step / 4] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
}
-kernel void integral_rows_D5(__global float4 *srcsum,__global TYPE4 * srcsqsum,__global float *sum ,
- __global TYPE *sqsum,int rows,int cols,int src_step,int src1_step, int sum_step,
+kernel void integral_rows_D5(__global float4 *srcsum,__global float4 * srcsqsum,__global float *sum ,
+ __global float *sqsum,int rows,int cols,int src_step,int sum_step,
int sqsum_step,int sum_offset,int sqsum_offset)
{
int lid = get_local_id(0);
int gid = get_group_id(0);
float4 src_t[2], sum_t[2];
- TYPE4 sqsrc_t[2],sqsum_t[2];
+ float4 sqsrc_t[2],sqsum_t[2];
__local float4 lm_sum[2][LSIZE + LOG_LSIZE];
- __local TYPE4 lm_sqsum[2][LSIZE + LOG_LSIZE];
+ __local float4 lm_sqsum[2][LSIZE + LOG_LSIZE];
__local float *sum_p;
- __local TYPE *sqsum_p;
+ __local float *sqsum_p;
src_step = src_step >> 4;
- src1_step = (src1_step / sizeof(TYPE)) >> 2;
for(int i = 0; i < rows; i =i + LSIZE_1)
{
src_t[0] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2] : (float4)0;
- sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid * 2] : (TYPE4)0;
+ sqsrc_t[0] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2] : (float4)0;
src_t[1] = i + lid < rows ? srcsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
- sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src1_step + gid * 2 + 1] : (TYPE4)0;
+ sqsrc_t[1] = i + lid < rows ? srcsqsum[(lid+i) * src_step + gid * 2 + 1] : (float4)0;
sum_t[0] = (i == 0 ? (float4)0 : lm_sum[0][LSIZE_2 + LOG_LSIZE]);
- sqsum_t[0] = (i == 0 ? (TYPE4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
+ sqsum_t[0] = (i == 0 ? (float4)0 : lm_sqsum[0][LSIZE_2 + LOG_LSIZE]);
sum_t[1] = (i == 0 ? (float4)0 : lm_sum[1][LSIZE_2 + LOG_LSIZE]);
- sqsum_t[1] = (i == 0 ? (TYPE4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
+ sqsum_t[1] = (i == 0 ? (float4)0 : lm_sqsum[1][LSIZE_2 + LOG_LSIZE]);
barrier(CLK_LOCAL_MEM_FENCE);
int bf_loc = lid + GET_CONFLICT_OFFSET(lid);
if(i + lid == 0)
{
int loc0 = gid * 2 * sum_step;
- int loc1 = gid * 2 * CONVERT(sqsum_step);
+ int loc1 = gid * 2 * sqsum_step;
for(int k = 1; k <= 8; k++)
{
if(gid * 8 + k > cols) break;
sum[sum_offset + loc0 + k * sum_step / 4] = 0;
- sqsum[sqsum_offset + loc1 + k * sqsum_step / sizeof(TYPE)] = 0;
+ sqsum[sqsum_offset + loc1 + k * sqsum_step / 4] = 0;
}
}
int loc_s0 = sum_offset + gid * 2 * sum_step + sum_step / 4 + i + lid, loc_s1 = loc_s0 + sum_step ;
- int loc_sq0 = sqsum_offset + gid * 2 * CONVERT(sqsum_step) + sqsum_step / sizeof(TYPE) + i + lid, loc_sq1 = loc_sq0 + CONVERT(sqsum_step) ;
+ int loc_sq0 = sqsum_offset + gid * 2 * sqsum_step + sqsum_step / 4 + i + lid, loc_sq1 = loc_sq0 + sqsum_step ;
if(lid > 0 && (i+lid) <= rows)
{
lm_sum[0][bf_loc] += sum_t[0];
lm_sqsum[0][bf_loc] += sqsum_t[0];
lm_sqsum[1][bf_loc] += sqsum_t[1];
sum_p = (__local float*)(&(lm_sum[0][bf_loc]));
- sqsum_p = (__local TYPE*)(&(lm_sqsum[0][bf_loc]));
+ sqsum_p = (__local float*)(&(lm_sqsum[0][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 8 + k >= cols) break;
sum[loc_s0 + k * sum_step / 4] = sum_p[k];
- sqsum[loc_sq0 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
+ sqsum[loc_sq0 + k * sqsum_step / 4] = sqsum_p[k];
}
sum_p = (__local float*)(&(lm_sum[1][bf_loc]));
- sqsum_p = (__local TYPE*)(&(lm_sqsum[1][bf_loc]));
+ sqsum_p = (__local float*)(&(lm_sqsum[1][bf_loc]));
for(int k = 0; k < 4; k++)
{
if(gid * 8 + 4 + k >= cols) break;
sum[loc_s1 + k * sum_step / 4] = sum_p[k];
- sqsum[loc_sq1 + k * sqsum_step / sizeof(TYPE)] = sqsum_p[k];
+ sqsum[loc_sq1 + k * sqsum_step / 4] = sqsum_p[k];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
//////////////////////////////////integral/////////////////////////////////////////////////
-struct Integral :
- public ImgprocTestBase
-{
- int sdepth;
+typedef ImgprocTestBase Integral;
- virtual void SetUp()
- {
- type = GET_PARAM(0);
- blockSize = GET_PARAM(1);
- sdepth = GET_PARAM(2);
- useRoi = GET_PARAM(3);
- }
-};
OCL_TEST_P(Integral, Mat1)
{
for (int j = 0; j < LOOP_TIMES; j++)
{
random_roi();
- ocl::integral(gsrc_roi, gdst_roi, sdepth);
- integral(src_roi, dst_roi, sdepth);
+ ocl::integral(gsrc_roi, gdst_roi);
+ integral(src_roi, dst_roi);
Near();
}
}
-OCL_TEST_P(Integral, Mat2)
+// TODO wrong output type
+OCL_TEST_P(Integral, DISABLED_Mat2)
{
Mat dst1;
ocl::oclMat gdst1;
{
random_roi();
- integral(src_roi, dst_roi, dst1, sdepth);
- ocl::integral(gsrc_roi, gdst_roi, gdst1, sdepth);
+ integral(src_roi, dst1, dst_roi);
+ ocl::integral(gsrc_roi, gdst1, gdst_roi);
Near();
- if(gdst1.clCxt->supportsFeature(ocl::FEATURE_CL_DOUBLE))
- EXPECT_MAT_NEAR(dst1, Mat(gdst1), 0.);
}
}
INSTANTIATE_TEST_CASE_P(Imgproc, Integral, Combine(
Values((MatType)CV_8UC1), // TODO does not work with CV_32F, CV_64F
Values(0), // not used
- Values((MatType)CV_32SC1, (MatType)CV_32FC1),
+ Values(0), // not used
Bool()));
INSTANTIATE_TEST_CASE_P(Imgproc, Threshold, Combine(
if r == 0:
css = css[:-1] + "border-top:2px solid #6678B1;\""
out.write(" <td%s%s>\n" % (attr, css))
- if th is not None:
+ if td is not None:
out.write(" %s\n" % htmlEncode(td.text))
out.write(" </td>\n")
i += colspan
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.opencv.engine"
- android:versionCode="216@ANDROID_PLATFORM_VERSION_CODE@"
- android:versionName="2.16" >
+ android:versionCode="218@ANDROID_PLATFORM_VERSION_CODE@"
+ android:versionName="2.18" >
<uses-sdk android:minSdkVersion="@ANDROID_NATIVE_API_LEVEL@" />
<uses-feature android:name="android.hardware.touchscreen" android:required="false"/>
const int OpenCVEngine::Platform = DetectKnownPlatforms();
const int OpenCVEngine::CpuID = GetCpuID();
-const int OpenCVEngine::KnownVersions[] = {2040000, 2040100, 2040200, 2040300, 2040301, 2040302, 2040400, 2040500, 2040600, 2040700, 2040701, 2040800};
+const int OpenCVEngine::KnownVersions[] = {2040000, 2040100, 2040200, 2040300, 2040301, 2040302, 2040400, 2040500, 2040600, 2040700, 2040701, 2040800, 2040900};
bool OpenCVEngine::ValidateVersion(int version)
{
}
else
{
- HardwarePlatformView.setText("Tegra 5");
+ HardwarePlatformView.setText("Tegra K1");
}
}
else
.. code-block:: sh
- adb install OpenCV-2.4.8-android-sdk/apk/OpenCV_2.4.8_Manager_2.16_<platform>.apk
+ adb install OpenCV-2.4.9-android-sdk/apk/OpenCV_2.4.9_Manager_2.18_<platform>.apk
Use the table below to determine proper OpenCV Manager package for your device:
+------------------------------+--------------+----------------------------------------------------+
| Hardware Platform | Android ver. | Package name |
+==============================+==============+====================================================+
-| armeabi-v7a (ARMv7-A + NEON) | >= 2.3 | OpenCV_2.4.8_Manager_2.16_armv7a-neon.apk |
+| armeabi-v7a (ARMv7-A + NEON) | >= 2.3 | OpenCV_2.4.9_Manager_2.18_armv7a-neon.apk |
+------------------------------+--------------+----------------------------------------------------+
-| armeabi-v7a (ARMv7-A + NEON) | = 2.2 | OpenCV_2.4.8_Manager_2.16_armv7a-neon-android8.apk |
+| armeabi-v7a (ARMv7-A + NEON) | = 2.2 | OpenCV_2.4.9_Manager_2.18_armv7a-neon-android8.apk |
+------------------------------+--------------+----------------------------------------------------+
-| armeabi (ARMv5, ARMv6) | >= 2.3 | OpenCV_2.4.8_Manager_2.16_armeabi.apk |
+| armeabi (ARMv5, ARMv6) | >= 2.3 | OpenCV_2.4.9_Manager_2.18_armeabi.apk |
+------------------------------+--------------+----------------------------------------------------+
-| Intel x86 | >= 2.3 | OpenCV_2.4.8_Manager_2.16_x86.apk |
+| Intel x86 | >= 2.3 | OpenCV_2.4.9_Manager_2.18_x86.apk |
+------------------------------+--------------+----------------------------------------------------+
-| MIPS | >= 2.3 | OpenCV_2.4.8_Manager_2.16_mips.apk |
+| MIPS | >= 2.3 | OpenCV_2.4.9_Manager_2.18_mips.apk |
+------------------------------+--------------+----------------------------------------------------+
--feature - Feature to use. Can be sift, surf or orb. Append '-flann' to feature name
to use Flann-based matcher instead of bruteforce.
- Press left mouse button on a feature point to see its mathcing point.
+ Press left mouse button on a feature point to see its matching point.
'''
import numpy as np
--feature - Feature to use. Can be sift, surf or orb. Append '-flann' to feature name
to use Flann-based matcher instead of bruteforce.
- Press left mouse button on a feature point to see its mathcing point.
+ Press left mouse button on a feature point to see its matching point.
'''
import numpy as np