temporarily added NPP_staging; functionality from the library will be moved to NPP...
author    Anatoly Baksheev <no@email>
Fri, 17 Dec 2010 15:41:26 +0000 (15:41 +0000)
committer Anatoly Baksheev <no@email>
Fri, 17 Dec 2010 15:41:26 +0000 (15:41 +0000)
12 files changed:
3rdparty/NPP_staging/NPP_staging_static_Windows_32_v1.lib [new file with mode: 0644]
3rdparty/NPP_staging/NPP_staging_static_Windows_64_v1.lib [new file with mode: 0644]
3rdparty/NPP_staging/libNPP_staging_static_Darwin_64_v1.a [new file with mode: 0644]
3rdparty/NPP_staging/libNPP_staging_static_Linux_32_v1.a [new file with mode: 0644]
3rdparty/NPP_staging/libNPP_staging_static_Linux_64_v1.a [new file with mode: 0644]
3rdparty/NPP_staging/npp_staging.h [new file with mode: 0644]
modules/gpu/CMakeLists.txt
modules/gpu/FindNPP_staging.cmake [new file with mode: 0644]
modules/gpu/include/opencv2/gpu/gpu.hpp
modules/gpu/src/cascadeclassifier.cpp [new file with mode: 0644]
modules/gpu/src/precomp.hpp
tests/gpu/src/stereo_bm.cpp

diff --git a/3rdparty/NPP_staging/NPP_staging_static_Windows_32_v1.lib b/3rdparty/NPP_staging/NPP_staging_static_Windows_32_v1.lib
new file mode 100644 (file)
index 0000000..39d29b8
Binary files /dev/null and b/3rdparty/NPP_staging/NPP_staging_static_Windows_32_v1.lib differ
diff --git a/3rdparty/NPP_staging/NPP_staging_static_Windows_64_v1.lib b/3rdparty/NPP_staging/NPP_staging_static_Windows_64_v1.lib
new file mode 100644 (file)
index 0000000..f8372bf
Binary files /dev/null and b/3rdparty/NPP_staging/NPP_staging_static_Windows_64_v1.lib differ
diff --git a/3rdparty/NPP_staging/libNPP_staging_static_Darwin_64_v1.a b/3rdparty/NPP_staging/libNPP_staging_static_Darwin_64_v1.a
new file mode 100644 (file)
index 0000000..ab0150e
Binary files /dev/null and b/3rdparty/NPP_staging/libNPP_staging_static_Darwin_64_v1.a differ
diff --git a/3rdparty/NPP_staging/libNPP_staging_static_Linux_32_v1.a b/3rdparty/NPP_staging/libNPP_staging_static_Linux_32_v1.a
new file mode 100644 (file)
index 0000000..6c16959
Binary files /dev/null and b/3rdparty/NPP_staging/libNPP_staging_static_Linux_32_v1.a differ
diff --git a/3rdparty/NPP_staging/libNPP_staging_static_Linux_64_v1.a b/3rdparty/NPP_staging/libNPP_staging_static_Linux_64_v1.a
new file mode 100644 (file)
index 0000000..56c7bd5
Binary files /dev/null and b/3rdparty/NPP_staging/libNPP_staging_static_Linux_64_v1.a differ
diff --git a/3rdparty/NPP_staging/npp_staging.h b/3rdparty/NPP_staging/npp_staging.h
new file mode 100644 (file)
index 0000000..c54af5c
--- /dev/null
@@ -0,0 +1,760 @@
+/*\r
+* Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.\r
+*\r
+* NOTICE TO USER:\r
+*\r
+* This source code is subject to NVIDIA ownership rights under U.S. and\r
+* international Copyright laws.\r
+*\r
+* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE\r
+* CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR\r
+* IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH\r
+* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF\r
+* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.\r
+* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,\r
+* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS\r
+* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE\r
+* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE\r
+* OR PERFORMANCE OF THIS SOURCE CODE.\r
+*\r
+* U.S. Government End Users.  This source code is a "commercial item" as\r
+* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting  of\r
+* "commercial computer software" and "commercial computer software\r
+* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)\r
+* and is provided to the U.S. Government only as a commercial end item.\r
+* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through\r
+* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the\r
+* source code with only those rights set forth herein.\r
+*/\r
+#ifndef _npp_staging_h_\r
+#define _npp_staging_h_\r
+\r
+\r
+/**\r
+* \file npp_staging.h\r
+* NPP Staging Library (will become part of the next NPP release)\r
+*/\r
+\r
+\r
+#ifdef __cplusplus\r
+\r
+\r
+/** \defgroup ctassert Compile-time assert functionality\r
+* @{\r
+*/\r
+\r
+\r
+    /**\r
+     * Compile-time assert namespace\r
+     */\r
+    namespace NppStCTprep\r
+    {\r
+        template <bool x>\r
+        struct CT_ASSERT_FAILURE;\r
+\r
+        template <>\r
+        struct CT_ASSERT_FAILURE<true> {};\r
+\r
+        template <int x>\r
+        struct assertTest{};\r
+    }\r
+\r
+\r
+    #define NPPST_CT_PREP_PASTE_AUX(a,b)      a##b                           ///< Concatenation indirection macro\r
+    #define NPPST_CT_PREP_PASTE(a,b)          NPPST_CT_PREP_PASTE_AUX(a, b)  ///< Concatenation macro\r
+\r
+\r
+    /**\r
+     * Performs compile-time assertion of a condition at file scope\r
+     */\r
+    #define NPPST_CT_ASSERT(X) \\r
+        typedef NppStCTprep::assertTest<sizeof(NppStCTprep::CT_ASSERT_FAILURE< (bool)(X) >)> \\r
+        NPPST_CT_PREP_PASTE(__ct_assert_typedef_, __LINE__)\r
+\r
+\r
+/*@}*/\r
+\r
+\r
+#endif\r
+\r
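Editor's note (not part of the commit): the mechanism above works because CT_ASSERT_FAILURE<true> is a complete type, so the typedef compiles, while CT_ASSERT_FAILURE<false> is left incomplete and the sizeof fails at compile time. A minimal sketch, assuming a C++ translation unit that includes this header:

    NPPST_CT_ASSERT(sizeof(int) == 4);    // compiles: CT_ASSERT_FAILURE<true> is defined
    // NPPST_CT_ASSERT(sizeof(int) == 8); // would not compile: CT_ASSERT_FAILURE<false> is incomplete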
+\r
+/** \defgroup typedefs NPP Integral and compound types of guaranteed size\r
+ * @{\r
+ */\r
+\r
+\r
+typedef               bool NppStBool; ///< Bool of size less than integer\r
+typedef          long long NppSt64s;  ///< 64-bit signed integer\r
+typedef unsigned long long NppSt64u;  ///< 64-bit unsigned integer\r
+typedef                int NppSt32s;  ///< 32-bit signed integer\r
+typedef       unsigned int NppSt32u;  ///< 32-bit unsigned integer\r
+typedef              short NppSt16s;  ///< 16-bit signed short\r
+typedef     unsigned short NppSt16u;  ///< 16-bit unsigned short\r
+typedef               char NppSt8s;   ///< 8-bit signed char\r
+typedef      unsigned char NppSt8u;   ///< 8-bit unsigned char\r
+typedef              float NppSt32f;  ///< 32-bit IEEE-754 (single precision) float\r
+typedef             double NppSt64f;  ///< 64-bit IEEE-754 (double precision) float\r
+\r
+\r
+/**\r
+ * 2D Rectangle, 8-bit unsigned fields\r
+ * This struct contains position and size information of a rectangle in 2D space\r
+ */\r
+struct NppStRect8u\r
+{\r
+    NppSt8u x;          ///< x-coordinate of upper left corner\r
+    NppSt8u y;          ///< y-coordinate of upper left corner\r
+    NppSt8u width;      ///< Rectangle width\r
+    NppSt8u height;     ///< Rectangle height\r
+#ifdef __cplusplus\r
+    NppStRect8u() : x(0), y(0), width(0), height(0) {};\r
+    NppStRect8u(NppSt8u x, NppSt8u y, NppSt8u width, NppSt8u height) : x(x), y(y), width(width), height(height) {}\r
+#endif\r
+};\r
+\r
+\r
+/**\r
+ * 2D Rectangle, 32-bit signed fields\r
+ * This struct contains position and size information of a rectangle in 2D space\r
+ */\r
+struct NppStRect32s\r
+{\r
+    NppSt32s x;          ///< x-coordinate of upper left corner\r
+    NppSt32s y;          ///< y-coordinate of upper left corner\r
+    NppSt32s width;      ///< Rectangle width\r
+    NppSt32s height;     ///< Rectangle height\r
+#ifdef __cplusplus\r
+    NppStRect32s() : x(0), y(0), width(0), height(0) {};\r
+    NppStRect32s(NppSt32s x, NppSt32s y, NppSt32s width, NppSt32s height) : x(x), y(y), width(width), height(height) {}\r
+#endif\r
+};\r
+\r
+\r
+/**\r
+ * 2D Rectangle, 32-bit unsigned fields\r
+ * This struct contains position and size information of a rectangle in 2D space\r
+ */\r
+struct NppStRect32u\r
+{\r
+    NppSt32u x;          ///< x-coordinate of upper left corner\r
+    NppSt32u y;          ///< y-coordinate of upper left corner\r
+    NppSt32u width;      ///< Rectangle width\r
+    NppSt32u height;     ///< Rectangle height\r
+#ifdef __cplusplus\r
+    NppStRect32u() : x(0), y(0), width(0), height(0) {};\r
+    NppStRect32u(NppSt32u x, NppSt32u y, NppSt32u width, NppSt32u height) : x(x), y(y), width(width), height(height) {}\r
+#endif\r
+};\r
+\r
+\r
+/**\r
+ * 2D Size, 32-bit signed fields\r
+ * This struct typically represents the size of a rectangular region in 2D space\r
+ */\r
+struct NppStSize32s\r
+{\r
+    NppSt32s width;  ///< Rectangle width\r
+    NppSt32s height; ///< Rectangle height\r
+#ifdef __cplusplus\r
+    NppStSize32s() : width(0), height(0) {};\r
+    NppStSize32s(NppSt32s width, NppSt32s height) : width(width), height(height) {}\r
+#endif\r
+};\r
+\r
+\r
+/**\r
+ * 2D Size, 32-bit unsigned fields\r
+ * This struct typically represents the size of a rectangular region in 2D space\r
+ */\r
+struct NppStSize32u\r
+{\r
+    NppSt32u width;  ///< Rectangle width\r
+    NppSt32u height; ///< Rectangle height\r
+#ifdef __cplusplus\r
+    NppStSize32u() : width(0), height(0) {};\r
+    NppStSize32u(NppSt32u width, NppSt32u height) : width(width), height(height) {}\r
+#endif\r
+};\r
+\r
+\r
+/**\r
+ * Error Status Codes\r
+ *\r
+ * Almost all NPP functions return error-status information using\r
+ * these return codes.\r
+ * Negative return codes indicate errors, positive return codes indicate\r
+ * warnings, a return code of 0 indicates success.\r
+ */\r
+enum NppStStatus\r
+{\r
+    //already present in NPP\r
+ /*   NPP_SUCCESS                      = 0,   ///< Successful operation (same as NPP_NO_ERROR)\r
+    NPP_ERROR                        = -1,  ///< Unknown error\r
+    NPP_CUDA_KERNEL_EXECUTION_ERROR  = -3,  ///< CUDA kernel execution error\r
+    NPP_NULL_POINTER_ERROR           = -4,  ///< NULL pointer argument error\r
+    NPP_TEXTURE_BIND_ERROR           = -24, ///< CUDA texture binding error or non-zero offset returned\r
+    NPP_MEMCPY_ERROR                 = -13, ///< CUDA memory copy error\r
+    NPP_MEM_ALLOC_ERR                = -12, ///< CUDA memory allocation error\r
+    NPP_MEMFREE_ERR                  = -15, ///< CUDA memory deallocation error*/\r
+\r
+    //to be added\r
+    NPP_INVALID_ROI,                        ///< Invalid region of interest argument\r
+    NPP_INVALID_STEP,                       ///< Invalid image lines step argument (check sign, alignment, relation to image width)\r
+    NPP_INVALID_SCALE,                      ///< Invalid scale parameter passed\r
+    NPP_MEM_INSUFFICIENT_BUFFER,            ///< Insufficient user-allocated buffer\r
+    NPP_MEM_RESIDENCE_ERROR,                ///< Memory residence error detected (check if pointers should be device or pinned)\r
+    NPP_MEM_INTERNAL_ERROR,                 ///< Internal memory management error\r
+};\r
+\r
+\r
+/*@}*/\r
+\r
+\r
+#ifdef __cplusplus\r
+\r
+\r
+/** \defgroup ct_typesize_checks Client-side sizeof types compile-time check\r
+* @{\r
+*/\r
+    NPPST_CT_ASSERT(sizeof(NppStBool) <= 4);\r
+    NPPST_CT_ASSERT(sizeof(NppSt64s) == 8);\r
+    NPPST_CT_ASSERT(sizeof(NppSt64u) == 8);\r
+    NPPST_CT_ASSERT(sizeof(NppSt32s) == 4);\r
+    NPPST_CT_ASSERT(sizeof(NppSt32u) == 4);\r
+    NPPST_CT_ASSERT(sizeof(NppSt16s) == 2);\r
+    NPPST_CT_ASSERT(sizeof(NppSt16u) == 2);\r
+    NPPST_CT_ASSERT(sizeof(NppSt8s) == 1);\r
+    NPPST_CT_ASSERT(sizeof(NppSt8u) == 1);\r
+    NPPST_CT_ASSERT(sizeof(NppSt32f) == 4);\r
+    NPPST_CT_ASSERT(sizeof(NppSt64f) == 8);\r
+    NPPST_CT_ASSERT(sizeof(NppStRect8u) == sizeof(NppSt32u));\r
+    NPPST_CT_ASSERT(sizeof(NppStRect32s) == 4 * sizeof(NppSt32s));\r
+    NPPST_CT_ASSERT(sizeof(NppStRect32u) == 4 * sizeof(NppSt32u));\r
+    NPPST_CT_ASSERT(sizeof(NppStSize32u) == 2 * sizeof(NppSt32u));\r
+/*@}*/\r
+\r
+\r
+#endif\r
+\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+\r
+/** \defgroup core_npp NPP Core\r
+ * Basic functions for CUDA stream management.\r
+ * WARNING: these functions could not be exported from the NPP_staging library, so they cannot be used\r
+ * @{\r
+ */\r
+\r
+\r
+/**\r
+ * Gets an active CUDA stream used by NPP (Not an API yet!)\r
+ * \return Current CUDA stream\r
+ */\r
+cudaStream_t nppStGetActiveCUDAstream();\r
+\r
+\r
+/**\r
+ * Sets an active CUDA stream used by NPP (Not an API yet!)\r
+ * \param cudaStream        [IN] CUDA stream to become current\r
+ * \return CUDA stream used before\r
+ */\r
+cudaStream_t nppStSetActiveCUDAstream(cudaStream_t cudaStream);\r
+\r
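Editor's note (not part of the commit): setting aside the export limitation mentioned above, the intended calling pattern would look roughly like the sketch below; the assumption that subsequent nppiSt*/nppsSt* calls run on the installed stream is mine, not a statement from this header.

    cudaStream_t myStream;
    cudaStreamCreate(&myStream);
    cudaStream_t prev = nppStSetActiveCUDAstream(myStream);   // returns the stream used before
    // ... launch nppiSt* / nppsSt* calls here; they are expected to run on myStream ...
    nppStSetActiveCUDAstream(prev);                            // restore the previous stream
    cudaStreamDestroy(myStream);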
+\r
+/*@}*/\r
+\r
+\r
+/** \defgroup nppi NPP Image Processing\r
+* @{\r
+*/\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 32-bit unsigned pixels, single channel.\r
+ *\r
+ * \param d_src             [IN] Source image pointer (CUDA device memory)\r
+ * \param srcStep           [IN] Source image line step\r
+ * \param d_dst             [OUT] Destination image pointer (CUDA device memory)\r
+ * \param dstStep           [IN] Destination image line step\r
+ * \param srcRoi            [IN] Region of interest in the source image\r
+ * \param scale             [IN] Downsampling scale factor (positive integer)\r
+ * \param readThruTexture   [IN] Performance hint to cache source in texture (true) or read directly (false)\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStDownsampleNearest_32u_C1R(NppSt32u *d_src, NppSt32u srcStep,\r
+                                            NppSt32u *d_dst, NppSt32u dstStep,\r
+                                            NppStSize32u srcRoi, NppSt32u scale,\r
+                                            NppStBool readThruTexture);\r
+\r
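Editor's note (not part of the commit): a minimal calling sketch for a 2x decimation of a 640x480 single-channel 32-bit image. The line steps are assumed to be byte pitches, and 0 is assumed to remain the success code as in stock NPP; both points are assumptions, not guarantees from this header.

    NppStSize32u srcRoi(640, 480);
    NppSt32u srcStep = 640 * sizeof(NppSt32u);           // assumed: step is a byte pitch
    NppSt32u dstStep = 320 * sizeof(NppSt32u);
    NppSt32u *d_src = 0, *d_dst = 0;
    cudaMalloc((void**)&d_src, srcStep * 480);
    cudaMalloc((void**)&d_dst, dstStep * 240);
    NppStStatus st = nppiStDownsampleNearest_32u_C1R(d_src, srcStep, d_dst, dstStep,
                                                     srcRoi, 2, false);
    // st == 0 is expected to mean success; negative values are errors (see NppStStatus above).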
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 32-bit signed pixels, single channel.\r
+ * \see nppiStDownsampleNearest_32u_C1R\r
+ */\r
+NppStStatus nppiStDownsampleNearest_32s_C1R(NppSt32s *d_src, NppSt32u srcStep,\r
+                                            NppSt32s *d_dst, NppSt32u dstStep,\r
+                                            NppStSize32u srcRoi, NppSt32u scale,\r
+                                            NppStBool readThruTexture);\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 32-bit float pixels, single channel.\r
+ * \see nppiStDownsampleNearest_32u_C1R\r
+ */\r
+NppStStatus nppiStDownsampleNearest_32f_C1R(NppSt32f *d_src, NppSt32u srcStep,\r
+                                            NppSt32f *d_dst, NppSt32u dstStep,\r
+                                            NppStSize32u srcRoi, NppSt32u scale,\r
+                                            NppStBool readThruTexture);\r
+\r
+\r
+/**\r
+* Downsamples (decimates) an image using the nearest neighbor algorithm. 64-bit unsigned pixels, single channel.\r
+* \see nppiStDownsampleNearest_32u_C1R\r
+*/\r
+NppStStatus nppiStDownsampleNearest_64u_C1R(NppSt64u *d_src, NppSt32u srcStep,\r
+                                            NppSt64u *d_dst, NppSt32u dstStep,\r
+                                            NppStSize32u srcRoi, NppSt32u scale,\r
+                                            NppStBool readThruTexture);\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 64-bit signed pixels, single channel.\r
+ * \see nppiStDownsampleNearest_32u_C1R\r
+ */\r
+NppStStatus nppiStDownsampleNearest_64s_C1R(NppSt64s *d_src, NppSt32u srcStep,\r
+                                            NppSt64s *d_dst, NppSt32u dstStep,\r
+                                            NppStSize32u srcRoi, NppSt32u scale,\r
+                                            NppStBool readThruTexture);\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 64-bit float pixels, single channel.\r
+ * \see nppiStDownsampleNearest_32u_C1R\r
+ */\r
+NppStStatus nppiStDownsampleNearest_64f_C1R(NppSt64f *d_src, NppSt32u srcStep,\r
+                                            NppSt64f *d_dst, NppSt32u dstStep,\r
+                                            NppStSize32u srcRoi, NppSt32u scale,\r
+                                            NppStBool readThruTexture);\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 32-bit unsigned pixels, single channel. Host implementation.\r
+ *\r
+ * \param h_src             [IN] Source image pointer (Host or pinned memory)\r
+ * \param srcStep           [IN] Source image line step\r
+ * \param h_dst             [OUT] Destination image pointer (Host or pinned memory)\r
+ * \param dstStep           [IN] Destination image line step\r
+ * \param srcRoi            [IN] Region of interest in the source image\r
+ * \param scale             [IN] Downsampling scale factor (positive integer)\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStDownsampleNearest_32u_C1R_host(NppSt32u *h_src, NppSt32u srcStep,\r
+                                                 NppSt32u *h_dst, NppSt32u dstStep,\r
+                                                 NppStSize32u srcRoi, NppSt32u scale);\r
+\r
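Editor's note (not part of the commit): nearest-neighbor decimation amounts to keeping every scale-th pixel. A sketch of the expected semantics, which is an assumption about the implementation (steps again taken as byte pitches):

    for (NppSt32u y = 0; y < srcRoi.height / scale; y++)
        for (NppSt32u x = 0; x < srcRoi.width / scale; x++)
            h_dst[y * (dstStep / sizeof(NppSt32u)) + x] =
                h_src[(y * scale) * (srcStep / sizeof(NppSt32u)) + x * scale];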
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 32-bit signed pixels, single channel. Host implementation.\r
+ * \see nppiStDownsampleNearest_32u_C1R_host\r
+ */\r
+NppStStatus nppiStDownsampleNearest_32s_C1R_host(NppSt32s *h_src, NppSt32u srcStep,\r
+                                                 NppSt32s *h_dst, NppSt32u dstStep,\r
+                                                 NppStSize32u srcRoi, NppSt32u scale);\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 32-bit float pixels, single channel. Host implementation.\r
+ * \see nppiStDownsampleNearest_32u_C1R_host\r
+ */\r
+NppStStatus nppiStDownsampleNearest_32f_C1R_host(NppSt32f *h_src, NppSt32u srcStep,\r
+                                                 NppSt32f *h_dst, NppSt32u dstStep,\r
+                                                 NppStSize32u srcRoi, NppSt32u scale);\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 64-bit unsigned pixels, single channel. Host implementation.\r
+ * \see nppiStDownsampleNearest_32u_C1R_host\r
+ */\r
+NppStStatus nppiStDownsampleNearest_64u_C1R_host(NppSt64u *h_src, NppSt32u srcStep,\r
+                                                 NppSt64u *h_dst, NppSt32u dstStep,\r
+                                                 NppStSize32u srcRoi, NppSt32u scale);\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 64-bit signed pixels, single channel. Host implementation.\r
+ * \see nppiStDownsampleNearest_32u_C1R_host\r
+ */\r
+NppStStatus nppiStDownsampleNearest_64s_C1R_host(NppSt64s *h_src, NppSt32u srcStep,\r
+                                                 NppSt64s *h_dst, NppSt32u dstStep,\r
+                                                 NppStSize32u srcRoi, NppSt32u scale);\r
+\r
+\r
+/**\r
+ * Downsamples (decimates) an image using the nearest neighbor algorithm. 64-bit float pixels, single channel. Host implementation.\r
+ * \see nppiStDownsampleNearest_32u_C1R_host\r
+ */\r
+NppStStatus nppiStDownsampleNearest_64f_C1R_host(NppSt64f *h_src, NppSt32u srcStep,\r
+                                                 NppSt64f *h_dst, NppSt32u dstStep,\r
+                                                 NppStSize32u srcRoi, NppSt32u scale);\r
+\r
+\r
+/**\r
+ * Computes standard deviation for each rectangular region of the input image using integral images.\r
+ *\r
+ * \param d_sum             [IN] Integral image pointer (CUDA device memory)\r
+ * \param sumStep           [IN] Integral image line step\r
+ * \param d_sqsum           [IN] Squared integral image pointer (CUDA device memory)\r
+ * \param sqsumStep         [IN] Squared integral image line step\r
+ * \param d_norm            [OUT] Stddev image pointer (CUDA device memory). Each pixel contains stddev of a rect with top-left corner at the original location in the image\r
+ * \param normStep          [IN] Stddev image line step\r
+ * \param roi               [IN] Region of interest in the source image\r
+ * \param rect              [IN] Rectangular region to calculate stddev over\r
+ * \param scaleArea         [IN] Multiplication factor to account for the decimated scale\r
+ * \param readThruTexture   [IN] Performance hint to cache source in texture (true) or read directly (false)\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStRectStdDev_32f_C1R(NppSt32u *d_sum, NppSt32u sumStep,\r
+                                     NppSt64u *d_sqsum, NppSt32u sqsumStep,\r
+                                     NppSt32f *d_norm, NppSt32u normStep,\r
+                                     NppStSize32u roi, NppStRect32u rect,\r
+                                     NppSt32f scaleArea, NppStBool readThruTexture);\r
+\r
+\r
+/**\r
+ * Computes standard deviation for each rectangular region of the input image using integral images. Host implementation\r
+ *\r
+ * \param h_sum             [IN] Integral image pointer (Host or pinned memory)\r
+ * \param sumStep           [IN] Integral image line step\r
+ * \param h_sqsum           [IN] Squared integral image pointer (Host or pinned memory)\r
+ * \param sqsumStep         [IN] Squared integral image line step\r
+ * \param h_norm            [OUT] Stddev image pointer (Host or pinned memory). Each pixel contains stddev of a rect with top-left corner at the original location in the image\r
+ * \param normStep          [IN] Stddev image line step\r
+ * \param roi               [IN] Region of interest in the source image\r
+ * \param rect              [IN] Rectangular region to calculate stddev over\r
+ * \param scaleArea         [IN] Multiplication factor to account for the decimated scale\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStRectStdDev_32f_C1R_host(NppSt32u *h_sum, NppSt32u sumStep,\r
+                                          NppSt64u *h_sqsum, NppSt32u sqsumStep,\r
+                                          NppSt32f *h_norm, NppSt32u normStep,\r
+                                          NppStSize32u roi, NppStRect32u rect,\r
+                                          NppSt32f scaleArea);\r
+\r
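Editor's note (not part of the commit): both variants rest on the standard integral-image identity; the exact boundary and scaling conventions below are assumptions, not guarantees from this header.

    // For the window `rect` anchored at pixel (x, y):
    //   S = sum of pixels       (four lookups in the integral image)
    //   Q = sum of squares      (four lookups in the squared integral image)
    //   where a rectangle sum over [x0,x1) x [y0,y1) is I(x1,y1) - I(x0,y1) - I(x1,y0) + I(x0,y0)
    //   mean   = S / area
    //   stddev = sqrt(max(Q / area - mean * mean, 0))   // scaleArea is assumed to compensate
    //                                                   // for a decimated/scaled window area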
+\r
+/**\r
+ * Transposes an image. 32-bit unsigned pixels, single channel\r
+ *\r
+ * \param d_src             [IN] Source image pointer (CUDA device memory)\r
+ * \param srcStride         [IN] Source image line step\r
+ * \param d_dst             [OUT] Destination image pointer (CUDA device memory)\r
+ * \param dstStride         [IN] Destination image line step\r
+ * \param srcRoi            [IN] Region of interest of the source image\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStTranspose_32u_C1R(NppSt32u *d_src, NppSt32u srcStride,\r
+                                    NppSt32u *d_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 32-bit signed pixels, single channel\r
+ * \see nppiStTranspose_32u_C1R\r
+ */\r
+NppStStatus nppiStTranspose_32s_C1R(NppSt32s *d_src, NppSt32u srcStride,\r
+                                    NppSt32s *d_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 32-bit float pixels, single channel\r
+ * \see nppiStTranspose_32u_C1R\r
+ */\r
+NppStStatus nppiStTranspose_32f_C1R(NppSt32f *d_src, NppSt32u srcStride,\r
+                                    NppSt32f *d_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 64-bit unsigned pixels, single channel\r
+ * \see nppiStTranspose_32u_C1R\r
+ */\r
+NppStStatus nppiStTranspose_64u_C1R(NppSt64u *d_src, NppSt32u srcStride,\r
+                                    NppSt64u *d_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 64-bit signed pixels, single channel\r
+ * \see nppiStTranspose_32u_C1R\r
+ */\r
+NppStStatus nppiStTranspose_64s_C1R(NppSt64s *d_src, NppSt32u srcStride,\r
+                                    NppSt64s *d_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 64-bit float pixels, single channel\r
+ * \see nppiStTranspose_32u_C1R\r
+ */\r
+NppStStatus nppiStTranspose_64f_C1R(NppSt64f *d_src, NppSt32u srcStride,\r
+                                    NppSt64f *d_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 32-bit unsigned pixels, single channel. Host implementation\r
+ *\r
+ * \param h_src             [IN] Source image pointer (Host or pinned memory)\r
+ * \param srcStride         [IN] Source image line step\r
+ * \param h_dst             [OUT] Destination image pointer (Host or pinned memory)\r
+ * \param dstStride         [IN] Destination image line step\r
+ * \param srcRoi            [IN] Region of interest of the source image\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStTranspose_32u_C1R_host(NppSt32u *h_src, NppSt32u srcStride,\r
+                                         NppSt32u *h_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 32-bit signed pixels, single channel. Host implementation\r
+ * \see nppiStTranspose_32u_C1R_host\r
+ */\r
+NppStStatus nppiStTranspose_32s_C1R_host(NppSt32s *h_src, NppSt32u srcStride,\r
+                                         NppSt32s *h_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 32-bit float pixels, single channel. Host implementation\r
+ * \see nppiStTranspose_32u_C1R_host\r
+ */\r
+NppStStatus nppiStTranspose_32f_C1R_host(NppSt32f *h_src, NppSt32u srcStride,\r
+                                         NppSt32f *h_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 64-bit unsigned pixels, single channel. Host implementation\r
+ * \see nppiStTranspose_32u_C1R_host\r
+ */\r
+NppStStatus nppiStTranspose_64u_C1R_host(NppSt64u *h_src, NppSt32u srcStride,\r
+                                         NppSt64u *h_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 64-bit signed pixels, single channel. Host implementation\r
+ * \see nppiStTranspose_32u_C1R_host\r
+ */\r
+NppStStatus nppiStTranspose_64s_C1R_host(NppSt64s *h_src, NppSt32u srcStride,\r
+                                         NppSt64s *h_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Transposes an image. 64-bit float pixels, single channel. Host implementation\r
+ * \see nppiStTranspose_32u_C1R_host\r
+ */\r
+NppStStatus nppiStTranspose_64f_C1R_host(NppSt64f *h_src, NppSt32u srcStride,\r
+                                         NppSt64f *h_dst, NppSt32u dstStride, NppStSize32u srcRoi);\r
+\r
+\r
+/**\r
+ * Calculates the size of the temporary buffer for integral image creation\r
+ *\r
+ * \param roiSize           [IN] Size of the input image\r
+ * \param pBufsize          [OUT] Pointer to host variable that returns the size of the temporary buffer (in bytes)\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStIntegralGetSize_8u32u(NppStSize32u roiSize, NppSt32u *pBufsize);\r
+\r
+\r
+/**\r
+ * Creates an integral image representation for the input image\r
+ *\r
+ * \param d_src             [IN] Source image pointer (CUDA device memory)\r
+ * \param srcStep           [IN] Source image line step\r
+ * \param d_dst             [OUT] Destination integral image pointer (CUDA device memory)\r
+ * \param dstStep           [IN] Destination image line step\r
+ * \param roiSize           [IN] Region of interest of the source image\r
+ * \param pBuffer           [IN] Pointer to the pre-allocated temporary buffer (CUDA device memory)\r
+ * \param bufSize           [IN] Size of the pBuffer in bytes\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStIntegral_8u32u_C1R(NppSt8u *d_src, NppSt32u srcStep,\r
+                                     NppSt32u *d_dst, NppSt32u dstStep, NppStSize32u roiSize,\r
+                                     NppSt8u *pBuffer, NppSt32u bufSize);\r
+\r
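Editor's note (not part of the commit): a hedged calling sketch that queries the scratch-buffer size first, allocates it in device memory, then builds the integral image. The source/destination pointers and steps are assumed to have been allocated elsewhere, and the destination is commonly one row and column larger than the source ROI, although this header does not state that here.

    NppStSize32u roi(640, 480);
    NppSt32u bufSize = 0;
    nppiStIntegralGetSize_8u32u(roi, &bufSize);            // 1) ask for the scratch size
    NppSt8u *d_buf = 0;
    cudaMalloc((void**)&d_buf, bufSize);                   // 2) allocate it on the device
    NppStStatus st = nppiStIntegral_8u32u_C1R(d_src, srcStep,
                                              d_dst, dstStep, roi,
                                              d_buf, bufSize);   // 3) build the integral image
    cudaFree(d_buf);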
+\r
+/**\r
+ * Creates an integral image representation for the input image. Host implementation\r
+ *\r
+ * \param h_src             [IN] Source image pointer (Host or pinned memory)\r
+ * \param srcStep           [IN] Source image line step\r
+ * \param h_dst             [OUT] Destination integral image pointer (Host or pinned memory)\r
+ * \param dstStep           [IN] Destination image line step\r
+ * \param roiSize           [IN] Region of interest of the source image\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStIntegral_8u32u_C1R_host(NppSt8u *h_src, NppSt32u srcStep,\r
+                                          NppSt32u *h_dst, NppSt32u dstStep, NppStSize32u roiSize);\r
+\r
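Editor's note (not part of the commit): the assumed meaning of the output, stated here as a convention I am assuming rather than one documented in this header:

    // dst(x, y) = sum of src(i, j) over 0 <= i < x, 0 <= j < y,
    // i.e. the first row and column of the integral image are zero, and any rectangle sum
    // can later be recovered with four lookups, as used by nppiStRectStdDev_32f_C1R above.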
+\r
+/**\r
+ * Calculates the size of the temporary buffer for squared integral image creation\r
+ *\r
+ * \param roiSize           [IN] Size of the input image\r
+ * \param pBufsize          [OUT] Pointer to host variable that returns the size of the temporary buffer (in bytes)\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStSqrIntegralGetSize_8u64u(NppStSize32u roiSize, NppSt32u *pBufsize);\r
+\r
+\r
+/**\r
+ * Creates a squared integral image representation for the input image\r
+ *\r
+ * \param d_src             [IN] Source image pointer (CUDA device memory)\r
+ * \param srcStep           [IN] Source image line step\r
+ * \param d_dst             [OUT] Destination squared integral image pointer (CUDA device memory)\r
+ * \param dstStep           [IN] Destination image line step\r
+ * \param roiSize           [IN] Region of interest of the source image\r
+ * \param pBuffer           [IN] Pointer to the pre-allocated temporary buffer (CUDA device memory)\r
+ * \param bufSize           [IN] Size of the pBuffer in bytes\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStSqrIntegral_8u64u_C1R(NppSt8u *d_src, NppSt32u srcStep,\r
+                                        NppSt64u *d_dst, NppSt32u dstStep, NppStSize32u roiSize,\r
+                                        NppSt8u *pBuffer, NppSt32u bufSize);\r
+\r
+\r
+/**\r
+ * Creates a squared integral image representation for the input image. Host implementation\r
+ *\r
+ * \param h_src             [IN] Source image pointer (Host or pinned memory)\r
+ * \param srcStep           [IN] Source image line step\r
+ * \param h_dst             [OUT] Destination squared integral image pointer (Host or pinned memory)\r
+ * \param dstStep           [IN] Destination image line step\r
+ * \param roiSize           [IN] Region of interest of the source image\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppiStSqrIntegral_8u64u_C1R_host(NppSt8u *h_src, NppSt32u srcStep,\r
+                                             NppSt64u *h_dst, NppSt32u dstStep, NppStSize32u roiSize);\r
+\r
+\r
+/*@}*/\r
+\r
+\r
+/** \defgroup npps NPP Signal Processing\r
+* @{\r
+*/\r
+\r
+\r
+/**\r
+ * Calculates the size of the temporary buffer for vector compaction. 32-bit unsigned values\r
+ *\r
+ * \param srcLen            [IN] Length of the input vector in elements\r
+ * \param pBufsize          [OUT] Pointer to host variable that returns the size of the temporary buffer (in bytes)\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppsStCompactGetSize_32u(NppSt32u srcLen, NppSt32u *pBufsize);\r
+\r
+\r
+/**\r
+ * Calculates the size of the temporary buffer for vector compaction. 32-bit signed values\r
+ * \see nppsStCompactGetSize_32u\r
+ */\r
+NppStStatus nppsStCompactGetSize_32s(NppSt32u srcLen, NppSt32u *pBufsize);\r
+\r
+\r
+/**\r
+ * Calculates the size of the temporary buffer for vector compaction. 32-bit float values\r
+ * \see nppsStCompactGetSize_32u\r
+ */\r
+NppStStatus nppsStCompactGetSize_32f(NppSt32u srcLen, NppSt32u *pBufsize);\r
+\r
+\r
+/**\r
+ * Compacts the input vector by removing elements of specified value. 32-bit unsigned values\r
+ *\r
+ * \param d_src             [IN] Source vector pointer (CUDA device memory)\r
+ * \param srcLen            [IN] Source vector length\r
+ * \param d_dst             [OUT] Destination vector pointer (CUDA device memory)\r
+ * \param p_dstLen          [OUT] Pointer to the destination vector length (Pinned memory or NULL)\r
+ * \param elemRemove        [IN] The value to be removed\r
+ * \param pBuffer           [IN] Pointer to the pre-allocated temporary buffer (CUDA device memory)\r
+ * \param bufSize           [IN] Size of the pBuffer in bytes\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppsStCompact_32u(NppSt32u *d_src, NppSt32u srcLen,\r
+                              NppSt32u *d_dst, NppSt32u *p_dstLen,\r
+                              NppSt32u elemRemove,\r
+                              NppSt8u *pBuffer, NppSt32u bufSize);\r
+\r
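Editor's note (not part of the commit): a hedged calling sketch that removes all zero elements from a device vector; d_src and d_dst are assumed to be allocated with at least srcLen elements.

    NppSt32u bufSize = 0;
    nppsStCompactGetSize_32u(srcLen, &bufSize);
    NppSt8u *d_buf = 0;
    cudaMalloc((void**)&d_buf, bufSize);
    NppSt32u *p_dstLen = 0;
    cudaMallocHost((void**)&p_dstLen, sizeof(NppSt32u));   // pinned, as the docs request
    NppStStatus st = nppsStCompact_32u(d_src, srcLen, d_dst, p_dstLen,
                                       0 /* elemRemove */, d_buf, bufSize);
    // *p_dstLen now holds the number of surviving elements.
    cudaFreeHost(p_dstLen);
    cudaFree(d_buf);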
+\r
+/**\r
+ * Compacts the input vector by removing elements of specified value. 32-bit signed values\r
+ * \see nppsStCompact_32u\r
+ */\r
+NppStStatus nppsStCompact_32s(NppSt32s *d_src, NppSt32u srcLen,\r
+                              NppSt32s *d_dst, NppSt32u *p_dstLen,\r
+                              NppSt32s elemRemove,\r
+                              NppSt8u *pBuffer, NppSt32u bufSize);\r
+\r
+\r
+/**\r
+ * Compacts the input vector by removing elements of specified value. 32-bit float values\r
+ * \see nppsStCompact_32u\r
+ */\r
+NppStStatus nppsStCompact_32f(NppSt32f *d_src, NppSt32u srcLen,\r
+                              NppSt32f *d_dst, NppSt32u *p_dstLen,\r
+                              NppSt32f elemRemove,\r
+                              NppSt8u *pBuffer, NppSt32u bufSize);\r
+\r
+\r
+/**\r
+ * Compacts the input vector by removing elements of specified value. 32-bit unsigned values. Host implementation\r
+ *\r
+ * \param h_src             [IN] Source vector pointer (Host or pinned memory)\r
+ * \param srcLen            [IN] Source vector length\r
+ * \param h_dst             [OUT] Destination vector pointer (Host or pinned memory)\r
+ * \param dstLen            [OUT] Pointer to the destination vector length (can be NULL)\r
+ * \param elemRemove        [IN] The value to be removed\r
+ *\r
+ * \return NPP status code\r
+ */\r
+NppStStatus nppsStCompact_32u_host(NppSt32u *h_src, NppSt32u srcLen,\r
+                                   NppSt32u *h_dst, NppSt32u *dstLen, NppSt32u elemRemove);\r
+\r
+\r
+/**\r
+ * Compacts the input vector by removing elements of specified value. 32-bit signed values. Host implementation\r
+ * \see nppsStCompact_32u_host\r
+ */\r
+NppStStatus nppsStCompact_32s_host(NppSt32s *h_src, NppSt32u srcLen,\r
+                                   NppSt32s *h_dst, NppSt32u *dstLen, NppSt32s elemRemove);\r
+\r
+\r
+/**\r
+ * Compacts the input vector by removing elements of specified value. 32-bit float values. Host implementation\r
+ * \see nppsStCompact_32u_host\r
+ */\r
+NppStStatus nppsStCompact_32f_host(NppSt32f *h_src, NppSt32u srcLen,\r
+                                   NppSt32f *h_dst, NppSt32u *dstLen, NppSt32f elemRemove);\r
+\r
+\r
+/*@}*/\r
+\r
+\r
+#ifdef __cplusplus\r
+}\r
+#endif\r
+\r
+\r
+#endif // _npp_staging_h_\r
diff --git a/modules/gpu/CMakeLists.txt b/modules/gpu/CMakeLists.txt
index 77228e5..3c10b0e 100644 (file)
@@ -1,15 +1,12 @@
-
 set(name "gpu")
 
-#"opencv_features2d" "opencv_flann" "opencv_objdetect" - only headers needed 
-set(DEPS "opencv_core" "opencv_imgproc" "opencv_objdetect" "opencv_features2d" "opencv_flann") 
-
-set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} opencv_gpu)
-
 set(the_target "opencv_${name}")
-
 project(${the_target})
 
+
+set(DEPS "opencv_core" "opencv_imgproc" "opencv_objdetect" "opencv_features2d" "opencv_flann") #"opencv_features2d" "opencv_flann" "opencv_objdetect" - only headers needed 
+set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} opencv_gpu)
+
 add_definitions(-DCVAPI_EXPORTS)
 
 include_directories("${CMAKE_CURRENT_SOURCE_DIR}/include"                    
@@ -43,7 +40,7 @@ if (HAVE_CUDA)
        set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${_path_to_findnpp}) 
        find_package(NPP 3.2.16 REQUIRED)
        message(STATUS "NPP detected: " ${NPP_VERSION})
-
+       
        include_directories(${CUDA_INCLUDE_DIRS} ${CUDA_NPP_INCLUDES})  
        
        if (UNIX OR APPLE)
@@ -79,6 +76,11 @@ endif()
 
 add_library(${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${lib_cuda} ${lib_cuda_hdrs} ${lib_device_hdrs} ${cuda_objs})
 
+if (HAVE_CUDA)
+    include(FindNPP_staging.cmake)    
+    include_directories(${NPPST_INC})       
+    target_link_libraries(${the_target} ${NPPST_LIB})
+endif()
 
 if(PCHSupport_FOUND)
        set(pch_header ${CMAKE_CURRENT_SOURCE_DIR}/src/precomp.hpp)
@@ -108,7 +110,7 @@ set_target_properties(${the_target} PROPERTIES
        )
 
 # Add the required libraries for linking:
-target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${DEPS})
+target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${IPP_LIBS} ${DEPS} )
 
 if (HAVE_CUDA)
        target_link_libraries(${the_target} ${CUDA_LIBRARIES} ${CUDA_NPP_LIBRARIES})
diff --git a/modules/gpu/FindNPP_staging.cmake b/modules/gpu/FindNPP_staging.cmake
new file mode 100644 (file)
index 0000000..e478695
--- /dev/null
@@ -0,0 +1,24 @@
+if(CMAKE_SIZEOF_VOID_P EQUAL 4)                        \r
+    set(BIT_SUFF 32)\r
+else()\r
+    set(BIT_SUFF 64)\r
+endif()\r
+\r
+if (APPLE)\r
+    set(PLATFORM_SUFF Darwin)\r
+elseif (UNIX)\r
+    set(PLATFORM_SUFF Linux)\r
+else()\r
+    set(PLATFORM_SUFF Windows)\r
+endif()\r
+\r
+set(LIB_FILE NPP_staging_static_${PLATFORM_SUFF}_${BIT_SUFF}_v1)\r
+\r
+find_library(NPPST_LIB \r
+    NAMES "${LIB_FILE}" "lib${LIB_FILE}" \r
+    PATHS "${CMAKE_SOURCE_DIR}/3rdparty/NPP_staging" \r
+    DOC "NPP staging library"\r
+    )  \r
+\r
+set(NPPST_INC "${CMAKE_SOURCE_DIR}/3rdparty/NPP_staging")\r
\ No newline at end of file
diff --git a/modules/gpu/include/opencv2/gpu/gpu.hpp b/modules/gpu/include/opencv2/gpu/gpu.hpp
index b30ca48..a8329da 100644 (file)
@@ -1305,8 +1305,67 @@ namespace cv
             explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L2Dist) {}\r
             explicit BruteForceMatcher_GPU(L2<T> /*d*/) : BruteForceMatcher_GPU_base(L2Dist) {}\r
         };\r
-    }\r
 \r
+        ////////////////////////////////// CascadeClassifier //////////////////////////////////////////\r
+        // The cascade classifier class for object detection.\r
+        class CV_EXPORTS CascadeClassifier\r
+        {\r
+        public:\r
+            struct CV_EXPORTS DTreeNode\r
+            {\r
+                int featureIdx;\r
+                float threshold; // for ordered features only\r
+                int left;\r
+                int right;\r
+            };\r
+\r
+            struct CV_EXPORTS DTree\r
+            {\r
+                int nodeCount;\r
+            };\r
+\r
+            struct CV_EXPORTS Stage\r
+            {\r
+                int first;\r
+                int ntrees;\r
+                float threshold;\r
+            };\r
+\r
+            enum { BOOST = 0 };\r
+            enum { DO_CANNY_PRUNING = 1, SCALE_IMAGE = 2, FIND_BIGGEST_OBJECT = 4, DO_ROUGH_SEARCH = 8 };\r
+\r
+            CascadeClassifier();\r
+            CascadeClassifier(const string& filename);\r
+            ~CascadeClassifier();\r
+\r
+            bool empty() const;\r
+            bool load(const string& filename);\r
+            bool read(const FileNode& node);\r
+\r
+            void detectMultiScale( const Mat& image, vector<Rect>& objects, double scaleFactor=1.1,\r
+                int minNeighbors=3, int flags=0, Size minSize=Size(), Size maxSize=Size());\r
+\r
+            bool setImage( Ptr<FeatureEvaluator>&, const Mat& );\r
+            int runAt( Ptr<FeatureEvaluator>&, Point );\r
+\r
+            bool isStumpBased;\r
+\r
+            int stageType;\r
+            int featureType;\r
+            int ncategories;\r
+            Size origWinSize;\r
+\r
+            vector<Stage> stages;\r
+            vector<DTree> classifiers;\r
+            vector<DTreeNode> nodes;\r
+            vector<float> leaves;\r
+            vector<int> subsets;\r
+\r
+            Ptr<FeatureEvaluator> feval;\r
+            Ptr<CvHaarClassifierCascade> oldCascade;\r
+        };\r
+\r
+    }\r
 \r
     //! Speckle filtering - filters small connected components on diparity image.\r
     //! It sets pixel (x,y) to newVal if it coresponds to small CC with size < maxSpeckleSize.\r
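Editor's note (not part of the commit): the interface above mirrors the CPU cv::CascadeClassifier, so intended client code would look roughly like the sketch below. The cascadeclassifier.cpp added by this commit is still a skeleton, so detection is not functional yet; the file names are hypothetical examples.

    cv::Mat frame = cv::imread("frame.png", 0);                            // hypothetical input image
    cv::gpu::CascadeClassifier cascade("haarcascade_frontalface_alt.xml"); // hypothetical model file
    std::vector<cv::Rect> objects;
    if (!cascade.empty())
        cascade.detectMultiScale(frame, objects, 1.2, 4, 0, cv::Size(24, 24));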
diff --git a/modules/gpu/src/cascadeclassifier.cpp b/modules/gpu/src/cascadeclassifier.cpp
new file mode 100644 (file)
index 0000000..e6a4e72
--- /dev/null
@@ -0,0 +1,110 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+//  By downloading, copying, installing or using the software you agree to this license.\r
+//  If you do not agree to this license, do not download, install,\r
+//  copy or use the software.\r
+//\r
+//\r
+//                           License Agreement\r
+//                For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.\r
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+//   * Redistributions of source code must retain the above copyright notice,\r
+//     this list of conditions and the following disclaimer.\r
+//\r
+//   * Redistributions in binary form must reproduce the above copyright notice,\r
+//     this list of conditions and the following disclaimer in the documentation\r
+//     and/or other materials provided with the distribution.\r
+//\r
+//   * The name of the copyright holders may not be used to endorse or promote products\r
+//     derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "precomp.hpp"\r
+\r
+\r
+\r
+\r
+using namespace cv;\r
+using namespace cv::gpu;\r
+using namespace std;\r
+\r
+#if !defined (HAVE_CUDA)\r
+\r
+cv::gpu::CascadeClassifier::CascadeClassifier()  { throw_nogpu(); }\r
+cv::gpu::CascadeClassifier::CascadeClassifier(const string&)  { throw_nogpu(); }\r
+cv::gpu::CascadeClassifier::~CascadeClassifier()  { throw_nogpu(); }\r
+\r
+bool cv::gpu::CascadeClassifier::empty() const { throw_nogpu(); return true; }\r
+bool cv::gpu::CascadeClassifier::load(const string&)  { throw_nogpu(); return true; }\r
+bool cv::gpu::CascadeClassifier::read(const FileNode&)  { throw_nogpu(); return true; }\r
+\r
+void cv::gpu::CascadeClassifier::detectMultiScale( const Mat&, vector<Rect>&, double, int, int, Size, Size) { throw_nogpu(); }\r
+\r
+       \r
+\r
+\r
+\r
+#else\r
+\r
+\r
+cv::gpu::CascadeClassifier::CascadeClassifier()\r
+{\r
+\r
+}\r
+\r
+cv::gpu::CascadeClassifier::CascadeClassifier(const string& filename)\r
+{\r
+\r
+}\r
+\r
+cv::gpu::CascadeClassifier::~CascadeClassifier()\r
+{\r
+    \r
+}\r
+\r
+bool cv::gpu::CascadeClassifier::empty() const\r
+{\r
+    // Temporary stub: reference an NPP_staging symbol so the linker keeps the\r
+    // library dependency; this is not a real emptiness check yet.\r
+    int *a = (int*)&nppiStTranspose_32u_C1R;\r
+    return *a == 0xFFFFF;\r
+}\r
+\r
+bool cv::gpu::CascadeClassifier::load(const string& filename)\r
+{\r
+    return true;\r
+}\r
+\r
+bool cv::gpu::CascadeClassifier::read(const FileNode& node)\r
+{\r
+    return true;\r
+}\r
+\r
+void cv::gpu::CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects, double scaleFactor, \r
+    int minNeighbors, int flags, Size minSize, Size maxSize)\r
+{\r
+\r
+}\r
+\r
+#endif
\ No newline at end of file
diff --git a/modules/gpu/src/precomp.hpp b/modules/gpu/src/precomp.hpp
index ab6c42d..faf9a18 100644 (file)
@@ -66,6 +66,7 @@
     #include "cuda_runtime_api.h"\r
     #include "opencv2/gpu/stream_accessor.hpp"\r
     #include "npp.h"    \r
+    #include "npp_staging.h"\r
 \r
 #define CUDART_MINIMUM_REQUIRED_VERSION 3020\r
 #define NPP_MINIMUM_REQUIRED_VERSION 3216\r
@@ -78,6 +79,7 @@
     #error "Insufficient NPP version, please update it."\r
 #endif\r
 \r
+    static inline void throw_nogpu() { CV_Error(CV_GpuNotSupported, "The called functionality is disabled for current build or platform"); }\r
 \r
 #else /* defined(HAVE_CUDA) */\r
 \r
diff --git a/tests/gpu/src/stereo_bm.cpp b/tests/gpu/src/stereo_bm.cpp
index c01912f..5b6062b 100644 (file)
@@ -55,12 +55,7 @@ struct CV_GpuStereoBMTest : public CvTest
 
 
     void run_stress()
-    {
-        //cv::setBreakOnError(true);
-        int winsz[] = { 13, 15, 17, 19 };
-        int disps[] = { 128, 160, 192, 256};
-
-        Size res[] = { Size(1027, 768), Size(1280, 1024), Size(1600, 1152), Size(1920, 1080) };        
+    {                
         RNG rng;
 
         for(int i = 0; i < 10; ++i)