source_group("Src\\NVidia" FILES ${ncv_files})
ocv_include_directories("src/nvidia" "src/nvidia/core" "src/nvidia/NPP_staging" ${CUDA_INCLUDE_DIRS})
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations /wd4211 /wd4201 /wd4100 /wd4505 /wd4408)
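+ # strip -Wsign-promo from the inherited C++ flags for the sources built in this module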
+ string(REPLACE "-Wsign-promo" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
#set (CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-keep")
#set (CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler;/EHsc-;")
void cv::gpu::gemm(const GpuMat& src1, const GpuMat& src2, double alpha, const GpuMat& src3, double beta, GpuMat& dst, int flags, Stream& stream)\r
{\r
#ifndef HAVE_CUBLAS\r
-\r
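+ // reference every parameter so the CUBLAS-less stub compiles without unused-parameter warnings\r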
+ (void)src1; (void)src2; (void)alpha; (void)src3; (void)beta; (void)dst; (void)flags; (void)stream;\r
CV_Error(CV_StsNotImplemented, "The library was built without CUBLAS");\r
\r
#else\r
const DevMem2Db& trainIdx, const DevMem2Db& distance,\r
int cc, cudaStream_t stream)\r
{\r
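+ // cc (the device compute capability) is not used on this path; the cast below silences the unused-parameter warning\r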
+ (void)cc;\r
if (query.cols <= 64)\r
{\r
matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
const DevMem2Db& trainIdx, const DevMem2Db& imgIdx, const DevMem2Db& distance,\r
int cc, cudaStream_t stream)\r
{\r
+ (void)cc;\r
if (query.cols <= 64)\r
{\r
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< DevMem2D_<int2> >(trainIdx), static_cast< DevMem2D_<int2> >(imgIdx), static_cast< DevMem2D_<float2> > (distance), stream);\r
const DevMem2Df& allDist,\r
int cc, cudaStream_t stream)\r
{\r
+ (void)cc;\r
if (query.cols <= 64)\r
{\r
calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream);\r
const DevMem2Di& trainIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
+ (void)cc;\r
if (query.cols <= 64)\r
{\r
matchUnrolledCached<16, 64, Dist>(query, train, mask, trainIdx, distance, stream);\r
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance,\r
int cc, cudaStream_t stream)\r
{\r
+ (void)cc;\r
if (query.cols <= 64)\r
{\r
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, trainIdx, imgIdx, distance, stream);\r
const DevMem2Di& trainIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
+ (void)cc;\r
if (query.cols <= 64)\r
{\r
matchUnrolled<16, 64, Dist>(query, train, maxDistance, mask, trainIdx, distance, nMatches, stream);\r
const DevMem2Di& trainIdx, const DevMem2Di& imgIdx, const DevMem2Df& distance, const DevMem2D_<unsigned int>& nMatches,\r
int cc, cudaStream_t stream)\r
{\r
+ (void)cc;\r
if (query.cols <= 64)\r
{\r
matchUnrolled<16, 64, Dist>(query, trains, n, maxDistance, masks, trainIdx, imgIdx, distance, nMatches, stream);\r
void compute_gradients_8UC4(int nbins, int height, int width, const DevMem2Db& img,\r
float angle_scale, DevMem2Df grad, DevMem2Db qangle, bool correct_gamma)\r
{\r
+ (void)nbins;\r
const int nthreads = 256;\r
\r
dim3 bdim(nthreads, 1);\r
void compute_gradients_8UC1(int nbins, int height, int width, const DevMem2Db& img,\r
float angle_scale, DevMem2Df grad, DevMem2Db qangle, bool correct_gamma)\r
{\r
+ (void)nbins;\r
const int nthreads = 256;\r
\r
dim3 bdim(nthreads, 1);\r
{\r
static void call(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2Df mapx, DevMem2Df mapy, DevMem2D_<T> dst, const float* borderValue, int)\r
{\r
+ (void)srcWhole;\r
+ (void)xoff;\r
+ (void)yoff;\r
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;\r
\r
dim3 block(32, 8);\r
{\r
static void call(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, float fx, float fy, DevMem2D_<T> dst)\r
{\r
+ (void)srcWhole;\r
+ (void)xoff;\r
+ (void)yoff;\r
+\r
dim3 block(32, 8);\r
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));\r
\r
{\r
static void call(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, float fx, float fy, DevMem2D_<T> dst, cudaStream_t stream)\r
{\r
+ (void)srcWhole;\r
+ (void)xoff;\r
+ (void)yoff;\r
int iscale_x = round(fx);\r
int iscale_y = round(fy);\r
\r
{\r
static void call(DevMem2D_<T> src, DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2D_<T> dst, const float* borderValue, int)\r
{\r
+ (void)xoff;\r
+ (void)yoff;\r
+ (void)srcWhole;\r
+\r
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;\r
\r
dim3 block(32, 8);\r
ptrVNew->ptr(), dstSize, ns * sizeof (float), dstROI, 1.0f/scale_factor, 1.0f/scale_factor, nppStBicubic) );\r
\r
ScaleVector(ptrVNew->ptr(), ptrVNew->ptr(), 1.0f/scale_factor, ns * nh, stream);\r
- ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);\r
+ ncvAssertCUDALastErrorReturn((int)NCV_CUDA_ERROR);\r
\r
cv::gpu::device::swap<FloatVector*>(ptrU, ptrUNew);\r
cv::gpu::device::swap<FloatVector*>(ptrV, ptrVNew);\r
}\r
\r
// end of warping iterations\r
- ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR);\r
+ ncvAssertCUDAReturn(cudaStreamSynchronize(stream), (int)NCV_CUDA_ERROR);\r
\r
ncvAssertCUDAReturn( cudaMemcpy2DAsync\r
(uOut.ptr(), uOut.pitch(), ptrU->ptr(),\r
- kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR );\r
+ kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR );\r
\r
ncvAssertCUDAReturn( cudaMemcpy2DAsync\r
(vOut.ptr(), vOut.pitch(), ptrV->ptr(),\r
- kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), NCV_CUDA_ERROR );\r
+ kSourcePitch, kSourceWidth*sizeof(float), kSourceHeight, cudaMemcpyDeviceToDevice, stream), (int)NCV_CUDA_ERROR );\r
\r
- ncvAssertCUDAReturn(cudaStreamSynchronize(stream), NCV_CUDA_ERROR);\r
+ ncvAssertCUDAReturn(cudaStreamSynchronize(stream), (int)NCV_CUDA_ERROR);\r
}\r
\r
return NCV_SUCCESS;\r
template<class TList>\r
void call(TList tl)\r
{\r
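+ // tl only carries the Loki type list for template dispatch; its value is never read\r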
+ (void)tl;\r
applyHaarClassifierAnchorParallel <\r
Loki::TL::TypeAt<TList, 0>::Result::value,\r
Loki::TL::TypeAt<TList, 1>::Result::value,\r
template<class TList>\r
void call(TList tl)\r
{\r
+ (void)tl;\r
applyHaarClassifierClassifierParallel <\r
Loki::TL::TypeAt<TList, 0>::Result::value,\r
Loki::TL::TypeAt<TList, 1>::Result::value,\r
template<class TList>\r
void call(TList tl)\r
{\r
+ (void)tl;\r
initializeMaskVector <\r
Loki::TL::TypeAt<TList, 0>::Result::value,\r
Loki::TL::TypeAt<TList, 1>::Result::value >\r
T color,\r
cudaStream_t cuStream)\r
{\r
+ (void)cuStream;\r
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);\r
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);\r
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);\r
/*M///////////////////////////////////////////////////////////////////////////////////////\r
//\r
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. \r
-// \r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
// By downloading, copying, installing or using the software you agree to this license.\r
// If you do not agree to this license, do not download, install,\r
// copy or use the software.\r
\r
virtual NcvBool isInitialized(void) const = 0;\r
virtual NcvBool isCounting(void) const = 0;\r
- \r
+\r
virtual NCVMemoryType memType(void) const = 0;\r
virtual Ncv32u alignment(void) const = 0;\r
virtual size_t maxSize(void) const = 0;\r
}\r
else\r
{\r
- ncvAssertReturn(dst._length * sizeof(T) >= howMuch && \r
+ ncvAssertReturn(dst._length * sizeof(T) >= howMuch &&\r
this->_length * sizeof(T) >= howMuch &&\r
howMuch > 0, NCV_MEM_COPY_ERROR);\r
}\r
- ncvAssertReturn((this->_ptr != NULL || this->_memtype == NCVMemoryTypeNone) && \r
+ ncvAssertReturn((this->_ptr != NULL || this->_memtype == NCVMemoryTypeNone) &&\r
(dst._ptr != NULL || dst._memtype == NCVMemoryTypeNone), NCV_NULL_PTR);\r
\r
NCVStatus ncvStat = NCV_SUCCESS;\r
}\r
else\r
{\r
- ncvAssertReturn(dst._pitch * dst._height >= howMuch && \r
+ ncvAssertReturn(dst._pitch * dst._height >= howMuch &&\r
this->_pitch * this->_height >= howMuch &&\r
howMuch > 0, NCV_MEM_COPY_ERROR);\r
}\r
- ncvAssertReturn((this->_ptr != NULL || this->_memtype == NCVMemoryTypeNone) && \r
+ ncvAssertReturn((this->_ptr != NULL || this->_memtype == NCVMemoryTypeNone) &&\r
(dst._ptr != NULL || dst._memtype == NCVMemoryTypeNone), NCV_NULL_PTR);\r
\r
NCVStatus ncvStat = NCV_SUCCESS;\r
if (this->_memtype != NCVMemoryTypeNone)\r
{\r
- ncvStat = memSegCopyHelper(dst._ptr, dst._memtype, \r
- this->_ptr, this->_memtype, \r
+ ncvStat = memSegCopyHelper(dst._ptr, dst._memtype,\r
+ this->_ptr, this->_memtype,\r
howMuch, cuStream);\r
}\r
\r
{\r
ncvAssertReturn(this->width() >= roi.width && this->height() >= roi.height &&\r
dst.width() >= roi.width && dst.height() >= roi.height, NCV_MEM_COPY_ERROR);\r
- ncvAssertReturn((this->_ptr != NULL || this->_memtype == NCVMemoryTypeNone) && \r
+ ncvAssertReturn((this->_ptr != NULL || this->_memtype == NCVMemoryTypeNone) &&\r
(dst._ptr != NULL || dst._memtype == NCVMemoryTypeNone), NCV_NULL_PTR);\r
\r
NCVStatus ncvStat = NCV_SUCCESS;\r
return ncvStat;\r
}\r
\r
- T &at(Ncv32u x, Ncv32u y) const\r
+ T& at(Ncv32u x, Ncv32u y) const\r
{\r
NcvBool bOutRange = (x >= this->_width || y >= this->_height);\r
ncvAssertPrintCheck(!bOutRange, "Error addressing matrix at [" << x << ", " << y << "]");\r
\r
static void call(Func &functor, std::vector<int> &templateParams)\r
{\r
+ (void)templateParams;\r
functor.call(TList());\r
}\r
};\r
typedef typename Ptr2D::elem_type elem_type;\r
typedef float index_type;\r
\r
- explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) : src(src_) {}\r
+ explicit __host__ __device__ __forceinline__ PointFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)\r
+ : src(src_)\r
+ {\r
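+ // fx and fy exist only to match the common filter-constructor signature; the point filter does not use them\r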
+ (void)fx;\r
+ (void)fy;\r
+ }\r
\r
__device__ __forceinline__ elem_type operator ()(float y, float x) const\r
{\r
typedef typename Ptr2D::elem_type elem_type;\r
typedef float index_type;\r
\r
- explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) : src(src_) {}\r
-\r
+ explicit __host__ __device__ __forceinline__ LinearFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)\r
+ : src(src_)\r
+ {\r
+ (void)fx;\r
+ (void)fy;\r
+ }\r
__device__ __forceinline__ elem_type operator ()(float y, float x) const\r
{\r
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;\r
typedef float index_type;\r
typedef typename TypeVec<float, VecTraits<elem_type>::cn>::vec_type work_type;\r
\r
- explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f) : src(src_) {}\r
+ explicit __host__ __device__ __forceinline__ CubicFilter(const Ptr2D& src_, float fx = 0.f, float fy = 0.f)\r
+ : src(src_)\r
+ {\r
+ (void)fx;\r
+ (void)fy;\r
+ }\r
\r
static __device__ __forceinline__ float bicubicCoeff(float x_)\r
{\r
\r
template <typename T> struct thresh_trunc_func : unary_function<T, T>\r
{\r
- explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {}\r
+ explicit __host__ __device__ __forceinline__ thresh_trunc_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}\r
\r
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const\r
{\r
\r
template <typename T> struct thresh_to_zero_func : unary_function<T, T>\r
{\r
- explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {}\r
+ explicit __host__ __device__ __forceinline__ thresh_to_zero_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}\r
\r
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const\r
{\r
\r
template <typename T> struct thresh_to_zero_inv_func : unary_function<T, T>\r
{\r
- explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {}\r
+ explicit __host__ __device__ __forceinline__ thresh_to_zero_inv_func(T thresh_, T maxVal_ = 0) : thresh(thresh_) {(void)maxVal_;}\r
\r
__device__ __forceinline__ T operator()(typename TypeTraits<T>::ParameterType src) const\r
{\r
/*\r
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
*\r
- * NVIDIA Corporation and its licensors retain all intellectual \r
- * property and proprietary rights in and to this software and \r
- * related documentation and any modifications thereto. \r
- * Any use, reproduction, disclosure, or distribution of this \r
- * software and related documentation without an express license \r
+ * NVIDIA Corporation and its licensors retain all intellectual\r
+ * property and proprietary rights in and to this software and\r
+ * related documentation and any modifications thereto.\r
+ * Any use, reproduction, disclosure, or distribution of this\r
+ * software and related documentation without an express license\r
* agreement from NVIDIA Corporation is strictly prohibited.\r
*/\r
\r
#include "NCVHaarObjectDetection.hpp"\r
\r
\r
-TestHypothesesFilter::TestHypothesesFilter(std::string testName, NCVTestSourceProvider<Ncv32u> &src,\r
- Ncv32u numDstRects, Ncv32u minNeighbors, Ncv32f eps)\r
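+// the constructor parameters take a trailing underscore so they no longer shadow the identically named data members\r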
+TestHypothesesFilter::TestHypothesesFilter(std::string testName, NCVTestSourceProvider<Ncv32u> &src_,\r
+ Ncv32u numDstRects_, Ncv32u minNeighbors_, Ncv32f eps_)\r
:\r
NCVTestProvider(testName),\r
- src(src),\r
- numDstRects(numDstRects),\r
- minNeighbors(minNeighbors),\r
- eps(eps)\r
+ src(src_),\r
+ numDstRects(numDstRects_),\r
+ minNeighbors(minNeighbors_),\r
+ eps(eps_)\r
{\r
}\r
\r
for (Ncv32u j=0; j<numNeighbors; j++)\r
{\r
randVal = (1.0 * h_random32u.ptr()[randCnt++]) / 0xFFFFFFFF; randCnt = randCnt % h_random32u.length();\r
- h_vecSrc.ptr()[srcSlotSize * i + j].x = \r
+ h_vecSrc.ptr()[srcSlotSize * i + j].x =\r
h_vecDst_groundTruth.ptr()[i].x +\r
(Ncv32s)(h_vecDst_groundTruth.ptr()[i].width * this->eps * (randVal - 0.5));\r
randVal = (1.0 * h_random32u.ptr()[randCnt++]) / 0xFFFFFFFF; randCnt = randCnt % h_random32u.length();\r
- h_vecSrc.ptr()[srcSlotSize * i + j].y = \r
+ h_vecSrc.ptr()[srcSlotSize * i + j].y =\r
h_vecDst_groundTruth.ptr()[i].y +\r
(Ncv32s)(h_vecDst_groundTruth.ptr()[i].height * this->eps * (randVal - 0.5));\r
h_vecSrc.ptr()[srcSlotSize * i + j].width = h_vecDst_groundTruth.ptr()[i].width;\r
for (Ncv32u j=numNeighbors; j<srcSlotSize; j++)\r
{\r
randVal = (1.0 * h_random32u.ptr()[randCnt++]) / 0xFFFFFFFF; randCnt = randCnt % h_random32u.length();\r
- h_vecSrc.ptr()[srcSlotSize * i + j].x = \r
+ h_vecSrc.ptr()[srcSlotSize * i + j].x =\r
this->canvasWidth + h_vecDst_groundTruth.ptr()[i].x +\r
(Ncv32s)(h_vecDst_groundTruth.ptr()[i].width * this->eps * (randVal - 0.5));\r
randVal = (1.0 * h_random32u.ptr()[randCnt++]) / 0xFFFFFFFF; randCnt = randCnt % h_random32u.length();\r
- h_vecSrc.ptr()[srcSlotSize * i + j].y = \r
+ h_vecSrc.ptr()[srcSlotSize * i + j].y =\r
this->canvasHeight + h_vecDst_groundTruth.ptr()[i].y +\r
(Ncv32s)(h_vecDst_groundTruth.ptr()[i].height * this->eps * (randVal - 0.5));\r
h_vecSrc.ptr()[srcSlotSize * i + j].width = h_vecDst_groundTruth.ptr()[i].width;\r
//shuffle\r
for (Ncv32u i=0; i<this->numDstRects*srcSlotSize-1; i++)\r
{\r
- Ncv32u randVal = h_random32u.ptr()[randCnt++]; randCnt = randCnt % h_random32u.length();\r
- Ncv32u secondSwap = randVal % (this->numDstRects*srcSlotSize-1 - i);\r
+ Ncv32u randValLocal = h_random32u.ptr()[randCnt++]; randCnt = randCnt % h_random32u.length();\r
+ Ncv32u secondSwap = randValLocal % (this->numDstRects*srcSlotSize-1 - i);\r
NcvRect32u tmp = h_vecSrc.ptr()[i + secondSwap];\r
h_vecSrc.ptr()[i + secondSwap] = h_vecSrc.ptr()[i];\r
h_vecSrc.ptr()[i] = tmp;\r
/*\r
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
*\r
- * NVIDIA Corporation and its licensors retain all intellectual \r
- * property and proprietary rights in and to this software and \r
- * related documentation and any modifications thereto. \r
- * Any use, reproduction, disclosure, or distribution of this \r
- * software and related documentation without an express license \r
+ * NVIDIA Corporation and its licensors retain all intellectual\r
+ * property and proprietary rights in and to this software and\r
+ * related documentation and any modifications thereto.\r
+ * Any use, reproduction, disclosure, or distribution of this\r
+ * software and related documentation without an express license\r
* agreement from NVIDIA Corporation is strictly prohibited.\r
*/\r
\r
\r
\r
template <class T>\r
-TestResize<T>::TestResize(std::string testName, NCVTestSourceProvider<T> &src,\r
- Ncv32u width, Ncv32u height, Ncv32u scaleFactor, NcvBool bTextureCache)\r
+TestResize<T>::TestResize(std::string testName, NCVTestSourceProvider<T> &src_,\r
+ Ncv32u width_, Ncv32u height_, Ncv32u scaleFactor_, NcvBool bTextureCache_)\r
:\r
NCVTestProvider(testName),\r
- src(src),\r
- width(width),\r
- height(height),\r
- scaleFactor(scaleFactor),\r
- bTextureCache(bTextureCache)\r
+ src(src_),\r
+ width(width_),\r
+ height(height_),\r
+ scaleFactor(scaleFactor_),\r
+ bTextureCache(bTextureCache_)\r
{\r
}\r
\r
void generateHaarApplicationTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv8u> &src,\r
Ncv32u maxWidth, Ncv32u maxHeight)\r
{\r
+ (void)maxHeight;\r
for (Ncv32u i=20; i<512; i+=11)\r
{\r
for (Ncv32u j=20; j<128; j+=5)\r
\r
static void devNullOutput(const std::string& msg)\r
{\r
+ (void)msg;\r
}\r
\r
bool nvidia_NPPST_Integral_Image(const std::string& test_data_path, OutputLevel outputLevel)\r
{\r
- path = test_data_path;\r
+ path = test_data_path.c_str();\r
ncvSetDebugOutputHandler(devNullOutput);\r
\r
NCVAutoTestLister testListerII("NPPST Integral Image", outputLevel);\r
generateVectorTests(testListerVectorOperations, testSrcRandom_32u, 4096*4096);\r
\r
return testListerVectorOperations.invoke();\r
+\r
}\r
\r
bool nvidia_NCV_Haar_Cascade_Loader(const std::string& test_data_path, OutputLevel outputLevel)\r
{\r
cv::gpu::DeviceInfo devInfo;\r
\r
- std::string path;\r
+ std::string _path;\r
\r
virtual void SetUp()\r
{\r
devInfo = GetParam();\r
\r
cv::gpu::setDevice(devInfo.deviceID());\r
-\r
- path = std::string(TS::ptr()->get_data_path()) + "haarcascade/";\r
+ _path = TS::ptr()->get_data_path().c_str();\r
+ _path = _path + "haarcascade/";\r
}\r
};\r
\r
\r
TEST_P(NPPST, SquaredIntegral)\r
{\r
- bool res = nvidia_NPPST_Squared_Integral_Image(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NPPST_Squared_Integral_Image(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
\r
TEST_P(NPPST, RectStdDev)\r
{\r
- bool res = nvidia_NPPST_RectStdDev(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NPPST_RectStdDev(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
\r
TEST_P(NPPST, Resize)\r
{\r
- bool res = nvidia_NPPST_Resize(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NPPST_Resize(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
\r
TEST_P(NPPST, VectorOperations)\r
{\r
- bool res = nvidia_NPPST_Vector_Operations(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NPPST_Vector_Operations(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
\r
TEST_P(NPPST, Transpose)\r
{\r
- bool res = nvidia_NPPST_Transpose(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NPPST_Transpose(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
\r
TEST_P(NCV, VectorOperations)\r
{\r
- bool res = nvidia_NCV_Vector_Operations(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NCV_Vector_Operations(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
\r
TEST_P(NCV, HaarCascadeLoader)\r
{\r
- bool res = nvidia_NCV_Haar_Cascade_Loader(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NCV_Haar_Cascade_Loader(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
\r
TEST_P(NCV, HaarCascadeApplication)\r
{\r
- bool res = nvidia_NCV_Haar_Cascade_Application(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NCV_Haar_Cascade_Application(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
\r
TEST_P(NCV, HypothesesFiltration)\r
{\r
- bool res = nvidia_NCV_Hypotheses_Filtration(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NCV_Hypotheses_Filtration(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r
TEST_P(NCV, Visualization)\r
{\r
// this functionality is not used in the gpu module\r
- bool res = nvidia_NCV_Visualization(path, nvidiaTestOutputLevel);\r
+ bool res = nvidia_NCV_Visualization(_path, nvidiaTestOutputLevel);\r
\r
ASSERT_TRUE(res);\r
}\r