file(GLOB test_srcs "*.cpp")\r
file(GLOB test_hdrs "*.h*")\r
\r
-add_executable(${the_target} ${test_srcs} ${test_hdrs})\r
-\r
-if(PCHSupport_FOUND)\r
- set(pch_header ${CMAKE_CURRENT_SOURCE_DIR}/test_precomp.hpp)\r
- if(${CMAKE_GENERATOR} MATCHES "Visual*" OR ${CMAKE_GENERATOR} MATCHES "Xcode*")\r
- if(${CMAKE_GENERATOR} MATCHES "Visual*")\r
- set(${the_target}_pch "test_precomp.cpp")\r
- endif() \r
- add_native_precompiled_header(${the_target} ${pch_header})\r
- elseif(CMAKE_COMPILER_IS_GNUCXX AND ${CMAKE_GENERATOR} MATCHES ".*Makefiles")\r
- add_precompiled_header(${the_target} ${pch_header})\r
- endif()\r
+if(HAVE_CUDA)
+    # Pull in CUDA headers and the NCV sources shared with the gpu module.
+    include_directories(${CUDA_INCLUDE_DIRS} ${CMAKE_SOURCE_DIR}/modules/gpu/src/nvidia ${CMAKE_SOURCE_DIR}/modules/gpu/src/nvidia/core ${CMAKE_SOURCE_DIR}/modules/gpu/src/nvidia/NPP_staging)
+
+    # NOTE(review): 'nvidia' and 'ncv_cpp' are set only under HAVE_CUDA; without
+    # CUDA they expand to empty strings in add_executable() below (harmless).
+    file(GLOB nvidia "nvidia/*.*")
+    SET(ncv_cpp ${CMAKE_SOURCE_DIR}/modules/gpu/src/nvidia/core/NCV.cpp)
+    source_group("nvidia" FILES ${nvidia})
endif()

+
+add_executable(${the_target} ${test_srcs} ${test_hdrs} ${nvidia} ${ncv_cpp})
+\r
+#if(PCHSupport_FOUND)\r
+# set(pch_header ${CMAKE_CURRENT_SOURCE_DIR}/test_precomp.hpp)\r
+# if(${CMAKE_GENERATOR} MATCHES "Visual*" OR ${CMAKE_GENERATOR} MATCHES "Xcode*")\r
+# if(${CMAKE_GENERATOR} MATCHES "Visual*")\r
+# set(${the_target}_pch "test_precomp.cpp")\r
+# endif() \r
+# add_native_precompiled_header(${the_target} ${pch_header})\r
+# elseif(CMAKE_COMPILER_IS_GNUCXX AND ${CMAKE_GENERATOR} MATCHES ".*Makefiles")\r
+# add_precompiled_header(${the_target} ${pch_header})\r
+# endif()\r
+#endif()\r
+\r
# Additional target properties\r
set_target_properties(${the_target} PROPERTIES\r
DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _ncvautotestlister_hpp_\r
+#define _ncvautotestlister_hpp_\r
+\r
+#include <vector>\r
+\r
+#include "NCVTest.hpp"\r
+\r
+class NCVAutoTestLister\r
+{\r
+public:\r
+\r
+ NCVAutoTestLister(std::string testSuiteName, NcvBool bStopOnFirstFail=false, NcvBool bCompactOutput=true)\r
+ :\r
+ testSuiteName(testSuiteName),\r
+ bStopOnFirstFail(bStopOnFirstFail),\r
+ bCompactOutput(bCompactOutput)\r
+ {\r
+ }\r
+\r
+ void add(INCVTest *test)\r
+ {\r
+ this->tests.push_back(test);\r
+ }\r
+\r
+ bool invoke()\r
+ {\r
+ Ncv32u nPassed = 0;\r
+ Ncv32u nFailed = 0;\r
+ Ncv32u nFailedMem = 0;\r
+\r
+ if (bCompactOutput)\r
+ {\r
+ printf("Test suite '%s' with %d tests\n", \r
+ testSuiteName.c_str(),\r
+ (int)(this->tests.size()));\r
+ }\r
+\r
+ for (Ncv32u i=0; i<this->tests.size(); i++)\r
+ {\r
+ INCVTest &curTest = *tests[i];\r
+\r
+ NCVTestReport curReport;\r
+ bool res = curTest.executeTest(curReport);\r
+\r
+ if (!bCompactOutput)\r
+ {\r
+ printf("Test %3i %16s; Consumed mem GPU = %8d, CPU = %8d; %s\n",\r
+ i,\r
+ curTest.getName().c_str(),\r
+ curReport.statsNums["MemGPU"],\r
+ curReport.statsNums["MemCPU"],\r
+ curReport.statsText["rcode"].c_str());\r
+ }\r
+\r
+ if (res)\r
+ {\r
+ nPassed++;\r
+ if (bCompactOutput)\r
+ {\r
+ printf(".");\r
+ }\r
+ }\r
+ else\r
+ {\r
+ if (!curReport.statsText["rcode"].compare("FAILED"))\r
+ {\r
+ nFailed++;\r
+ if (bCompactOutput)\r
+ {\r
+ printf("x");\r
+ }\r
+ if (bStopOnFirstFail)\r
+ {\r
+ break;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ nFailedMem++;\r
+ if (bCompactOutput)\r
+ {\r
+ printf("m");\r
+ }\r
+ }\r
+ }\r
+ fflush(stdout);\r
+ }\r
+ if (bCompactOutput)\r
+ {\r
+ printf("\n");\r
+ }\r
+\r
+ printf("Test suite '%s' complete: %d total, %d passed, %d memory errors, %d failed\n\n", \r
+ testSuiteName.c_str(),\r
+ (int)(this->tests.size()),\r
+ nPassed,\r
+ nFailedMem,\r
+ nFailed);\r
+\r
+ bool passed = nFailed == 0 && nFailedMem == 0;\r
+ return passed;\r
+ }\r
+\r
+ ~NCVAutoTestLister()\r
+ {\r
+ for (Ncv32u i=0; i<this->tests.size(); i++)\r
+ {\r
+ delete tests[i];\r
+ }\r
+ }\r
+\r
+private:\r
+\r
+ NcvBool bStopOnFirstFail;\r
+ NcvBool bCompactOutput;\r
+ std::string testSuiteName;\r
+ std::vector<INCVTest *> tests;\r
+};\r
+\r
+#endif // _ncvautotestlister_hpp_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _ncvtest_hpp_\r
+#define _ncvtest_hpp_\r
+\r
+#pragma warning( disable : 4201 4408 4127 4100)\r
+\r
+#include <string>\r
+#include <vector>\r
+#include <map>\r
+#include <memory>\r
+#include <algorithm>\r
+#include <fstream>\r
+\r
+#include <cuda_runtime.h>\r
+#include "NPP_staging.hpp"\r
+\r
+\r
+// Per-test result record filled in during executeTest():
+//   statsNums: numeric stats ("MemGPU"/"MemCPU" = peak allocator usage)
+//   statsText: textual stats ("rcode" = "Passed", "FAILED" or "Memory FAILED")
+struct NCVTestReport
+{
+    std::map<std::string, Ncv32u> statsNums;
+    std::map<std::string, std::string> statsText;
+};
+\r
+\r
+// Abstract interface for a single runnable test.
+class INCVTest
+{
+public:
+    // Runs the test and fills 'report'; returns true on success.
+    virtual bool executeTest(NCVTestReport &report) = 0;
+    virtual std::string getName() const = 0;
+
+    // BUGFIX: virtual destructor added. NCVAutoTestLister deletes tests
+    // through INCVTest*, which is undefined behavior without it.
+    virtual ~INCVTest() {}
+};
+\r
+\r
+// Base class for concrete NCV tests. Derived classes implement
+// init/process/deinit/toString; executeTest() drives the full cycle,
+// including the two-phase memory sizing performed in initMemory().
+class NCVTestProvider : public INCVTest
+{
+public:
+
+    NCVTestProvider(std::string testName)
+        :
+        testName(testName)
+    {
+        int devId;
+        // Cache the current CUDA device properties; textureAlignment is used
+        // to configure the stack allocators, and devProp is read by subclasses.
+        ncvAssertPrintReturn(cudaSuccess == cudaGetDevice(&devId), "Error returned from cudaGetDevice", );
+        ncvAssertPrintReturn(cudaSuccess == cudaGetDeviceProperties(&this->devProp, devId), "Error returned from cudaGetDeviceProperties", );
+    }
+
+    virtual bool init() = 0;
+    virtual bool process() = 0;
+    virtual bool deinit() = 0;
+    // Serializes the test's construction parameters into strOut.
+    virtual bool toString(std::ofstream &strOut) = 0;
+
+    virtual std::string getName() const
+    {
+        return this->testName;
+    }
+
+    virtual ~NCVTestProvider()
+    {
+        deinitMemory();
+    }
+
+    // Full test cycle: size memory -> init -> process -> deinit.
+    // On any failure a dump file is written and "rcode" keeps the failure
+    // string ("FAILED", or "Memory FAILED" when set by initMemory).
+    virtual bool executeTest(NCVTestReport &report)
+    {
+        bool res;
+        report.statsText["rcode"] = "FAILED";
+
+        res = initMemory(report);
+        if (!res)
+        {
+            dumpToFile(report);
+            deinitMemory();
+            return false;
+        }
+
+        res = init();
+        if (!res)
+        {
+            dumpToFile(report);
+            deinit();
+            deinitMemory();
+            return false;
+        }
+
+        res = process();
+        if (!res)
+        {
+            dumpToFile(report);
+            deinit();
+            deinitMemory();
+            return false;
+        }
+
+        res = deinit();
+        if (!res)
+        {
+            dumpToFile(report);
+            deinitMemory();
+            return false;
+        }
+
+        deinitMemory();
+
+        report.statsText["rcode"] = "Passed";
+        return true;
+    }
+
+protected:
+
+    cudaDeviceProp devProp;
+    std::auto_ptr<INCVMemAllocator> allocatorGPU;
+    std::auto_ptr<INCVMemAllocator> allocatorCPU;
+
+private:
+
+    std::string testName;
+
+    // Two-phase allocation sizing:
+    //   1) run process() against counting stack allocators (no real memory)
+    //      to record the peak GPU/CPU usage;
+    //   2) re-create real device / pinned-host stack allocators of exactly
+    //      the measured size for the actual run.
+    bool initMemory(NCVTestReport &report)
+    {
+        // Phase 1: counting allocators (constructed with alignment only).
+        this->allocatorGPU.reset(new NCVMemStackAllocator(devProp.textureAlignment));
+        this->allocatorCPU.reset(new NCVMemStackAllocator(devProp.textureAlignment));
+
+        if (!this->allocatorGPU.get()->isInitialized() ||
+            !this->allocatorCPU.get()->isInitialized())
+        {
+            report.statsText["rcode"] = "Memory FAILED";
+            return false;
+        }
+
+        // Dry-run the test body to accumulate allocation sizes.
+        if (!this->process())
+        {
+            report.statsText["rcode"] = "Memory FAILED";
+            return false;
+        }
+
+        Ncv32u maxGPUsize = (Ncv32u)this->allocatorGPU.get()->maxSize();
+        Ncv32u maxCPUsize = (Ncv32u)this->allocatorCPU.get()->maxSize();
+
+        report.statsNums["MemGPU"] = maxGPUsize;
+        report.statsNums["MemCPU"] = maxCPUsize;
+
+        // Phase 2: real allocators sized to the measured peaks.
+        this->allocatorGPU.reset(new NCVMemStackAllocator(NCVMemoryTypeDevice, maxGPUsize, devProp.textureAlignment));
+
+        this->allocatorCPU.reset(new NCVMemStackAllocator(NCVMemoryTypeHostPinned, maxCPUsize, devProp.textureAlignment));
+
+        if (!this->allocatorGPU.get()->isInitialized() ||
+            !this->allocatorCPU.get()->isInitialized())
+        {
+            report.statsText["rcode"] = "Memory FAILED";
+            return false;
+        }
+
+        return true;
+    }
+
+    void deinitMemory()
+    {
+        this->allocatorGPU.reset();
+        this->allocatorCPU.reset();
+    }
+
+    // Writes "TestDump_[m_]<name>.log" describing the failed run: the report
+    // key/value pairs plus the subclass's toString() parameter dump.
+    void dumpToFile(NCVTestReport &report)
+    {
+        bool bReasonMem = (0 == report.statsText["rcode"].compare("Memory FAILED"));
+        std::string fname = "TestDump_";
+        fname += (bReasonMem ? "m_" : "") + this->testName + ".log";
+        std::ofstream stream(fname.c_str(), std::ios::trunc | std::ios::out);
+        if (!stream.is_open()) return;
+
+        stream << "NCV Test Failure Log: " << this->testName << std::endl;
+        stream << "====================================================" << std::endl << std::endl;
+        stream << "Test initialization report: " << std::endl;
+        for (std::map<std::string,std::string>::iterator it=report.statsText.begin();
+             it != report.statsText.end(); it++)
+        {
+            stream << it->first << "=" << it->second << std::endl;
+        }
+        for (std::map<std::string,Ncv32u>::iterator it=report.statsNums.begin();
+             it != report.statsNums.end(); it++)
+        {
+            stream << it->first << "=" << it->second << std::endl;
+        }
+        stream << std::endl;
+
+        stream << "Test initialization parameters: " << std::endl;
+        bool bSerializeRes = false;
+        // toString() runs user code over a possibly broken test state; shield
+        // the dump from any exception it might throw.
+        try
+        {
+            bSerializeRes = this->toString(stream);
+        }
+        catch (...)
+        {
+        }
+
+        if (!bSerializeRes)
+        {
+            stream << "Couldn't retrieve object dump" << std::endl;
+        }
+
+        stream.flush();
+    }
+};
+\r
+#endif // _ncvtest_hpp_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _ncvtestsourceprovider_hpp_\r
+#define _ncvtestsourceprovider_hpp_\r
+\r
+#include <memory>\r
+\r
+#include "NCV.hpp"\r
+#include <opencv2/highgui/highgui.hpp>\r
+\r
+\r
+template <class T>\r
+class NCVTestSourceProvider\r
+{\r
+public:\r
+\r
+ NCVTestSourceProvider(Ncv32u seed, T rangeLow, T rangeHigh, Ncv32u maxWidth, Ncv32u maxHeight)\r
+ :\r
+ bInit(false)\r
+ {\r
+ ncvAssertPrintReturn(rangeLow < rangeHigh, "NCVTestSourceProvider ctor:: Invalid range", );\r
+\r
+ int devId;\r
+ cudaDeviceProp devProp;\r
+ ncvAssertPrintReturn(cudaSuccess == cudaGetDevice(&devId), "Error returned from cudaGetDevice", );\r
+ ncvAssertPrintReturn(cudaSuccess == cudaGetDeviceProperties(&devProp, devId), "Error returned from cudaGetDeviceProperties", );\r
+\r
+ //Ncv32u maxWpitch = alignUp(maxWidth * sizeof(T), devProp.textureAlignment);\r
+\r
+ allocatorCPU.reset(new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, devProp.textureAlignment));\r
+ data.reset(new NCVMatrixAlloc<T>(*this->allocatorCPU.get(), maxWidth, maxHeight));\r
+ ncvAssertPrintReturn(data.get()->isMemAllocated(), "NCVTestSourceProvider ctor:: Matrix not allocated", );\r
+\r
+ this->dataWidth = maxWidth;\r
+ this->dataHeight = maxHeight;\r
+\r
+ srand(seed);\r
+\r
+ for (Ncv32u i=0; i<maxHeight; i++)\r
+ {\r
+ for (Ncv32u j=0; j<data.get()->stride(); j++)\r
+ {\r
+ data.get()->ptr()[i * data.get()->stride() + j] =\r
+ (T)(((1.0 * rand()) / RAND_MAX) * (rangeHigh - rangeLow) + rangeLow);\r
+ }\r
+ }\r
+\r
+ this->bInit = true;\r
+ }\r
+\r
+ NCVTestSourceProvider(std::string pgmFilename)\r
+ :\r
+ bInit(false)\r
+ {\r
+ ncvAssertPrintReturn(sizeof(T) == 1, "NCVTestSourceProvider ctor:: PGM constructor complies only with 8bit types", );\r
+\r
+ cv::Mat image = cv::imread(pgmFilename); \r
+ ncvAssertPrintReturn(!image.empty(), "NCVTestSourceProvider ctor:: PGM file error", );\r
+\r
+ int devId;\r
+ cudaDeviceProp devProp;\r
+ ncvAssertPrintReturn(cudaSuccess == cudaGetDevice(&devId), "Error returned from cudaGetDevice", );\r
+ ncvAssertPrintReturn(cudaSuccess == cudaGetDeviceProperties(&devProp, devId), "Error returned from cudaGetDeviceProperties", );\r
+\r
+ allocatorCPU.reset(new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, devProp.textureAlignment));\r
+ data.reset(new NCVMatrixAlloc<T>(*this->allocatorCPU.get(), image.cols, image.rows));\r
+ ncvAssertPrintReturn(data.get()->isMemAllocated(), "NCVTestSourceProvider ctor:: Matrix not allocated", );\r
+\r
+ this->dataWidth = image.cols;\r
+ this->dataHeight = image.rows;\r
+\r
+ cv::Mat hdr(image.size(), CV_8UC1, data.get()->ptr(), data.get()->pitch());\r
+ image.copyTo(hdr);\r
+ \r
+ this->bInit = true;\r
+ }\r
+\r
+ NcvBool fill(NCVMatrix<T> &dst)\r
+ {\r
+ ncvAssertReturn(this->isInit() &&\r
+ dst.memType() == allocatorCPU.get()->memType(), false);\r
+\r
+ if (dst.width() == 0 || dst.height() == 0)\r
+ {\r
+ return true;\r
+ }\r
+\r
+ for (Ncv32u i=0; i<dst.height(); i++)\r
+ {\r
+ Ncv32u srcLine = i % this->dataHeight;\r
+\r
+ Ncv32u srcFullChunks = dst.width() / this->dataWidth;\r
+ for (Ncv32u j=0; j<srcFullChunks; j++)\r
+ {\r
+ memcpy(dst.ptr() + i * dst.stride() + j * this->dataWidth,\r
+ this->data.get()->ptr() + this->data.get()->stride() * srcLine,\r
+ this->dataWidth * sizeof(T));\r
+ }\r
+\r
+ Ncv32u srcLastChunk = dst.width() % this->dataWidth;\r
+ memcpy(dst.ptr() + i * dst.stride() + srcFullChunks * this->dataWidth,\r
+ this->data.get()->ptr() + this->data.get()->stride() * srcLine,\r
+ srcLastChunk * sizeof(T));\r
+ }\r
+\r
+ return true;\r
+ }\r
+\r
+ NcvBool fill(NCVVector<T> &dst)\r
+ {\r
+ ncvAssertReturn(this->isInit() &&\r
+ dst.memType() == allocatorCPU.get()->memType(), false);\r
+\r
+ if (dst.length() == 0)\r
+ {\r
+ return true;\r
+ }\r
+\r
+ Ncv32u srcLen = this->dataWidth * this->dataHeight;\r
+\r
+ Ncv32u srcFullChunks = (Ncv32u)dst.length() / srcLen;\r
+ for (Ncv32u j=0; j<srcFullChunks; j++)\r
+ {\r
+ memcpy(dst.ptr() + j * srcLen, this->data.get()->ptr(), srcLen * sizeof(T));\r
+ }\r
+\r
+ Ncv32u srcLastChunk = dst.length() % srcLen;\r
+ memcpy(dst.ptr() + srcFullChunks * srcLen, this->data.get()->ptr(), srcLastChunk * sizeof(T));\r
+\r
+ return true;\r
+ }\r
+\r
+ ~NCVTestSourceProvider()\r
+ {\r
+ data.reset();\r
+ allocatorCPU.reset();\r
+ }\r
+\r
+private:\r
+\r
+ NcvBool isInit(void)\r
+ {\r
+ return this->bInit;\r
+ }\r
+\r
+ NcvBool bInit;\r
+ std::auto_ptr< INCVMemAllocator > allocatorCPU;\r
+ std::auto_ptr< NCVMatrixAlloc<T> > data;\r
+ Ncv32u dataWidth;\r
+ Ncv32u dataHeight;\r
+};\r
+\r
+#endif // _ncvtestsourceprovider_hpp_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include "TestCompact.h"\r
+\r
+\r
+// Constructs a stream-compaction test: 'length' elements are drawn from
+// 'src'; 'badElem' is the sentinel to be removed; badElemPercentage is
+// clamped to at most 100.
+TestCompact::TestCompact(std::string testName, NCVTestSourceProvider<Ncv32u> &src,
+                         Ncv32u length, Ncv32u badElem, Ncv32u badElemPercentage)
+    :
+    NCVTestProvider(testName),
+    src(src),
+    length(length),
+    badElem(badElem),
+    badElemPercentage(badElemPercentage > 100 ? 100 : badElemPercentage)
+{
+}
+\r
+\r
+// Serializes the test construction parameters, one "name=value" per line.
+bool TestCompact::toString(std::ofstream &strOut)
+{
+    strOut << "length=" << length << std::endl
+           << "badElem=" << badElem << std::endl
+           << "badElemPercentage=" << badElemPercentage << std::endl;
+    return true;
+}
+\r
+\r
+// No per-test initialization needed; all setup happens in process().
+bool TestCompact::init()
+{
+    return true;
+}
+\r
+\r
+// Fills a vector with random data, overwrites roughly badElemPercentage% of
+// the elements with the sentinel 'badElem', compacts the vector on the GPU
+// (nppsStCompact_32u) and on the host (nppsStCompact_32u_host), and compares
+// the two results bit-to-bit.
+bool TestCompact::process()
+{
+    NCVStatus ncvStat;
+    bool rcode = false;
+
+    // Host/device source vectors, plus destinations and a host copy of the
+    // device result for comparison.
+    NCVVectorAlloc<Ncv32u> h_vecSrc(*this->allocatorCPU.get(), this->length);
+    ncvAssertReturn(h_vecSrc.isMemAllocated(), false);
+    NCVVectorAlloc<Ncv32u> d_vecSrc(*this->allocatorGPU.get(), this->length);
+    ncvAssertReturn(d_vecSrc.isMemAllocated(), false);
+
+    NCVVectorAlloc<Ncv32u> h_vecDst(*this->allocatorCPU.get(), this->length);
+    ncvAssertReturn(h_vecDst.isMemAllocated(), false);
+    NCVVectorAlloc<Ncv32u> d_vecDst(*this->allocatorGPU.get(), this->length);
+    ncvAssertReturn(d_vecDst.isMemAllocated(), false);
+    NCVVectorAlloc<Ncv32u> h_vecDst_d(*this->allocatorCPU.get(), this->length);
+    ncvAssertReturn(h_vecDst_d.isMemAllocated(), false);
+
+    // Skip the real work when running under the counting allocators
+    // (memory-sizing pass driven by NCVTestProvider::initMemory).
+    NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
+    NCV_SKIP_COND_BEGIN
+    ncvAssertReturn(this->src.fill(h_vecSrc), false);
+    for (Ncv32u i=0; i<this->length; i++)
+    {
+        // Map the low byte to 0..99 and use it as a percentage threshold so
+        // roughly badElemPercentage% of elements become 'badElem'.
+        Ncv32u tmp = (h_vecSrc.ptr()[i]) & 0xFF;
+        tmp = tmp * 99 / 255;
+        if (tmp < this->badElemPercentage)
+        {
+            h_vecSrc.ptr()[i] = this->badElem;
+        }
+    }
+    NCV_SKIP_COND_END
+
+    NCVVectorAlloc<Ncv32u> h_dstLen(*this->allocatorCPU.get(), 1);
+    ncvAssertReturn(h_dstLen.isMemAllocated(), false);
+    Ncv32u bufSize;
+    ncvStat = nppsStCompactGetSize_32u(this->length, &bufSize, this->devProp);
+    ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);
+    NCVVectorAlloc<Ncv8u> d_tmpBuf(*this->allocatorGPU.get(), bufSize);
+    ncvAssertReturn(d_tmpBuf.isMemAllocated(), false);
+
+    Ncv32u h_outElemNum_h = 0;
+
+    NCV_SKIP_COND_BEGIN
+    ncvStat = h_vecSrc.copySolid(d_vecSrc, 0);
+    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+    ncvStat = nppsStCompact_32u(d_vecSrc.ptr(), this->length,
+                                d_vecDst.ptr(), h_dstLen.ptr(), this->badElem,
+                                d_tmpBuf.ptr(), bufSize, this->devProp);
+    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+    ncvStat = d_vecDst.copySolid(h_vecDst_d, 0);
+    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+
+    // Host (golden) reference implementation.
+    ncvStat = nppsStCompact_32u_host(h_vecSrc.ptr(), this->length, h_vecDst.ptr(), &h_outElemNum_h, this->badElem);
+    ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+    NCV_SKIP_COND_END
+
+    //bit-to-bit check
+    bool bLoopVirgin = true;
+
+    NCV_SKIP_COND_BEGIN
+    // Output lengths must match, then every surviving element must match.
+    if (h_dstLen.ptr()[0] != h_outElemNum_h)
+    {
+        bLoopVirgin = false;
+    }
+    else
+    {
+        for (Ncv32u i=0; bLoopVirgin && i < h_outElemNum_h; i++)
+        {
+            if (h_vecDst.ptr()[i] != h_vecDst_d.ptr()[i])
+            {
+                bLoopVirgin = false;
+            }
+        }
+    }
+    NCV_SKIP_COND_END
+
+    if (bLoopVirgin)
+    {
+        rcode = true;
+    }
+
+    return rcode;
+}
+\r
+\r
+// No per-test cleanup needed; allocations are RAII-scoped inside process().
+bool TestCompact::deinit()
+{
+    return true;
+}
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testhypothesescompact_h_\r
+#define _testhypothesescompact_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+// Validates NPP_staging stream compaction: removes all occurrences of a
+// sentinel value from a vector on the GPU and checks the result against the
+// host reference implementation (see TestCompact.cpp).
+class TestCompact : public NCVTestProvider
+{
+public:
+
+    TestCompact(std::string testName, NCVTestSourceProvider<Ncv32u> &src,
+                Ncv32u length, Ncv32u badElem, Ncv32u badElemPercentage);
+
+    virtual bool init();
+    virtual bool process();
+    virtual bool deinit();
+    virtual bool toString(std::ofstream &strOut);
+
+private:
+    // Non-copyable: holds a reference to a shared source provider.
+    TestCompact(const TestCompact&);
+    TestCompact& operator=(const TestCompact&); 
+
+
+    NCVTestSourceProvider<Ncv32u> &src;  // shared random-data source (not owned)
+    Ncv32u length;                       // number of elements to compact
+    Ncv32u badElem;                      // sentinel value removed by compaction
+    Ncv32u badElemPercentage;            // approx. % of elements set to badElem (<=100)
+};
+\r
+#endif // _testhypothesescompact_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include "TestDrawRects.h"\r
+#include "NCVHaarObjectDetection.hpp"\r
+\r
+\r
+// Constructs a rectangle-drawing test over a width x height image filled
+// from 'src'; 'src32u' supplies random bits for the rectangle geometry and
+// 'color' is the fill value used for the drawn rectangle borders.
+template <class T>
+TestDrawRects<T>::TestDrawRects(std::string testName, NCVTestSourceProvider<T> &src, NCVTestSourceProvider<Ncv32u> &src32u,
+                                Ncv32u width, Ncv32u height, Ncv32u numRects, T color)
+    :
+    NCVTestProvider(testName),
+    src(src),
+    src32u(src32u),
+    width(width),
+    height(height),
+    numRects(numRects),
+    color(color)
+{
+}
+\r
+\r
+// Serializes the test construction parameters, one "name=value" per line.
+template <class T>
+bool TestDrawRects<T>::toString(std::ofstream &strOut)
+{
+    strOut << "sizeof(T)=" << sizeof(T) << std::endl
+           << "width=" << width << std::endl
+           << "height=" << height << std::endl
+           << "numRects=" << numRects << std::endl
+           << "color=" << color << std::endl;
+    return true;
+}
+\r
+\r
+// No per-test initialization needed; all setup happens in process().
+template <class T>
+bool TestDrawRects<T>::init()
+{
+    return true;
+}
+\r
+\r
+// Draws the same random rectangles on the GPU (ncvDrawRects_*_device) and on
+// the host (ncvDrawRects_*_host) over identical source images, then compares
+// the two images bit-to-bit.
+template <class T>
+bool TestDrawRects<T>::process()
+{
+    NCVStatus ncvStat;
+    bool rcode = false;
+
+    // Device image, host image, and a host copy of the device result.
+    NCVMatrixAlloc<T> d_img(*this->allocatorGPU.get(), this->width, this->height);
+    ncvAssertReturn(d_img.isMemAllocated(), false);
+    NCVMatrixAlloc<T> h_img(*this->allocatorCPU.get(), this->width, this->height);
+    ncvAssertReturn(h_img.isMemAllocated(), false);
+    NCVMatrixAlloc<T> h_img_d(*this->allocatorCPU.get(), this->width, this->height);
+    ncvAssertReturn(h_img_d.isMemAllocated(), false);
+
+    NCVVectorAlloc<NcvRect32u> d_rects(*this->allocatorGPU.get(), this->numRects);
+    ncvAssertReturn(d_rects.isMemAllocated(), false);
+    NCVVectorAlloc<NcvRect32u> h_rects(*this->allocatorCPU.get(), this->numRects);
+    ncvAssertReturn(h_rects.isMemAllocated(), false);
+
+    // Skip real work during the counting (memory-sizing) pass.
+    NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
+    NCV_SKIP_COND_BEGIN
+    ncvAssertReturn(this->src.fill(h_img), false);
+    ncvStat = h_img.copySolid(d_img, 0);
+    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+    ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);
+
+    //fill vector of rectangles with random rects covering the input
+    // Reinterpret the rect vector as raw Ncv32u so src32u can fill it, then
+    // scale the random fields into plausible rect coordinates.
+    // NOTE(review): the scaling below divides by RAND_MAX, which assumes the
+    // raw values from src32u do not exceed RAND_MAX — confirm against how the
+    // provider is constructed by the caller.
+    NCVVectorReuse<Ncv32u> h_rects_as32u(h_rects.getSegment());
+    ncvAssertReturn(h_rects_as32u.isMemReused(), false);
+    ncvAssertReturn(this->src32u.fill(h_rects_as32u), false);
+    for (Ncv32u i=0; i<this->numRects; i++)
+    {
+        h_rects.ptr()[i].x = (Ncv32u)(((1.0 * h_rects.ptr()[i].x) / RAND_MAX) * (this->width-2));
+        h_rects.ptr()[i].y = (Ncv32u)(((1.0 * h_rects.ptr()[i].y) / RAND_MAX) * (this->height-2));
+        h_rects.ptr()[i].width = (Ncv32u)(((1.0 * h_rects.ptr()[i].width) / RAND_MAX) * (this->width+10 - h_rects.ptr()[i].x));
+        h_rects.ptr()[i].height = (Ncv32u)(((1.0 * h_rects.ptr()[i].height) / RAND_MAX) * (this->height+10 - h_rects.ptr()[i].y));
+    }
+    ncvStat = h_rects.copySolid(d_rects, 0);
+    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+    ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);
+
+    // Dispatch on element size: this template is only instantiated for
+    // Ncv8u and Ncv32u (see explicit instantiations at the end of the file).
+    if (sizeof(T) == sizeof(Ncv32u))
+    {
+        ncvStat = ncvDrawRects_32u_device((Ncv32u *)d_img.ptr(), d_img.stride(), this->width, this->height,
+                                          (NcvRect32u *)d_rects.ptr(), this->numRects, this->color, 0);
+    }
+    else if (sizeof(T) == sizeof(Ncv8u))
+    {
+        ncvStat = ncvDrawRects_8u_device((Ncv8u *)d_img.ptr(), d_img.stride(), this->width, this->height,
+                                         (NcvRect32u *)d_rects.ptr(), this->numRects, (Ncv8u)this->color, 0);
+    }
+    else
+    {
+        ncvAssertPrintReturn(false, "Incorrect drawrects test instance", false);
+    }
+    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+    NCV_SKIP_COND_END
+
+    ncvStat = d_img.copySolid(h_img_d, 0);
+    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+    ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);
+
+    NCV_SKIP_COND_BEGIN
+    // Host (golden) reference drawing over the same host image and rects.
+    if (sizeof(T) == sizeof(Ncv32u))
+    {
+        ncvStat = ncvDrawRects_32u_host((Ncv32u *)h_img.ptr(), h_img.stride(), this->width, this->height,
+                                        (NcvRect32u *)h_rects.ptr(), this->numRects, this->color);
+    }
+    else if (sizeof(T) == sizeof(Ncv8u))
+    {
+        ncvStat = ncvDrawRects_8u_host((Ncv8u *)h_img.ptr(), h_img.stride(), this->width, this->height,
+                                       (NcvRect32u *)h_rects.ptr(), this->numRects, (Ncv8u)this->color);
+    }
+    else
+    {
+        ncvAssertPrintReturn(false, "Incorrect drawrects test instance", false);
+    }
+    ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+    NCV_SKIP_COND_END
+
+    //bit-to-bit check
+    bool bLoopVirgin = true;
+
+    NCV_SKIP_COND_BEGIN
+    //const Ncv64f relEPS = 0.005;
+    for (Ncv32u i=0; bLoopVirgin && i < h_img.height(); i++)
+    {
+        for (Ncv32u j=0; bLoopVirgin && j < h_img.width(); j++)
+        {
+            if (h_img.ptr()[h_img.stride()*i+j] != h_img_d.ptr()[h_img_d.stride()*i+j])
+            {
+                bLoopVirgin = false;
+            }
+        }
+    }
+    NCV_SKIP_COND_END
+
+    if (bLoopVirgin)
+    {
+        rcode = true;
+    }
+
+    return rcode;
+}
+\r
+\r
+// No per-test cleanup needed; allocations are RAII-scoped inside process().
+template <class T>
+bool TestDrawRects<T>::deinit()
+{
+    return true;
+}
+
+
+// Explicit instantiations: the only element types the dispatch in process()
+// supports (8-bit and 32-bit pixels).
+template class TestDrawRects<Ncv8u>;
+template class TestDrawRects<Ncv32u>;
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testdrawrects_h_\r
+#define _testdrawrects_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+// Validates GPU rectangle drawing (ncvDrawRects_*_device) against the host
+// reference (ncvDrawRects_*_host) on randomly generated images and rects.
+// Instantiated for Ncv8u and Ncv32u only (see TestDrawRects.cpp).
+template <class T>
+class TestDrawRects : public NCVTestProvider
+{
+public:
+
+    TestDrawRects(std::string testName, NCVTestSourceProvider<T> &src, NCVTestSourceProvider<Ncv32u> &src32u,
+                  Ncv32u width, Ncv32u height, Ncv32u numRects, T color);
+
+    virtual bool init();
+    virtual bool process();
+    virtual bool deinit();
+    virtual bool toString(std::ofstream &strOut);
+
+private:
+
+    // Non-copyable: holds references to shared source providers.
+    TestDrawRects(const TestDrawRects&);
+    TestDrawRects& operator=(const TestDrawRects&); 
+
+    NCVTestSourceProvider<T> &src;          // image data source (not owned)
+    NCVTestSourceProvider<Ncv32u> &src32u;  // random bits for rect geometry (not owned)
+    Ncv32u width;                           // test image width
+    Ncv32u height;                          // test image height
+    Ncv32u numRects;                        // number of rectangles to draw
+    T color;                                // fill value for rectangle borders
+};
+\r
+#endif // _testdrawrects_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include <float.h>\r
+\r
+#if defined(__GNUC__)\r
+ #include <fpu_control.h>\r
+#endif\r
+\r
+#include "TestHaarCascadeApplication.h"\r
+#include "NCVHaarObjectDetection.hpp"\r
+\r
+\r
+// Constructs a Haar-cascade application test: runs the cascade loaded from
+// 'cascadeName' over a width x height image filled from 'src'.
+TestHaarCascadeApplication::TestHaarCascadeApplication(std::string testName, NCVTestSourceProvider<Ncv8u> &src,
+                                                       std::string cascadeName, Ncv32u width, Ncv32u height)
+    :
+    NCVTestProvider(testName),
+    src(src),
+    cascadeName(cascadeName),
+    width(width),
+    height(height)
+{
+}
+\r
+\r
+// Serializes the test construction parameters, one "name=value" per line.
+bool TestHaarCascadeApplication::toString(std::ofstream &strOut)
+{
+    strOut << "cascadeName=" << cascadeName << std::endl;
+    strOut << "width=" << width << std::endl;
+    strOut << "height=" << height << std::endl;
+    return true;
+}
+\r
+\r
+// No per-test initialization needed; all setup happens in process().
+bool TestHaarCascadeApplication::init()
+{
+    return true;
+}
+\r
+\r
+bool TestHaarCascadeApplication::process()\r
+{\r
+ NCVStatus ncvStat;\r
+ bool rcode = false;\r
+\r
+ Ncv32u numStages, numNodes, numFeatures;\r
+\r
+ ncvStat = ncvHaarGetClassifierSize(this->cascadeName, numStages, numNodes, numFeatures);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+\r
+ NCVVectorAlloc<HaarStage64> h_HaarStages(*this->allocatorCPU.get(), numStages);\r
+ ncvAssertReturn(h_HaarStages.isMemAllocated(), false);\r
+ NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(*this->allocatorCPU.get(), numNodes);\r
+ ncvAssertReturn(h_HaarNodes.isMemAllocated(), false);\r
+ NCVVectorAlloc<HaarFeature64> h_HaarFeatures(*this->allocatorCPU.get(), numFeatures);\r
+ ncvAssertReturn(h_HaarFeatures.isMemAllocated(), false);\r
+\r
+ NCVVectorAlloc<HaarStage64> d_HaarStages(*this->allocatorGPU.get(), numStages);\r
+ ncvAssertReturn(d_HaarStages.isMemAllocated(), false);\r
+ NCVVectorAlloc<HaarClassifierNode128> d_HaarNodes(*this->allocatorGPU.get(), numNodes);\r
+ ncvAssertReturn(d_HaarNodes.isMemAllocated(), false);\r
+ NCVVectorAlloc<HaarFeature64> d_HaarFeatures(*this->allocatorGPU.get(), numFeatures);\r
+ ncvAssertReturn(d_HaarFeatures.isMemAllocated(), false);\r
+\r
+ HaarClassifierCascadeDescriptor haar;\r
+ haar.ClassifierSize.width = haar.ClassifierSize.height = 1;\r
+ haar.bNeedsTiltedII = false;\r
+ haar.NumClassifierRootNodes = numNodes;\r
+ haar.NumClassifierTotalNodes = numNodes;\r
+ haar.NumFeatures = numFeatures;\r
+ haar.NumStages = numStages;\r
+\r
+ NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());\r
+ NCV_SKIP_COND_BEGIN\r
+\r
+ ncvStat = ncvHaarLoadFromFile_host(this->cascadeName, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+\r
+ ncvAssertReturn(NCV_SUCCESS == h_HaarStages.copySolid(d_HaarStages, 0), false);\r
+ ncvAssertReturn(NCV_SUCCESS == h_HaarNodes.copySolid(d_HaarNodes, 0), false);\r
+ ncvAssertReturn(NCV_SUCCESS == h_HaarFeatures.copySolid(d_HaarFeatures, 0), false);\r
+ ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);\r
+\r
+ NCV_SKIP_COND_END\r
+\r
+ NcvSize32s srcRoi, srcIIRoi, searchRoi;\r
+ srcRoi.width = this->width;\r
+ srcRoi.height = this->height;\r
+ srcIIRoi.width = srcRoi.width + 1;\r
+ srcIIRoi.height = srcRoi.height + 1;\r
+ searchRoi.width = srcIIRoi.width - haar.ClassifierSize.width;\r
+ searchRoi.height = srcIIRoi.height - haar.ClassifierSize.height;\r
+ if (searchRoi.width <= 0 || searchRoi.height <= 0)\r
+ {\r
+ return false;\r
+ }\r
+ NcvSize32u searchRoiU(searchRoi.width, searchRoi.height);\r
+\r
+ NCVMatrixAlloc<Ncv8u> d_img(*this->allocatorGPU.get(), this->width, this->height);\r
+ ncvAssertReturn(d_img.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv8u> h_img(*this->allocatorCPU.get(), this->width, this->height);\r
+ ncvAssertReturn(h_img.isMemAllocated(), false);\r
+\r
+ Ncv32u integralWidth = this->width + 1;\r
+ Ncv32u integralHeight = this->height + 1;\r
+\r
+ NCVMatrixAlloc<Ncv32u> d_integralImage(*this->allocatorGPU.get(), integralWidth, integralHeight);\r
+ ncvAssertReturn(d_integralImage.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(*this->allocatorGPU.get(), integralWidth, integralHeight);\r
+ ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv32u> h_integralImage(*this->allocatorCPU.get(), integralWidth, integralHeight);\r
+ ncvAssertReturn(h_integralImage.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv64u> h_sqIntegralImage(*this->allocatorCPU.get(), integralWidth, integralHeight);\r
+ ncvAssertReturn(h_sqIntegralImage.isMemAllocated(), false);\r
+\r
+ NCVMatrixAlloc<Ncv32f> d_rectStdDev(*this->allocatorGPU.get(), this->width, this->height);\r
+ ncvAssertReturn(d_rectStdDev.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv32u> d_pixelMask(*this->allocatorGPU.get(), this->width, this->height);\r
+ ncvAssertReturn(d_pixelMask.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv32f> h_rectStdDev(*this->allocatorCPU.get(), this->width, this->height);\r
+ ncvAssertReturn(h_rectStdDev.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv32u> h_pixelMask(*this->allocatorCPU.get(), this->width, this->height);\r
+ ncvAssertReturn(h_pixelMask.isMemAllocated(), false);\r
+\r
+ NCVVectorAlloc<NcvRect32u> d_hypotheses(*this->allocatorGPU.get(), this->width * this->height);\r
+ ncvAssertReturn(d_hypotheses.isMemAllocated(), false);\r
+ NCVVectorAlloc<NcvRect32u> h_hypotheses(*this->allocatorCPU.get(), this->width * this->height);\r
+ ncvAssertReturn(h_hypotheses.isMemAllocated(), false);\r
+\r
+ NCVStatus nppStat;\r
+ Ncv32u szTmpBufIntegral, szTmpBufSqIntegral;\r
+ nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(this->width, this->height), &szTmpBufIntegral, this->devProp);\r
+ ncvAssertReturn(nppStat == NPPST_SUCCESS, false);\r
+ nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(this->width, this->height), &szTmpBufSqIntegral, this->devProp);\r
+ ncvAssertReturn(nppStat == NPPST_SUCCESS, false);\r
+ NCVVectorAlloc<Ncv8u> d_tmpIIbuf(*this->allocatorGPU.get(), std::max(szTmpBufIntegral, szTmpBufSqIntegral));\r
+ ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), false);\r
+\r
+ Ncv32u detectionsOnThisScale_d = 0;\r
+ Ncv32u detectionsOnThisScale_h = 0;\r
+\r
+ NCV_SKIP_COND_BEGIN\r
+\r
+ ncvAssertReturn(this->src.fill(h_img), false);\r
+ ncvStat = h_img.copySolid(d_img, 0);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+ ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);\r
+\r
+ nppStat = nppiStIntegral_8u32u_C1R(d_img.ptr(), d_img.pitch(),\r
+ d_integralImage.ptr(), d_integralImage.pitch(),\r
+ NcvSize32u(d_img.width(), d_img.height()),\r
+ d_tmpIIbuf.ptr(), szTmpBufIntegral, this->devProp);\r
+ ncvAssertReturn(nppStat == NPPST_SUCCESS, false);\r
+\r
+ nppStat = nppiStSqrIntegral_8u64u_C1R(d_img.ptr(), d_img.pitch(),\r
+ d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),\r
+ NcvSize32u(d_img.width(), d_img.height()),\r
+ d_tmpIIbuf.ptr(), szTmpBufSqIntegral, this->devProp);\r
+ ncvAssertReturn(nppStat == NPPST_SUCCESS, false);\r
+\r
+ const NcvRect32u rect(\r
+ HAAR_STDDEV_BORDER,\r
+ HAAR_STDDEV_BORDER,\r
+ haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER,\r
+ haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER);\r
+ nppStat = nppiStRectStdDev_32f_C1R(\r
+ d_integralImage.ptr(), d_integralImage.pitch(),\r
+ d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(),\r
+ d_rectStdDev.ptr(), d_rectStdDev.pitch(),\r
+ NcvSize32u(searchRoi.width, searchRoi.height), rect,\r
+ 1.0f, true);\r
+ ncvAssertReturn(nppStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = d_integralImage.copySolid(h_integralImage, 0);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+ ncvStat = d_rectStdDev.copySolid(h_rectStdDev, 0);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+\r
+ for (Ncv32u i=0; i<searchRoiU.height; i++)\r
+ {\r
+ for (Ncv32u j=0; j<h_pixelMask.stride(); j++)\r
+ {\r
+ if (j<searchRoiU.width)\r
+ {\r
+ h_pixelMask.ptr()[i*h_pixelMask.stride()+j] = (i << 16) | j;\r
+ }\r
+ else\r
+ {\r
+ h_pixelMask.ptr()[i*h_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U;\r
+ }\r
+ }\r
+ }\r
+ ncvAssertReturn(cudaSuccess == cudaStreamSynchronize(0), false);\r
+\r
+#if defined(__GNUC__)\r
+ //http://www.christian-seiler.de/projekte/fpmath/\r
+\r
+ fpu_control_t fpu_oldcw, fpu_cw;\r
+ _FPU_GETCW(fpu_oldcw); // store old cw\r
+ fpu_cw = (fpu_oldcw & ~_FPU_EXTENDED & ~_FPU_DOUBLE & ~_FPU_SINGLE) | _FPU_SINGLE;\r
+ _FPU_SETCW(fpu_cw);\r
+\r
+ // calculations here\r
+ ncvStat = ncvApplyHaarClassifierCascade_host(\r
+ h_integralImage, h_rectStdDev, h_pixelMask,\r
+ detectionsOnThisScale_h,\r
+ haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,\r
+ searchRoiU, 1, 1.0f);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+\r
+ _FPU_SETCW(fpu_oldcw); // restore old cw\r
+#else\r
+ Ncv32u fpu_oldcw, fpu_cw;\r
+ _controlfp_s(&fpu_cw, 0, 0);\r
+ fpu_oldcw = fpu_cw;\r
+ _controlfp_s(&fpu_cw, _PC_24, _MCW_PC);\r
+ ncvStat = ncvApplyHaarClassifierCascade_host(\r
+ h_integralImage, h_rectStdDev, h_pixelMask,\r
+ detectionsOnThisScale_h,\r
+ haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, false,\r
+ searchRoiU, 1, 1.0f);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+ _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC);\r
+#endif\r
+ NCV_SKIP_COND_END\r
+\r
+ int devId;\r
+ ncvAssertCUDAReturn(cudaGetDevice(&devId), false);\r
+ cudaDeviceProp devProp;\r
+ ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), false);\r
+\r
+ ncvStat = ncvApplyHaarClassifierCascade_device(\r
+ d_integralImage, d_rectStdDev, d_pixelMask,\r
+ detectionsOnThisScale_d,\r
+ haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false,\r
+ searchRoiU, 1, 1.0f,\r
+ *this->allocatorGPU.get(), *this->allocatorCPU.get(),\r
+ devProp, 0);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+\r
+ NCVMatrixAlloc<Ncv32u> h_pixelMask_d(*this->allocatorCPU.get(), this->width, this->height);\r
+ ncvAssertReturn(h_pixelMask_d.isMemAllocated(), false);\r
+\r
+ //bit-to-bit check\r
+ bool bLoopVirgin = true;\r
+\r
+ NCV_SKIP_COND_BEGIN\r
+\r
+ ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0);\r
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);\r
+\r
+ if (detectionsOnThisScale_d != detectionsOnThisScale_h)\r
+ {\r
+ bLoopVirgin = false;\r
+ }\r
+ else\r
+ {\r
+ std::sort(h_pixelMask_d.ptr(), h_pixelMask_d.ptr() + detectionsOnThisScale_d);\r
+ for (Ncv32u i=0; i<detectionsOnThisScale_d && bLoopVirgin; i++)\r
+ {\r
+ if (h_pixelMask.ptr()[i] != h_pixelMask_d.ptr()[i])\r
+ {\r
+ bLoopVirgin = false;\r
+ }\r
+ }\r
+ }\r
+\r
+ NCV_SKIP_COND_END\r
+\r
+ if (bLoopVirgin)\r
+ {\r
+ rcode = true;\r
+ }\r
+\r
+ return rcode;\r
+}\r
+\r
+\r
+//! Tear-down hook; all buffers used by process() are RAII-scoped, so nothing to free.
+bool TestHaarCascadeApplication::deinit()
+{
+ return true;
+}
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testhaarcascadeapplication_h_\r
+#define _testhaarcascadeapplication_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+//! Test case: runs a Haar classifier cascade over an image on both host and
+//! device and compares the resulting detection masks (see process() in the
+//! corresponding .cpp). Non-copyable: holds a reference to the source provider.
+class TestHaarCascadeApplication : public NCVTestProvider
+{
+public:
+
+ //! \param testName    name reported by the test framework
+ //! \param src         provider of 8-bit source image data (held by reference)
+ //! \param cascadeName path of the cascade file to load
+ //! \param width,height dimensions of the test image
+ TestHaarCascadeApplication(std::string testName, NCVTestSourceProvider<Ncv8u> &src,\r
+ std::string cascadeName, Ncv32u width, Ncv32u height);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+ // Copying disabled: the reference member 'src' cannot be reseated.
+ TestHaarCascadeApplication(const TestHaarCascadeApplication&);\r
+ TestHaarCascadeApplication& operator=(const TestHaarCascadeApplication&); \r
+\r
+\r
+ NCVTestSourceProvider<Ncv8u> &src;\r
+ std::string cascadeName;\r
+ Ncv32u width;\r
+ Ncv32u height;\r
+};\r
+\r
+#endif // _testhaarcascadeapplication_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include "TestHaarCascadeLoader.h"\r
+#include "NCVHaarObjectDetection.hpp"\r
+\r
+\r
+//! Constructs the loader test; stores the cascade file path for process().
+TestHaarCascadeLoader::TestHaarCascadeLoader(std::string testName, std::string cascadeName)
+ :
+ NCVTestProvider(testName),
+ cascadeName(cascadeName)
+{
+}
+\r
+\r
+//! Dumps the test parameters (cascade file name) to the report stream.
+bool TestHaarCascadeLoader::toString(std::ofstream &strOut)
+{
+ strOut << "cascadeName=" << cascadeName << std::endl;
+ return true;
+}
+\r
+\r
+//! Per-test setup hook; this test needs no extra initialization.
+bool TestHaarCascadeLoader::init()
+{
+ return true;
+}
+\r
+\r
+//! Round-trips a Haar cascade through the NVBIN binary format:
+//! load from the source file -> store as "test.nvbin" -> reload, then verify
+//! that the counts, the descriptor, and all stage/node/feature payloads match
+//! bit-to-bit. Returns true only if every comparison passes.
+bool TestHaarCascadeLoader::process()
+{
+ NCVStatus ncvStat;
+ bool rcode = false;
+
+ Ncv32u numStages, numNodes, numFeatures;
+ Ncv32u numStages_2 = 0, numNodes_2 = 0, numFeatures_2 = 0;
+
+ // Query cascade dimensions from the source file to size the host buffers.
+ ncvStat = ncvHaarGetClassifierSize(this->cascadeName, numStages, numNodes, numFeatures);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+
+ // First set of buffers: cascade loaded from the original file.
+ NCVVectorAlloc<HaarStage64> h_HaarStages(*this->allocatorCPU.get(), numStages);
+ ncvAssertReturn(h_HaarStages.isMemAllocated(), false);
+ NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(*this->allocatorCPU.get(), numNodes);
+ ncvAssertReturn(h_HaarNodes.isMemAllocated(), false);
+ NCVVectorAlloc<HaarFeature64> h_HaarFeatures(*this->allocatorCPU.get(), numFeatures);
+ ncvAssertReturn(h_HaarFeatures.isMemAllocated(), false);
+
+ // Second set of buffers: cascade reloaded from the NVBIN round-trip.
+ NCVVectorAlloc<HaarStage64> h_HaarStages_2(*this->allocatorCPU.get(), numStages);
+ ncvAssertReturn(h_HaarStages_2.isMemAllocated(), false);
+ NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes_2(*this->allocatorCPU.get(), numNodes);
+ ncvAssertReturn(h_HaarNodes_2.isMemAllocated(), false);
+ NCVVectorAlloc<HaarFeature64> h_HaarFeatures_2(*this->allocatorCPU.get(), numFeatures);
+ ncvAssertReturn(h_HaarFeatures_2.isMemAllocated(), false);
+
+ HaarClassifierCascadeDescriptor haar;
+ HaarClassifierCascadeDescriptor haar_2;
+
+ // Skip the actual work during the allocator's counting (sizing) pass.
+ NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
+ NCV_SKIP_COND_BEGIN
+
+ const std::string testNvbinName = "test.nvbin";
+ ncvStat = ncvHaarLoadFromFile_host(this->cascadeName, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+
+ ncvStat = ncvHaarStoreNVBIN_host(testNvbinName, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+
+ ncvStat = ncvHaarGetClassifierSize(testNvbinName, numStages_2, numNodes_2, numFeatures_2);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+
+ ncvStat = ncvHaarLoadFromFile_host(testNvbinName, haar_2, h_HaarStages_2, h_HaarNodes_2, h_HaarFeatures_2);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+
+ NCV_SKIP_COND_END
+
+ //bit-to-bit check
+ bool bLoopVirgin = true;
+
+ NCV_SKIP_COND_BEGIN
+
+ // Compare counts and every descriptor field between original and reload.
+ if (
+ numStages_2 != numStages ||
+ numNodes_2 != numNodes ||
+ numFeatures_2 != numFeatures ||
+ haar.NumStages != haar_2.NumStages ||
+ haar.NumClassifierRootNodes != haar_2.NumClassifierRootNodes ||
+ haar.NumClassifierTotalNodes != haar_2.NumClassifierTotalNodes ||
+ haar.NumFeatures != haar_2.NumFeatures ||
+ haar.ClassifierSize.width != haar_2.ClassifierSize.width ||
+ haar.ClassifierSize.height != haar_2.ClassifierSize.height ||
+ haar.bNeedsTiltedII != haar_2.bNeedsTiltedII ||
+ haar.bHasStumpsOnly != haar_2.bHasStumpsOnly )
+ {
+ bLoopVirgin = false;
+ }
+ // NOTE(review): these memcmp calls run even when the count check above has
+ // already failed; harmless since both buffer sets were sized identically.
+ if (memcmp(h_HaarStages.ptr(), h_HaarStages_2.ptr(), haar.NumStages * sizeof(HaarStage64)) ||
+ memcmp(h_HaarNodes.ptr(), h_HaarNodes_2.ptr(), haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128)) ||
+ memcmp(h_HaarFeatures.ptr(), h_HaarFeatures_2.ptr(), haar.NumFeatures * sizeof(HaarFeature64)) )
+ {
+ bLoopVirgin = false;
+ }
+ NCV_SKIP_COND_END
+
+ if (bLoopVirgin)
+ {
+ rcode = true;
+ }
+
+ return rcode;
+}
+\r
+\r
+//! Tear-down hook; all allocations in process() are RAII-scoped, nothing to free.
+bool TestHaarCascadeLoader::deinit()
+{
+ return true;
+}
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testhaarcascadeloader_h_\r
+#define _testhaarcascadeloader_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+//! Test case: round-trips a Haar cascade file through the NVBIN binary format
+//! and verifies the reloaded cascade matches the original bit-to-bit.
+class TestHaarCascadeLoader : public NCVTestProvider
+{
+public:
+
+ //! \param testName    name reported by the test framework
+ //! \param cascadeName path of the cascade file to round-trip
+ TestHaarCascadeLoader(std::string testName, std::string cascadeName);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+\r
+ std::string cascadeName;\r
+};\r
+\r
+#endif // _testhaarcascadeloader_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include "TestHypothesesFilter.h"\r
+#include "NCVHaarObjectDetection.hpp"\r
+\r
+\r
+//! Constructs the hypotheses-filter test.
+//! \param src          provider of random 32-bit data (held by reference)
+//! \param numDstRects  number of ground-truth rectangles to synthesize
+//! \param minNeighbors filtering threshold passed to ncvFilterHypotheses_host
+//! \param eps          relative tolerance for rectangle clustering/comparison
+TestHypothesesFilter::TestHypothesesFilter(std::string testName, NCVTestSourceProvider<Ncv32u> &src,
+ Ncv32u numDstRects, Ncv32u minNeighbors, Ncv32f eps)
+ :
+ NCVTestProvider(testName),
+ src(src),
+ numDstRects(numDstRects),
+ minNeighbors(minNeighbors),
+ eps(eps)
+{
+}
+\r
+\r
+//! Dumps the test parameters to the report stream.
+bool TestHypothesesFilter::toString(std::ofstream &strOut)
+{
+ strOut << "numDstRects=" << numDstRects << std::endl;
+ strOut << "minNeighbors=" << minNeighbors << std::endl;
+ strOut << "eps=" << eps << std::endl;
+ return true;
+}
+\r
+\r
+//! Fixes the synthetic canvas size on which ground-truth rectangles are laid out.
+bool TestHypothesesFilter::init()
+{
+ this->canvasWidth = 4096;
+ this->canvasHeight = 4096;
+ return true;
+}
+\r
+\r
+//! Fuzzy rectangle equality: all four edges (left, top, right, bottom) must
+//! differ by at most delta = eps * mean of the two smaller dimensions / 2.
+//! Casts to signed Ncv32s so the unsigned coordinate differences don't wrap.
+bool compareRects(const NcvRect32u &r1, const NcvRect32u &r2, Ncv32f eps)
+{
+ double delta = eps*(std::min(r1.width, r2.width) + std::min(r1.height, r2.height))*0.5;
+ return std::abs((Ncv32s)r1.x - (Ncv32s)r2.x) <= delta &&
+ std::abs((Ncv32s)r1.y - (Ncv32s)r2.y) <= delta &&
+ std::abs((Ncv32s)r1.x + (Ncv32s)r1.width - (Ncv32s)r2.x - (Ncv32s)r2.width) <= delta &&
+ std::abs((Ncv32s)r1.y + (Ncv32s)r1.height - (Ncv32s)r2.y - (Ncv32s)r2.height) <= delta;
+}
+\r
+\r
+//! Ordering by x coordinate only, used by std::sort over filtered hypotheses.
+//! NOTE(review): rects sharing an x stay in unspecified relative order; the
+//! synthesized ground-truth rects here have distinct x values, so this is OK.
+inline bool operator < (const NcvRect32u &a, const NcvRect32u &b)
+{
+ return a.x < b.x;
+}
+\r
+\r
+//! Synthesizes clusters of "strong" hypotheses around known ground-truth
+//! rectangles plus off-canvas "weak" outliers, shuffles them, runs
+//! ncvFilterHypotheses_host, and verifies the survivors match the ground
+//! truth (fuzzy compare after sorting by x).
+bool TestHypothesesFilter::process()
+{
+ NCVStatus ncvStat;
+ bool rcode = false;
+
+ // Random pool reinterpreted as rect-sized chunks of Ncv32u entropy.
+ NCVVectorAlloc<Ncv32u> h_random32u(*this->allocatorCPU.get(), this->numDstRects * sizeof(NcvRect32u) / sizeof(Ncv32u));
+ ncvAssertReturn(h_random32u.isMemAllocated(), false);
+
+ // Each ground-truth rect gets a slot of up to 2*minNeighbors+1 hypotheses.
+ Ncv32u srcSlotSize = 2 * this->minNeighbors + 1;
+
+ NCVVectorAlloc<NcvRect32u> h_vecSrc(*this->allocatorCPU.get(), this->numDstRects*srcSlotSize);
+ ncvAssertReturn(h_vecSrc.isMemAllocated(), false);
+ NCVVectorAlloc<NcvRect32u> h_vecDst_groundTruth(*this->allocatorCPU.get(), this->numDstRects);
+ ncvAssertReturn(h_vecDst_groundTruth.isMemAllocated(), false);
+
+ // Skip the actual work during the allocator's counting (sizing) pass.
+ NCV_SET_SKIP_COND(this->allocatorCPU.get()->isCounting());
+
+ NCV_SKIP_COND_BEGIN
+ ncvAssertReturn(this->src.fill(h_random32u), false);
+ Ncv32u randCnt = 0;
+ Ncv64f randVal;
+
+ for (Ncv32u i=0; i<this->numDstRects; i++)
+ {
+ // Ground truth: evenly spaced rects, each half a cell wide/tall,
+ // centered in its grid cell on the canvas.
+ h_vecDst_groundTruth.ptr()[i].x = i * this->canvasWidth / this->numDstRects + this->canvasWidth / (this->numDstRects * 4);
+ h_vecDst_groundTruth.ptr()[i].y = i * this->canvasHeight / this->numDstRects + this->canvasHeight / (this->numDstRects * 4);
+ h_vecDst_groundTruth.ptr()[i].width = this->canvasWidth / (this->numDstRects * 2);
+ h_vecDst_groundTruth.ptr()[i].height = this->canvasHeight / (this->numDstRects * 2);
+
+ // Randomize how many strong hypotheses this slot gets, always > minNeighbors.
+ Ncv32u numNeighbors = this->minNeighbors + 1 + (Ncv32u)(((1.0 * h_random32u.ptr()[i]) * (this->minNeighbors + 1)) / 0xFFFFFFFF);
+ numNeighbors = (numNeighbors > srcSlotSize) ? srcSlotSize : numNeighbors;
+
+ //fill in strong hypotheses (2 * ((1.0 * randVal) / 0xFFFFFFFF) - 1)
+ for (Ncv32u j=0; j<numNeighbors; j++)
+ {
+ // Jitter each strong hypothesis around the ground truth by +-eps/2.
+ randVal = (1.0 * h_random32u.ptr()[randCnt++]) / 0xFFFFFFFF; randCnt = randCnt % h_random32u.length();
+ h_vecSrc.ptr()[srcSlotSize * i + j].x = 
+ h_vecDst_groundTruth.ptr()[i].x +
+ (Ncv32s)(h_vecDst_groundTruth.ptr()[i].width * this->eps * (randVal - 0.5));
+ randVal = (1.0 * h_random32u.ptr()[randCnt++]) / 0xFFFFFFFF; randCnt = randCnt % h_random32u.length();
+ h_vecSrc.ptr()[srcSlotSize * i + j].y = 
+ h_vecDst_groundTruth.ptr()[i].y +
+ (Ncv32s)(h_vecDst_groundTruth.ptr()[i].height * this->eps * (randVal - 0.5));
+ h_vecSrc.ptr()[srcSlotSize * i + j].width = h_vecDst_groundTruth.ptr()[i].width;
+ h_vecSrc.ptr()[srcSlotSize * i + j].height = h_vecDst_groundTruth.ptr()[i].height;
+ }
+
+ //generate weak hypotheses (to be removed in processing)
+ for (Ncv32u j=numNeighbors; j<srcSlotSize; j++)
+ {
+ // Offset by a full canvas dimension so they cannot cluster with anything.
+ randVal = (1.0 * h_random32u.ptr()[randCnt++]) / 0xFFFFFFFF; randCnt = randCnt % h_random32u.length();
+ h_vecSrc.ptr()[srcSlotSize * i + j].x = 
+ this->canvasWidth + h_vecDst_groundTruth.ptr()[i].x +
+ (Ncv32s)(h_vecDst_groundTruth.ptr()[i].width * this->eps * (randVal - 0.5));
+ randVal = (1.0 * h_random32u.ptr()[randCnt++]) / 0xFFFFFFFF; randCnt = randCnt % h_random32u.length();
+ h_vecSrc.ptr()[srcSlotSize * i + j].y = 
+ this->canvasHeight + h_vecDst_groundTruth.ptr()[i].y +
+ (Ncv32s)(h_vecDst_groundTruth.ptr()[i].height * this->eps * (randVal - 0.5));
+ h_vecSrc.ptr()[srcSlotSize * i + j].width = h_vecDst_groundTruth.ptr()[i].width;
+ h_vecSrc.ptr()[srcSlotSize * i + j].height = h_vecDst_groundTruth.ptr()[i].height;
+ }
+ }
+
+ //shuffle
+ // Fisher-Yates-style pass so slot ordering doesn't leak into the filter.
+ for (Ncv32u i=0; i<this->numDstRects*srcSlotSize-1; i++)
+ {
+ Ncv32u randVal = h_random32u.ptr()[randCnt++]; randCnt = randCnt % h_random32u.length();
+ Ncv32u secondSwap = randVal % (this->numDstRects*srcSlotSize-1 - i);
+ NcvRect32u tmp = h_vecSrc.ptr()[i + secondSwap];
+ h_vecSrc.ptr()[i + secondSwap] = h_vecSrc.ptr()[i];
+ h_vecSrc.ptr()[i] = tmp;
+ }
+ NCV_SKIP_COND_END
+
+ Ncv32u numHypothesesSrc = h_vecSrc.length();
+ NCV_SKIP_COND_BEGIN
+ // In-place filtering; numHypothesesSrc is updated to the surviving count.
+ ncvStat = ncvFilterHypotheses_host(h_vecSrc, numHypothesesSrc, this->minNeighbors, this->eps, NULL);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+ NCV_SKIP_COND_END
+
+ //verification
+ bool bLoopVirgin = true;
+
+ NCV_SKIP_COND_BEGIN
+ if (numHypothesesSrc != this->numDstRects)
+ {
+ bLoopVirgin = false;
+ }
+ else
+ {
+ // Sort survivors by x (see operator< above) to align with ground truth,
+ // then fuzzy-compare each pair within eps.
+ std::vector<NcvRect32u> tmpRects(numHypothesesSrc);
+ memcpy(&tmpRects[0], h_vecSrc.ptr(), numHypothesesSrc * sizeof(NcvRect32u));
+ std::sort(tmpRects.begin(), tmpRects.end());
+ for (Ncv32u i=0; i<numHypothesesSrc && bLoopVirgin; i++)
+ {
+ if (!compareRects(tmpRects[i], h_vecDst_groundTruth.ptr()[i], this->eps))
+ {
+ bLoopVirgin = false;
+ }
+ }
+ }
+ NCV_SKIP_COND_END
+
+ if (bLoopVirgin)
+ {
+ rcode = true;
+ }
+
+ return rcode;
+}
+\r
+\r
+//! Tear-down hook; all allocations in process() are RAII-scoped, nothing to free.
+bool TestHypothesesFilter::deinit()
+{
+ return true;
+}
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testhypothesesfilter_h_\r
+#define _testhypothesesfilter_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+//! Test case: synthesizes clustered + outlier detection rectangles and checks
+//! ncvFilterHypotheses_host recovers exactly the ground-truth set.
+//! Non-copyable: holds a reference to the random-source provider.
+class TestHypothesesFilter : public NCVTestProvider
+{
+public:
+
+ //! \param src          provider of random 32-bit data (held by reference)
+ //! \param numDstRects  number of ground-truth rectangles
+ //! \param minNeighbors filtering threshold
+ //! \param eps          relative clustering/comparison tolerance
+ TestHypothesesFilter(std::string testName, NCVTestSourceProvider<Ncv32u> &src,\r
+ Ncv32u numDstRects, Ncv32u minNeighbors, Ncv32f eps);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+\r
+ // Copying disabled: the reference member 'src' cannot be reseated.
+ TestHypothesesFilter(const TestHypothesesFilter&);\r
+ TestHypothesesFilter& operator=(const TestHypothesesFilter&); \r
+\r
+ NCVTestSourceProvider<Ncv32u> &src;\r
+ Ncv32u numDstRects;\r
+ Ncv32u minNeighbors;\r
+ Ncv32f eps;\r
+\r
+ // Synthetic canvas dimensions, set in init().
+ Ncv32u canvasWidth;\r
+ Ncv32u canvasHeight;\r
+};\r
+\r
+#endif // _testhypothesesfilter_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include "TestHypothesesGrow.h"\r
+#include "NCVHaarObjectDetection.hpp"\r
+\r
+\r
+//! Constructs the detections-vector-grow test.
+//! \param src        provider of random 32-bit data (held by reference)
+//! \param rectWidth,rectHeight base rectangle size to grow from each seed
+//! \param rectScale  scale factor applied when growing
+//! \param maxLenSrc / lenSrc  capacity and used length of the source vector
+//! \param maxLenDst / lenDst  capacity and pre-filled length of the dest vector
+TestHypothesesGrow::TestHypothesesGrow(std::string testName, NCVTestSourceProvider<Ncv32u> &src,
+ Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f rectScale, 
+ Ncv32u maxLenSrc, Ncv32u lenSrc, Ncv32u maxLenDst, Ncv32u lenDst)
+ :
+ NCVTestProvider(testName),
+ src(src),
+ rectWidth(rectWidth),
+ rectHeight(rectHeight),
+ rectScale(rectScale),
+ maxLenSrc(maxLenSrc),
+ lenSrc(lenSrc),
+ maxLenDst(maxLenDst),
+ lenDst(lenDst)
+{
+}
+\r
+\r
+//! Dumps the test parameters to the report stream.
+bool TestHypothesesGrow::toString(std::ofstream &strOut)
+{
+ strOut << "rectWidth=" << rectWidth << std::endl;
+ strOut << "rectHeight=" << rectHeight << std::endl;
+ strOut << "rectScale=" << rectScale << std::endl;
+ strOut << "maxLenSrc=" << maxLenSrc << std::endl;
+ strOut << "lenSrc=" << lenSrc << std::endl;
+ strOut << "maxLenDst=" << maxLenDst << std::endl;
+ strOut << "lenDst=" << lenDst << std::endl;
+ return true;
+}
+\r
+\r
+//! Per-test setup hook; this test needs no extra initialization.
+bool TestHypothesesGrow::init()
+{
+ return true;
+}
+\r
+\r
+//! Runs ncvGrowDetectionsVector on identical random inputs on both device and
+//! host, then compares output element counts and the full destination buffers
+//! bit-to-bit.
+bool TestHypothesesGrow::process()
+{
+ NCVStatus ncvStat;
+ bool rcode = false;
+
+ // Mirrored host/device source vectors of packed detection seeds.
+ NCVVectorAlloc<Ncv32u> h_vecSrc(*this->allocatorCPU.get(), this->maxLenSrc);
+ ncvAssertReturn(h_vecSrc.isMemAllocated(), false);
+ NCVVectorAlloc<Ncv32u> d_vecSrc(*this->allocatorGPU.get(), this->maxLenSrc);
+ ncvAssertReturn(d_vecSrc.isMemAllocated(), false);
+
+ // h_vecDst: host-side result; d_vecDst: device-side result;
+ // h_vecDst_d: host staging buffer for downloading d_vecDst.
+ NCVVectorAlloc<NcvRect32u> h_vecDst(*this->allocatorCPU.get(), this->maxLenDst);
+ ncvAssertReturn(h_vecDst.isMemAllocated(), false);
+ NCVVectorAlloc<NcvRect32u> d_vecDst(*this->allocatorGPU.get(), this->maxLenDst);
+ ncvAssertReturn(d_vecDst.isMemAllocated(), false);
+ NCVVectorAlloc<NcvRect32u> h_vecDst_d(*this->allocatorCPU.get(), this->maxLenDst);
+ ncvAssertReturn(h_vecDst_d.isMemAllocated(), false);
+
+ // Skip the actual work during the allocator's counting (sizing) pass.
+ NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
+
+ NCV_SKIP_COND_BEGIN
+ ncvAssertReturn(this->src.fill(h_vecSrc), false);
+ memset(h_vecDst.ptr(), 0, h_vecDst.length() * sizeof(NcvRect32u));
+ // Pre-fill the first lenDst rects with random data (reusing the buffer as
+ // raw Ncv32u) so both paths start from identical partially-filled output.
+ NCVVectorReuse<Ncv32u> h_vecDst_as32u(h_vecDst.getSegment(), lenDst * sizeof(NcvRect32u) / sizeof(Ncv32u));
+ ncvAssertReturn(h_vecDst_as32u.isMemReused(), false);
+ ncvAssertReturn(this->src.fill(h_vecDst_as32u), false);
+ memcpy(h_vecDst_d.ptr(), h_vecDst.ptr(), h_vecDst.length() * sizeof(NcvRect32u));
+ NCV_SKIP_COND_END
+
+ // Upload inputs on the default stream and wait for completion.
+ ncvStat = h_vecSrc.copySolid(d_vecSrc, 0);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+ ncvStat = h_vecDst.copySolid(d_vecDst, 0);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+ ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);
+
+ Ncv32u h_outElemNum_d = 0;
+ Ncv32u h_outElemNum_h = 0;
+ NCV_SKIP_COND_BEGIN
+ // Device path: grow into d_vecDst starting from lenDst existing elements.
+ h_outElemNum_d = this->lenDst;
+ ncvStat = ncvGrowDetectionsVector_device(d_vecSrc, this->lenSrc,
+ d_vecDst, h_outElemNum_d, this->maxLenDst,
+ this->rectWidth, this->rectHeight, this->rectScale, 0);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+ ncvStat = d_vecDst.copySolid(h_vecDst_d, 0);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+ ncvAssertCUDAReturn(cudaStreamSynchronize(0), false);
+
+ // Host reference path with identical parameters.
+ h_outElemNum_h = this->lenDst;
+ ncvStat = ncvGrowDetectionsVector_host(h_vecSrc, this->lenSrc,
+ h_vecDst, h_outElemNum_h, this->maxLenDst,
+ this->rectWidth, this->rectHeight, this->rectScale);
+ ncvAssertReturn(ncvStat == NCV_SUCCESS, false);
+ NCV_SKIP_COND_END
+
+ //bit-to-bit check
+ bool bLoopVirgin = true;
+
+ NCV_SKIP_COND_BEGIN
+ if (h_outElemNum_d != h_outElemNum_h)
+ {
+ bLoopVirgin = false;
+ }
+ else
+ {
+ // Compare full capacity, not just the used prefix: the untouched tail
+ // was identical by construction, so any difference is a real defect.
+ if (memcmp(h_vecDst.ptr(), h_vecDst_d.ptr(), this->maxLenDst * sizeof(NcvRect32u)))
+ {
+ bLoopVirgin = false;
+ }
+ }
+ NCV_SKIP_COND_END
+
+ if (bLoopVirgin)
+ {
+ rcode = true;
+ }
+
+ return rcode;
+}
+\r
+\r
+//! Tear-down hook; all allocations in process() are RAII-scoped, nothing to free.
+bool TestHypothesesGrow::deinit()
+{
+ return true;
+}
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testhypothesesgrow_h_\r
+#define _testhypothesesgrow_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+//! Test case: grows a vector of packed detection seeds into rectangles on both
+//! device and host and compares the results bit-to-bit.
+//! Non-copyable: holds a reference to the random-source provider.
+class TestHypothesesGrow : public NCVTestProvider
+{
+public:
+
+ //! \param src        provider of random 32-bit data (held by reference)
+ //! \param rectWidth,rectHeight,rectScale geometry of the grown rectangles
+ //! \param maxLenSrc/lenSrc, maxLenDst/lenDst vector capacities and used lengths
+ TestHypothesesGrow(std::string testName, NCVTestSourceProvider<Ncv32u> &src,\r
+ Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f rectScale, \r
+ Ncv32u maxLenSrc, Ncv32u lenSrc, Ncv32u maxLenDst, Ncv32u lenDst);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+ // Copying disabled: the reference member 'src' cannot be reseated.
+ TestHypothesesGrow(const TestHypothesesGrow&);\r
+ TestHypothesesGrow& operator=(const TestHypothesesGrow&); \r
+\r
+\r
+ NCVTestSourceProvider<Ncv32u> &src;\r
+ Ncv32u rectWidth;\r
+ Ncv32u rectHeight;\r
+ Ncv32f rectScale;\r
+ Ncv32u maxLenSrc;\r
+ Ncv32u lenSrc;\r
+ Ncv32u maxLenDst;\r
+ Ncv32u lenDst;\r
+};\r
+\r
+#endif // _testhypothesesgrow_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include <math.h>\r
+#include "TestIntegralImage.h"\r
+\r
+\r
+//! Constructs the integral-image test for a given input/output type pair.
+//! \param src provider of input pixel data (held by reference)
+//! \param width,height dimensions of the source image
+template <class T_in, class T_out>
+TestIntegralImage<T_in, T_out>::TestIntegralImage(std::string testName, NCVTestSourceProvider<T_in> &src,
+ Ncv32u width, Ncv32u height)
+ :
+ NCVTestProvider(testName),
+ src(src),
+ width(width),
+ height(height)
+{
+}
+\r
+\r
+//! Dumps the test parameters, including element sizes of the instantiated types.
+template <class T_in, class T_out>
+bool TestIntegralImage<T_in, T_out>::toString(std::ofstream &strOut)
+{
+ strOut << "sizeof(T_in)=" << sizeof(T_in) << std::endl;
+ strOut << "sizeof(T_out)=" << sizeof(T_out) << std::endl;
+ strOut << "width=" << width << std::endl;
+ strOut << "height=" << height << std::endl;
+ return true;
+}
+\r
+\r
+//! Per-test setup hook; this test needs no extra initialization.
+template <class T_in, class T_out>
+bool TestIntegralImage<T_in, T_out>::init()
+{
+ return true;
+}
+\r
+\r
+//! Computes the integral image on device (NPP staging) and on host, then
+//! compares the results: exact equality for the 8u->32u instantiation,
+//! tolerance of 0.01 per element for the 32f->32f instantiation.
+//! Type dispatch is done at runtime via sizeof(T_in) comparisons.
+template <class T_in, class T_out>
+bool TestIntegralImage<T_in, T_out>::process()
+{
+ NCVStatus ncvStat;
+ bool rcode = false;
+
+ // Integral image is one row and one column larger than the source.
+ Ncv32u widthII = this->width + 1;
+ Ncv32u heightII = this->height + 1;
+
+ NCVMatrixAlloc<T_in> d_img(*this->allocatorGPU.get(), this->width, this->height);
+ ncvAssertReturn(d_img.isMemAllocated(), false);
+ NCVMatrixAlloc<T_in> h_img(*this->allocatorCPU.get(), this->width, this->height);
+ ncvAssertReturn(h_img.isMemAllocated(), false);
+ NCVMatrixAlloc<T_out> d_imgII(*this->allocatorGPU.get(), widthII, heightII);
+ ncvAssertReturn(d_imgII.isMemAllocated(), false);
+ NCVMatrixAlloc<T_out> h_imgII(*this->allocatorCPU.get(), widthII, heightII);
+ ncvAssertReturn(h_imgII.isMemAllocated(), false);
+ // Host staging buffer for downloading the device result.
+ NCVMatrixAlloc<T_out> h_imgII_d(*this->allocatorCPU.get(), widthII, heightII);
+ ncvAssertReturn(h_imgII_d.isMemAllocated(), false);
+
+ // Query the scratch-buffer size required by the matching NPP primitive.
+ Ncv32u bufSize;
+ if (sizeof(T_in) == sizeof(Ncv8u))
+ {
+ ncvStat = nppiStIntegralGetSize_8u32u(NcvSize32u(this->width, this->height), &bufSize, this->devProp);
+ ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);
+ }
+ else if (sizeof(T_in) == sizeof(Ncv32f))
+ {
+ ncvStat = nppiStIntegralGetSize_32f32f(NcvSize32u(this->width, this->height), &bufSize, this->devProp);
+ ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);
+ }
+ else
+ {
+ ncvAssertPrintReturn(false, "Incorrect integral image test instance", false);
+ }
+
+ NCVVectorAlloc<Ncv8u> d_tmpBuf(*this->allocatorGPU.get(), bufSize);
+ ncvAssertReturn(d_tmpBuf.isMemAllocated(), false);
+
+ // Skip the actual work during the allocator's counting (sizing) pass.
+ NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());
+ NCV_SKIP_COND_BEGIN
+
+ ncvAssertReturn(this->src.fill(h_img), false);
+
+ // NOTE(review): copySolid returns an NCV status, but it is compared against
+ // NPPST_SUCCESS here (and below) -- works only if both success codes share
+ // the same value; should be NCV_SUCCESS for consistency.
+ ncvStat = h_img.copySolid(d_img, 0);
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+
+ // Device-side integral image via the NPP staging primitive for this T_in.
+ if (sizeof(T_in) == sizeof(Ncv8u))
+ {
+ ncvStat = nppiStIntegral_8u32u_C1R((Ncv8u *)d_img.ptr(), d_img.pitch(),
+ (Ncv32u *)d_imgII.ptr(), d_imgII.pitch(),
+ NcvSize32u(this->width, this->height),
+ d_tmpBuf.ptr(), bufSize, this->devProp);
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+ }
+ else if (sizeof(T_in) == sizeof(Ncv32f))
+ {
+ ncvStat = nppiStIntegral_32f32f_C1R((Ncv32f *)d_img.ptr(), d_img.pitch(),
+ (Ncv32f *)d_imgII.ptr(), d_imgII.pitch(),
+ NcvSize32u(this->width, this->height),
+ d_tmpBuf.ptr(), bufSize, this->devProp);
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+ }
+ else
+ {
+ ncvAssertPrintReturn(false, "Incorrect integral image test instance", false);
+ }
+
+ ncvStat = d_imgII.copySolid(h_imgII_d, 0);
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+
+ // Host-side reference computation with the matching _host primitive.
+ if (sizeof(T_in) == sizeof(Ncv8u))
+ {
+ ncvStat = nppiStIntegral_8u32u_C1R_host((Ncv8u *)h_img.ptr(), h_img.pitch(),
+ (Ncv32u *)h_imgII.ptr(), h_imgII.pitch(),
+ NcvSize32u(this->width, this->height));
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+ }
+ else if (sizeof(T_in) == sizeof(Ncv32f))
+ {
+ ncvStat = nppiStIntegral_32f32f_C1R_host((Ncv32f *)h_img.ptr(), h_img.pitch(),
+ (Ncv32f *)h_imgII.ptr(), h_imgII.pitch(),
+ NcvSize32u(this->width, this->height));
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);
+ }
+ else
+ {
+ ncvAssertPrintReturn(false, "Incorrect integral image test instance", false);
+ }
+
+ NCV_SKIP_COND_END
+
+ //bit-to-bit check
+ bool bLoopVirgin = true;
+
+ NCV_SKIP_COND_BEGIN
+ // Compare the full (width+1) x (height+1) integral, element by element.
+ for (Ncv32u i=0; bLoopVirgin && i < h_img.height() + 1; i++)
+ {
+ for (Ncv32u j=0; bLoopVirgin && j < h_img.width() + 1; j++)
+ {
+ if (sizeof(T_in) == sizeof(Ncv8u))
+ {
+ // Integer instantiation: results must match exactly.
+ if (h_imgII.ptr()[h_imgII.stride()*i+j] != h_imgII_d.ptr()[h_imgII_d.stride()*i+j])
+ {
+ bLoopVirgin = false;
+ }
+ }
+ else if (sizeof(T_in) == sizeof(Ncv32f))
+ {
+ // Float instantiation: allow small rounding differences.
+ if (fabsf((float)h_imgII.ptr()[h_imgII.stride()*i+j] - (float)h_imgII_d.ptr()[h_imgII_d.stride()*i+j]) > 0.01f)
+ {
+ bLoopVirgin = false;
+ }
+ }
+ else
+ {
+ ncvAssertPrintReturn(false, "Incorrect integral image test instance", false);
+ }
+ }
+ }
+ NCV_SKIP_COND_END
+
+ if (bLoopVirgin)
+ {
+ rcode = true;
+ }
+
+ return rcode;
+}
+\r
+\r
+template <class T_in, class T_out>\r
+bool TestIntegralImage<T_in, T_out>::deinit()\r
+{\r
+ return true;\r
+}\r
+\r
+\r
+template class TestIntegralImage<Ncv8u, Ncv32u>;\r
+template class TestIntegralImage<Ncv32f, Ncv32f>;\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testintegralimage_h_\r
+#define _testintegralimage_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+template <class T_in, class T_out>\r
+class TestIntegralImage : public NCVTestProvider\r
+{\r
+public:\r
+\r
+ TestIntegralImage(std::string testName, NCVTestSourceProvider<T_in> &src,\r
+ Ncv32u width, Ncv32u height);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+ TestIntegralImage(const TestIntegralImage&);\r
+ TestIntegralImage& operator=(const TestIntegralImage&); \r
+\r
+ NCVTestSourceProvider<T_in> &src;\r
+ Ncv32u width;\r
+ Ncv32u height;\r
+};\r
+\r
+#endif // _testintegralimage_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include "TestIntegralImageSquared.h"\r
+\r
+\r
+TestIntegralImageSquared::TestIntegralImageSquared(std::string testName, NCVTestSourceProvider<Ncv8u> &src,\r
+ Ncv32u width, Ncv32u height)\r
+ :\r
+ NCVTestProvider(testName),\r
+ src(src),\r
+ width(width),\r
+ height(height)\r
+{\r
+}\r
+\r
+\r
+bool TestIntegralImageSquared::toString(std::ofstream &strOut)\r
+{\r
+ strOut << "width=" << width << std::endl;\r
+ strOut << "height=" << height << std::endl;\r
+ return true;\r
+}\r
+\r
+\r
+bool TestIntegralImageSquared::init()\r
+{\r
+ return true;\r
+}\r
+\r
+\r
+bool TestIntegralImageSquared::process()\r
+{\r
+ NCVStatus ncvStat;\r
+ bool rcode = false;\r
+\r
+ Ncv32u widthSII = this->width + 1;\r
+ Ncv32u heightSII = this->height + 1;\r
+\r
+ NCVMatrixAlloc<Ncv8u> d_img(*this->allocatorGPU.get(), this->width, this->height);\r
+ ncvAssertReturn(d_img.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv8u> h_img(*this->allocatorCPU.get(), this->width, this->height);\r
+ ncvAssertReturn(h_img.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv64u> d_imgSII(*this->allocatorGPU.get(), widthSII, heightSII);\r
+ ncvAssertReturn(d_imgSII.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv64u> h_imgSII(*this->allocatorCPU.get(), widthSII, heightSII);\r
+ ncvAssertReturn(h_imgSII.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv64u> h_imgSII_d(*this->allocatorCPU.get(), widthSII, heightSII);\r
+ ncvAssertReturn(h_imgSII_d.isMemAllocated(), false);\r
+\r
+ Ncv32u bufSize;\r
+ ncvStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(this->width, this->height), &bufSize, this->devProp);\r
+ ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);\r
+ NCVVectorAlloc<Ncv8u> d_tmpBuf(*this->allocatorGPU.get(), bufSize);\r
+ ncvAssertReturn(d_tmpBuf.isMemAllocated(), false);\r
+\r
+ NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());\r
+ NCV_SKIP_COND_BEGIN\r
+\r
+ ncvAssertReturn(this->src.fill(h_img), false);\r
+\r
+ ncvStat = h_img.copySolid(d_img, 0);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = nppiStSqrIntegral_8u64u_C1R(d_img.ptr(), d_img.pitch(),\r
+ d_imgSII.ptr(), d_imgSII.pitch(),\r
+ NcvSize32u(this->width, this->height),\r
+ d_tmpBuf.ptr(), bufSize, this->devProp);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = d_imgSII.copySolid(h_imgSII_d, 0);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = nppiStSqrIntegral_8u64u_C1R_host(h_img.ptr(), h_img.pitch(),\r
+ h_imgSII.ptr(), h_imgSII.pitch(),\r
+ NcvSize32u(this->width, this->height));\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ NCV_SKIP_COND_END\r
+\r
+ //bit-to-bit check\r
+ bool bLoopVirgin = true;\r
+\r
+ NCV_SKIP_COND_BEGIN\r
+ for (Ncv32u i=0; bLoopVirgin && i < h_img.height() + 1; i++)\r
+ {\r
+ for (Ncv32u j=0; bLoopVirgin && j < h_img.width() + 1; j++)\r
+ {\r
+ if (h_imgSII.ptr()[h_imgSII.stride()*i+j] != h_imgSII_d.ptr()[h_imgSII_d.stride()*i+j])\r
+ {\r
+ bLoopVirgin = false;\r
+ }\r
+ }\r
+ }\r
+ NCV_SKIP_COND_END\r
+\r
+ if (bLoopVirgin)\r
+ {\r
+ rcode = true;\r
+ }\r
+\r
+ return rcode;\r
+}\r
+\r
+\r
+bool TestIntegralImageSquared::deinit()\r
+{\r
+ return true;\r
+}\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testintegralimagesquared_h_\r
+#define _testintegralimagesquared_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+class TestIntegralImageSquared : public NCVTestProvider\r
+{\r
+public:\r
+\r
+ TestIntegralImageSquared(std::string testName, NCVTestSourceProvider<Ncv8u> &src,\r
+ Ncv32u width, Ncv32u height);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+ TestIntegralImageSquared(const TestIntegralImageSquared&);\r
+ TestIntegralImageSquared& operator=(const TestIntegralImageSquared&); \r
+\r
+ NCVTestSourceProvider<Ncv8u> &src;\r
+ Ncv32u width;\r
+ Ncv32u height;\r
+};\r
+\r
+#endif // _testintegralimagesquared_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include <math.h>\r
+\r
+#include "TestRectStdDev.h"\r
+\r
+\r
+TestRectStdDev::TestRectStdDev(std::string testName, NCVTestSourceProvider<Ncv8u> &src,\r
+ Ncv32u width, Ncv32u height, NcvRect32u rect, Ncv32f scaleFactor,\r
+ NcvBool bTextureCache)\r
+ :\r
+ NCVTestProvider(testName),\r
+ src(src),\r
+ width(width),\r
+ height(height),\r
+ rect(rect),\r
+ scaleFactor(scaleFactor),\r
+ bTextureCache(bTextureCache)\r
+{\r
+}\r
+\r
+\r
+bool TestRectStdDev::toString(std::ofstream &strOut)\r
+{\r
+ strOut << "width=" << width << std::endl;\r
+ strOut << "height=" << height << std::endl;\r
+ strOut << "rect=[" << rect.x << ", " << rect.y << ", " << rect.width << ", " << rect.height << "]\n";\r
+ strOut << "scaleFactor=" << scaleFactor << std::endl;\r
+ strOut << "bTextureCache=" << bTextureCache << std::endl;\r
+ return true;\r
+}\r
+\r
+\r
+bool TestRectStdDev::init()\r
+{\r
+ return true;\r
+}\r
+\r
+\r
+bool TestRectStdDev::process()\r
+{\r
+ NCVStatus ncvStat;\r
+ bool rcode = false;\r
+\r
+ Ncv32s _normWidth = (Ncv32s)this->width - this->rect.x - this->rect.width + 1;\r
+ Ncv32s _normHeight = (Ncv32s)this->height - this->rect.y - this->rect.height + 1;\r
+ if (_normWidth <= 0 || _normHeight <= 0)\r
+ {\r
+ return true;\r
+ }\r
+ Ncv32u normWidth = (Ncv32u)_normWidth;\r
+ Ncv32u normHeight = (Ncv32u)_normHeight;\r
+ NcvSize32u szNormRoi(normWidth, normHeight);\r
+\r
+ Ncv32u widthII = this->width + 1;\r
+ Ncv32u heightII = this->height + 1;\r
+ Ncv32u widthSII = this->width + 1;\r
+ Ncv32u heightSII = this->height + 1;\r
+\r
+ NCVMatrixAlloc<Ncv8u> d_img(*this->allocatorGPU.get(), this->width, this->height);\r
+ ncvAssertReturn(d_img.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv8u> h_img(*this->allocatorCPU.get(), this->width, this->height);\r
+ ncvAssertReturn(h_img.isMemAllocated(), false);\r
+\r
+ NCVMatrixAlloc<Ncv32u> d_imgII(*this->allocatorGPU.get(), widthII, heightII);\r
+ ncvAssertReturn(d_imgII.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv32u> h_imgII(*this->allocatorCPU.get(), widthII, heightII);\r
+ ncvAssertReturn(h_imgII.isMemAllocated(), false);\r
+\r
+ NCVMatrixAlloc<Ncv64u> d_imgSII(*this->allocatorGPU.get(), widthSII, heightSII);\r
+ ncvAssertReturn(d_imgSII.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv64u> h_imgSII(*this->allocatorCPU.get(), widthSII, heightSII);\r
+ ncvAssertReturn(h_imgSII.isMemAllocated(), false);\r
+\r
+ NCVMatrixAlloc<Ncv32f> d_norm(*this->allocatorGPU.get(), normWidth, normHeight);\r
+ ncvAssertReturn(d_norm.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv32f> h_norm(*this->allocatorCPU.get(), normWidth, normHeight);\r
+ ncvAssertReturn(h_norm.isMemAllocated(), false);\r
+ NCVMatrixAlloc<Ncv32f> h_norm_d(*this->allocatorCPU.get(), normWidth, normHeight);\r
+ ncvAssertReturn(h_norm_d.isMemAllocated(), false);\r
+\r
+ Ncv32u bufSizeII, bufSizeSII;\r
+ ncvStat = nppiStIntegralGetSize_8u32u(NcvSize32u(this->width, this->height), &bufSizeII, this->devProp);\r
+ ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);\r
+ ncvStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(this->width, this->height), &bufSizeSII, this->devProp);\r
+ ncvAssertReturn(NPPST_SUCCESS == ncvStat, false);\r
+ Ncv32u bufSize = bufSizeII > bufSizeSII ? bufSizeII : bufSizeSII;\r
+ NCVVectorAlloc<Ncv8u> d_tmpBuf(*this->allocatorGPU.get(), bufSize);\r
+ ncvAssertReturn(d_tmpBuf.isMemAllocated(), false);\r
+\r
+ NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());\r
+ NCV_SKIP_COND_BEGIN\r
+ ncvAssertReturn(this->src.fill(h_img), false);\r
+\r
+ ncvStat = h_img.copySolid(d_img, 0);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = nppiStIntegral_8u32u_C1R(d_img.ptr(), d_img.pitch(),\r
+ d_imgII.ptr(), d_imgII.pitch(),\r
+ NcvSize32u(this->width, this->height),\r
+ d_tmpBuf.ptr(), bufSize, this->devProp);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = nppiStSqrIntegral_8u64u_C1R(d_img.ptr(), d_img.pitch(),\r
+ d_imgSII.ptr(), d_imgSII.pitch(),\r
+ NcvSize32u(this->width, this->height),\r
+ d_tmpBuf.ptr(), bufSize, this->devProp);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = nppiStRectStdDev_32f_C1R(d_imgII.ptr(), d_imgII.pitch(),\r
+ d_imgSII.ptr(), d_imgSII.pitch(),\r
+ d_norm.ptr(), d_norm.pitch(),\r
+ szNormRoi, this->rect,\r
+ this->scaleFactor,\r
+ this->bTextureCache);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = d_norm.copySolid(h_norm_d, 0);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = nppiStIntegral_8u32u_C1R_host(h_img.ptr(), h_img.pitch(),\r
+ h_imgII.ptr(), h_imgII.pitch(),\r
+ NcvSize32u(this->width, this->height));\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = nppiStSqrIntegral_8u64u_C1R_host(h_img.ptr(), h_img.pitch(),\r
+ h_imgSII.ptr(), h_imgSII.pitch(),\r
+ NcvSize32u(this->width, this->height));\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ ncvStat = nppiStRectStdDev_32f_C1R_host(h_imgII.ptr(), h_imgII.pitch(),\r
+ h_imgSII.ptr(), h_imgSII.pitch(),\r
+ h_norm.ptr(), h_norm.pitch(),\r
+ szNormRoi, this->rect,\r
+ this->scaleFactor);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+ NCV_SKIP_COND_END\r
+\r
+ //bit-to-bit check\r
+ bool bLoopVirgin = true;\r
+\r
+ NCV_SKIP_COND_BEGIN\r
+ const Ncv64f relEPS = 0.005;\r
+ for (Ncv32u i=0; bLoopVirgin && i < h_norm.height(); i++)\r
+ {\r
+ for (Ncv32u j=0; bLoopVirgin && j < h_norm.width(); j++)\r
+ {\r
+ Ncv64f absErr = fabs(h_norm.ptr()[h_norm.stride()*i+j] - h_norm_d.ptr()[h_norm_d.stride()*i+j]);\r
+ Ncv64f relErr = absErr / h_norm.ptr()[h_norm.stride()*i+j];\r
+\r
+ if (relErr > relEPS)\r
+ {\r
+ bLoopVirgin = false;\r
+ }\r
+ }\r
+ }\r
+ NCV_SKIP_COND_END\r
+\r
+ if (bLoopVirgin)\r
+ {\r
+ rcode = true;\r
+ }\r
+\r
+ return rcode;\r
+}\r
+\r
+\r
+bool TestRectStdDev::deinit()\r
+{\r
+ return true;\r
+}\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testrectstddev_h_\r
+#define _testrectstddev_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+class TestRectStdDev : public NCVTestProvider\r
+{\r
+public:\r
+\r
+ TestRectStdDev(std::string testName, NCVTestSourceProvider<Ncv8u> &src,\r
+ Ncv32u width, Ncv32u height, NcvRect32u rect, Ncv32f scaleFactor,\r
+ NcvBool bTextureCache);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+ TestRectStdDev(const TestRectStdDev&);\r
+ TestRectStdDev& operator=(const TestRectStdDev&); \r
+\r
+ NCVTestSourceProvider<Ncv8u> &src;\r
+ NcvRect32u rect;\r
+ Ncv32u width;\r
+ Ncv32u height;\r
+ Ncv32f scaleFactor;\r
+\r
+ NcvBool bTextureCache;\r
+};\r
+\r
+#endif // _testrectstddev_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include <math.h>\r
+\r
+#include "TestResize.h"\r
+\r
+\r
+template <class T>\r
+TestResize<T>::TestResize(std::string testName, NCVTestSourceProvider<T> &src,\r
+ Ncv32u width, Ncv32u height, Ncv32u scaleFactor, NcvBool bTextureCache)\r
+ :\r
+ NCVTestProvider(testName),\r
+ src(src),\r
+ width(width),\r
+ height(height),\r
+ scaleFactor(scaleFactor),\r
+ bTextureCache(bTextureCache)\r
+{\r
+}\r
+\r
+\r
+template <class T>\r
+bool TestResize<T>::toString(std::ofstream &strOut)\r
+{\r
+ strOut << "sizeof(T)=" << sizeof(T) << std::endl;\r
+ strOut << "width=" << width << std::endl; strOut << "height=" << height << std::endl;\r
+ strOut << "scaleFactor=" << scaleFactor << std::endl;\r
+ strOut << "bTextureCache=" << bTextureCache << std::endl;\r
+ return true;\r
+}\r
+\r
+\r
+template <class T>\r
+bool TestResize<T>::init()\r
+{\r
+ return true;\r
+}\r
+\r
+\r
+template <class T>\r
+bool TestResize<T>::process()\r
+{\r
+ NCVStatus ncvStat;\r
+ bool rcode = false;\r
+\r
+ Ncv32s smallWidth = this->width / this->scaleFactor;\r
+ Ncv32s smallHeight = this->height / this->scaleFactor;\r
+ if (smallWidth == 0 || smallHeight == 0)\r
+ {\r
+ return true;\r
+ }\r
+\r
+ NcvSize32u srcSize(this->width, this->height);\r
+\r
+ NCVMatrixAlloc<T> d_img(*this->allocatorGPU.get(), this->width, this->height);\r
+ ncvAssertReturn(d_img.isMemAllocated(), false);\r
+ NCVMatrixAlloc<T> h_img(*this->allocatorCPU.get(), this->width, this->height);\r
+ ncvAssertReturn(h_img.isMemAllocated(), false);\r
+\r
+ NCVMatrixAlloc<T> d_small(*this->allocatorGPU.get(), smallWidth, smallHeight);\r
+ ncvAssertReturn(d_small.isMemAllocated(), false);\r
+ NCVMatrixAlloc<T> h_small(*this->allocatorCPU.get(), smallWidth, smallHeight);\r
+ ncvAssertReturn(h_small.isMemAllocated(), false);\r
+ NCVMatrixAlloc<T> h_small_d(*this->allocatorCPU.get(), smallWidth, smallHeight);\r
+ ncvAssertReturn(h_small_d.isMemAllocated(), false);\r
+\r
+ NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());\r
+ NCV_SKIP_COND_BEGIN\r
+ ncvAssertReturn(this->src.fill(h_img), false);\r
+ NCV_SKIP_COND_END\r
+\r
+ ncvStat = h_img.copySolid(d_img, 0);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+ NCV_SKIP_COND_BEGIN\r
+ if (sizeof(T) == sizeof(Ncv32u))\r
+ {\r
+ ncvStat = nppiStDownsampleNearest_32u_C1R((Ncv32u *)d_img.ptr(), d_img.pitch(),\r
+ (Ncv32u *)d_small.ptr(), d_small.pitch(),\r
+ srcSize, this->scaleFactor,\r
+ this->bTextureCache);\r
+ }\r
+ else if (sizeof(T) == sizeof(Ncv64u))\r
+ {\r
+ ncvStat = nppiStDownsampleNearest_64u_C1R((Ncv64u *)d_img.ptr(), d_img.pitch(),\r
+ (Ncv64u *)d_small.ptr(), d_small.pitch(),\r
+ srcSize, this->scaleFactor,\r
+ this->bTextureCache);\r
+ }\r
+ else\r
+ {\r
+ ncvAssertPrintReturn(false, "Incorrect downsample test instance", false);\r
+ }\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+ NCV_SKIP_COND_END\r
+ ncvStat = d_small.copySolid(h_small_d, 0);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ NCV_SKIP_COND_BEGIN\r
+ if (sizeof(T) == sizeof(Ncv32u))\r
+ {\r
+ ncvStat = nppiStDownsampleNearest_32u_C1R_host((Ncv32u *)h_img.ptr(), h_img.pitch(),\r
+ (Ncv32u *)h_small.ptr(), h_small.pitch(),\r
+ srcSize, this->scaleFactor);\r
+ }\r
+ else if (sizeof(T) == sizeof(Ncv64u))\r
+ {\r
+ ncvStat = nppiStDownsampleNearest_64u_C1R_host((Ncv64u *)h_img.ptr(), h_img.pitch(),\r
+ (Ncv64u *)h_small.ptr(), h_small.pitch(),\r
+ srcSize, this->scaleFactor);\r
+ }\r
+ else\r
+ {\r
+ ncvAssertPrintReturn(false, "Incorrect downsample test instance", false);\r
+ }\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+ NCV_SKIP_COND_END\r
+\r
+ //bit-to-bit check\r
+ bool bLoopVirgin = true;\r
+\r
+ NCV_SKIP_COND_BEGIN\r
+ //const Ncv64f relEPS = 0.005;\r
+ for (Ncv32u i=0; bLoopVirgin && i < h_small.height(); i++)\r
+ {\r
+ for (Ncv32u j=0; bLoopVirgin && j < h_small.width(); j++)\r
+ {\r
+ if (h_small.ptr()[h_small.stride()*i+j] != h_small_d.ptr()[h_small_d.stride()*i+j])\r
+ {\r
+ bLoopVirgin = false;\r
+ }\r
+ }\r
+ }\r
+ NCV_SKIP_COND_END\r
+\r
+ if (bLoopVirgin)\r
+ {\r
+ rcode = true;\r
+ }\r
+\r
+ return rcode;\r
+}\r
+\r
+\r
+template <class T>\r
+bool TestResize<T>::deinit()\r
+{\r
+ return true;\r
+}\r
+\r
+\r
+template class TestResize<Ncv32u>;\r
+template class TestResize<Ncv64u>;\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testresize_h_\r
+#define _testresize_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+template <class T>\r
+class TestResize : public NCVTestProvider\r
+{\r
+public:\r
+\r
+ TestResize(std::string testName, NCVTestSourceProvider<T> &src,\r
+ Ncv32u width, Ncv32u height, Ncv32u scaleFactor, NcvBool bTextureCache);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+ TestResize(const TestResize&);\r
+ TestResize& operator=(const TestResize&); \r
+\r
+ NCVTestSourceProvider<T> &src;\r
+ Ncv32u width;\r
+ Ncv32u height;\r
+ Ncv32u scaleFactor;\r
+\r
+ NcvBool bTextureCache;\r
+};\r
+\r
+#endif // _testresize_h_\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+\r
+#include <math.h>\r
+\r
+#include "TestTranspose.h"\r
+\r
+\r
+template <class T>\r
+TestTranspose<T>::TestTranspose(std::string testName, NCVTestSourceProvider<T> &src,\r
+ Ncv32u width, Ncv32u height)\r
+ :\r
+ NCVTestProvider(testName),\r
+ src(src),\r
+ width(width),\r
+ height(height)\r
+{\r
+}\r
+\r
+\r
+template <class T>\r
+bool TestTranspose<T>::toString(std::ofstream &strOut)\r
+{\r
+ strOut << "sizeof(T)=" << sizeof(T) << std::endl;\r
+ strOut << "width=" << width << std::endl; strOut << "height=" << height << std::endl;\r
+ return true;\r
+}\r
+\r
+\r
+template <class T>\r
+bool TestTranspose<T>::init()\r
+{\r
+ return true;\r
+}\r
+\r
+\r
+template <class T>\r
+bool TestTranspose<T>::process()\r
+{\r
+ NCVStatus ncvStat;\r
+ bool rcode = false;\r
+\r
+ NcvSize32u srcSize(this->width, this->height);\r
+\r
+ NCVMatrixAlloc<T> d_img(*this->allocatorGPU.get(), this->width, this->height);\r
+ ncvAssertReturn(d_img.isMemAllocated(), false);\r
+ NCVMatrixAlloc<T> h_img(*this->allocatorCPU.get(), this->width, this->height);\r
+ ncvAssertReturn(h_img.isMemAllocated(), false);\r
+\r
+ NCVMatrixAlloc<T> d_dst(*this->allocatorGPU.get(), this->height, this->width);\r
+ ncvAssertReturn(d_dst.isMemAllocated(), false);\r
+ NCVMatrixAlloc<T> h_dst(*this->allocatorCPU.get(), this->height, this->width);\r
+ ncvAssertReturn(h_dst.isMemAllocated(), false);\r
+ NCVMatrixAlloc<T> h_dst_d(*this->allocatorCPU.get(), this->height, this->width);\r
+ ncvAssertReturn(h_dst_d.isMemAllocated(), false);\r
+\r
+ NCV_SET_SKIP_COND(this->allocatorGPU.get()->isCounting());\r
+ NCV_SKIP_COND_BEGIN\r
+ ncvAssertReturn(this->src.fill(h_img), false);\r
+ NCV_SKIP_COND_END\r
+\r
+ ncvStat = h_img.copySolid(d_img, 0);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+ NCV_SKIP_COND_BEGIN\r
+ if (sizeof(T) == sizeof(Ncv32u))\r
+ {\r
+ ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)d_img.ptr(), d_img.pitch(),\r
+ (Ncv32u *)d_dst.ptr(), d_dst.pitch(),\r
+ NcvSize32u(this->width, this->height));\r
+ }\r
+ else if (sizeof(T) == sizeof(Ncv64u))\r
+ {\r
+ ncvStat = nppiStTranspose_64u_C1R((Ncv64u *)d_img.ptr(), d_img.pitch(),\r
+ (Ncv64u *)d_dst.ptr(), d_dst.pitch(),\r
+ NcvSize32u(this->width, this->height));\r
+ }\r
+ else\r
+ {\r
+ ncvAssertPrintReturn(false, "Incorrect transpose test instance", false);\r
+ }\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+ NCV_SKIP_COND_END\r
+ ncvStat = d_dst.copySolid(h_dst_d, 0);\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+\r
+ NCV_SKIP_COND_BEGIN\r
+ if (sizeof(T) == sizeof(Ncv32u))\r
+ {\r
+ ncvStat = nppiStTranspose_32u_C1R_host((Ncv32u *)h_img.ptr(), h_img.pitch(),\r
+ (Ncv32u *)h_dst.ptr(), h_dst.pitch(),\r
+ NcvSize32u(this->width, this->height));\r
+ }\r
+ else if (sizeof(T) == sizeof(Ncv64u))\r
+ {\r
+ ncvStat = nppiStTranspose_64u_C1R_host((Ncv64u *)h_img.ptr(), h_img.pitch(),\r
+ (Ncv64u *)h_dst.ptr(), h_dst.pitch(),\r
+ NcvSize32u(this->width, this->height));\r
+ }\r
+ else\r
+ {\r
+ ncvAssertPrintReturn(false, "Incorrect transpose test instance", false);\r
+ }\r
+ ncvAssertReturn(ncvStat == NPPST_SUCCESS, false);\r
+ NCV_SKIP_COND_END\r
+\r
+ //bit-to-bit check\r
+ bool bLoopVirgin = true;\r
+\r
+ NCV_SKIP_COND_BEGIN\r
+ //const Ncv64f relEPS = 0.005;\r
+ for (Ncv32u i=0; bLoopVirgin && i < this->width; i++)\r
+ {\r
+ for (Ncv32u j=0; bLoopVirgin && j < this->height; j++)\r
+ {\r
+ if (h_dst.ptr()[h_dst.stride()*i+j] != h_dst_d.ptr()[h_dst_d.stride()*i+j])\r
+ {\r
+ bLoopVirgin = false;\r
+ }\r
+ }\r
+ }\r
+ NCV_SKIP_COND_END\r
+\r
+ if (bLoopVirgin)\r
+ {\r
+ rcode = true;\r
+ }\r
+\r
+ return rcode;\r
+}\r
+\r
+\r
+template <class T>\r
+bool TestTranspose<T>::deinit()\r
+{\r
+ return true;\r
+}\r
+\r
+\r
+template class TestTranspose<Ncv32u>;\r
+template class TestTranspose<Ncv64u>;\r
--- /dev/null
+/*\r
+ * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.\r
+ *\r
+ * NVIDIA Corporation and its licensors retain all intellectual \r
+ * property and proprietary rights in and to this software and \r
+ * related documentation and any modifications thereto. \r
+ * Any use, reproduction, disclosure, or distribution of this \r
+ * software and related documentation without an express license \r
+ * agreement from NVIDIA Corporation is strictly prohibited.\r
+ */\r
+#ifndef _testtranspose_h_\r
+#define _testtranspose_h_\r
+\r
+#include "NCVTest.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+template <class T>\r
+class TestTranspose : public NCVTestProvider\r
+{\r
+public:\r
+\r
+ TestTranspose(std::string testName, NCVTestSourceProvider<T> &src,\r
+ Ncv32u width, Ncv32u height);\r
+\r
+ virtual bool init();\r
+ virtual bool process();\r
+ virtual bool deinit();\r
+ virtual bool toString(std::ofstream &strOut);\r
+\r
+private:\r
+\r
+ TestTranspose(const TestTranspose&);\r
+ TestTranspose& operator=(const TestTranspose&); \r
+\r
+ NCVTestSourceProvider<T> &src;\r
+ Ncv32u width;\r
+ Ncv32u height;\r
+};\r
+\r
+#endif // _testtranspose_h_\r
--- /dev/null
+#pragma warning (disable : 4408 4201 4100)\r
+ \r
+#include <cstdio>\r
+\r
+#include "NCV.hpp"\r
+#include "NCVHaarObjectDetection.hpp"\r
+\r
+#include "TestIntegralImage.h"\r
+#include "TestIntegralImageSquared.h"\r
+#include "TestRectStdDev.h"\r
+#include "TestResize.h"\r
+#include "TestCompact.h"\r
+#include "TestTranspose.h"\r
+\r
+#include "TestDrawRects.h"\r
+#include "TestHypothesesGrow.h"\r
+#include "TestHypothesesFilter.h"\r
+#include "TestHaarCascadeLoader.h"\r
+#include "TestHaarCascadeApplication.h"\r
+\r
+#include "NCVAutoTestLister.hpp"\r
+#include "NCVTestSourceProvider.hpp"\r
+\r
+\r
+template <class T_in, class T_out>\r
+void generateIntegralTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T_in> &src,\r
+ Ncv32u maxWidth, Ncv32u maxHeight)\r
+{\r
+ for (Ncv32f _i=1.0; _i<maxWidth; _i*=1.2f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "LinIntImgW%dH%d", i, 2);\r
+ testLister.add(new TestIntegralImage<T_in, T_out>(testName, src, i, 2));\r
+ }\r
+ for (Ncv32f _i=1.0; _i<maxHeight; _i*=1.2f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "LinIntImgW%dH%d", 2, i);\r
+ testLister.add(new TestIntegralImage<T_in, T_out>(testName, src, 2, i));\r
+ }\r
+\r
+ //test VGA\r
+ testLister.add(new TestIntegralImage<T_in, T_out>("LinIntImg_VGA", src, 640, 480));\r
+\r
+ //TODO: add tests of various resolutions up to 4096x4096\r
+}\r
+\r
+\r
+void generateSquaredIntegralTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv8u> &src,\r
+ Ncv32u maxWidth, Ncv32u maxHeight)\r
+{\r
+ for (Ncv32f _i=1.0; _i<maxWidth; _i*=1.2f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "SqIntImgW%dH%d", i, 32);\r
+ testLister.add(new TestIntegralImageSquared(testName, src, i, 32));\r
+ }\r
+ for (Ncv32f _i=1.0; _i<maxHeight; _i*=1.2f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "SqIntImgW%dH%d", 32, i);\r
+ testLister.add(new TestIntegralImageSquared(testName, src, 32, i));\r
+ }\r
+\r
+ //test VGA\r
+ testLister.add(new TestIntegralImageSquared("SqLinIntImg_VGA", src, 640, 480));\r
+\r
+ //TODO: add tests of various resolutions up to 4096x4096\r
+}\r
+\r
+\r
+void generateRectStdDevTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv8u> &src,\r
+ Ncv32u maxWidth, Ncv32u maxHeight)\r
+{\r
+ NcvRect32u rect(1,1,18,18);\r
+\r
+ for (Ncv32f _i=32; _i<maxHeight/2 && _i < maxWidth/2; _i*=1.2f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "RectStdDevW%dH%d", i*2, i);\r
+ testLister.add(new TestRectStdDev(testName, src, i*2, i, rect, 1, true));\r
+ testLister.add(new TestRectStdDev(testName, src, i*2, i, rect, 1.5, false));\r
+ testLister.add(new TestRectStdDev(testName, src, i-1, i*2-1, rect, 1, false));\r
+ testLister.add(new TestRectStdDev(testName, src, i-1, i*2-1, rect, 2.5, true));\r
+ }\r
+\r
+ //test VGA\r
+ testLister.add(new TestRectStdDev("RectStdDev_VGA", src, 640, 480, rect, 1, true));\r
+\r
+ //TODO: add tests of various resolutions up to 4096x4096\r
+}\r
+\r
+\r
+template <class T>\r
+void generateResizeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T> &src)\r
+{\r
+ //test VGA\r
+ for (Ncv32u i=1; i<480; i+=3)\r
+ {\r
+ char testName[80];\r
+ sprintf(testName, "TestResize_VGA_s%d", i);\r
+ testLister.add(new TestResize<T>(testName, src, 640, 480, i, true));\r
+ testLister.add(new TestResize<T>(testName, src, 640, 480, i, false));\r
+ }\r
+\r
+ //test HD\r
+ for (Ncv32u i=1; i<1080; i+=5)\r
+ {\r
+ char testName[80];\r
+ sprintf(testName, "TestResize_1080_s%d", i);\r
+ testLister.add(new TestResize<T>(testName, src, 1920, 1080, i, true));\r
+ testLister.add(new TestResize<T>(testName, src, 1920, 1080, i, false));\r
+ }\r
+\r
+ //TODO: add tests of various resolutions up to 4096x4096\r
+}\r
+\r
+\r
+void generateNPPSTVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)\r
+{\r
+ //compaction\r
+ for (Ncv32f _i=256.0; _i<maxLength; _i*=1.1f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "Compaction%d", i);\r
+ testLister.add(new TestCompact(testName, src, i, 0xFFFFFFFF, 30));\r
+ }\r
+ for (Ncv32u i=1; i<260; i++)\r
+ {\r
+ char testName[80];\r
+ sprintf(testName, "Compaction%d", i);\r
+ testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 70));\r
+ testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 0));\r
+ testLister.add(new TestCompact(testName, src, i, 0xC001C0DE, 100));\r
+ }\r
+ for (Ncv32u i=256*256-256; i<256*256+257; i++)\r
+ {\r
+ char testName[80];\r
+ sprintf(testName, "Compaction%d", i);\r
+ testLister.add(new TestCompact(testName, src, i, 0xFFFFFFFF, 40));\r
+ }\r
+ for (Ncv32u i=256*256*256-10; i<256*256*256+10; i++)\r
+ {\r
+ char testName[80];\r
+ sprintf(testName, "Compaction%d", i);\r
+ testLister.add(new TestCompact(testName, src, i, 0x00000000, 2));\r
+ }\r
+}\r
+\r
+\r
+template <class T>\r
+void generateTransposeTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T> &src)\r
+{\r
+ for (int i=2; i<64; i+=4)\r
+ {\r
+ for (int j=2; j<64; j+=4)\r
+ {\r
+ char testName[80];\r
+ sprintf(testName, "TestTranspose_%dx%d", i, j);\r
+ testLister.add(new TestTranspose<T>(testName, src, i, j));\r
+ }\r
+ }\r
+\r
+ for (int i=1; i<128; i+=1)\r
+ {\r
+ for (int j=1; j<2; j+=1)\r
+ {\r
+ char testName[80];\r
+ sprintf(testName, "TestTranspose_%dx%d", i, j);\r
+ testLister.add(new TestTranspose<T>(testName, src, i, j));\r
+ }\r
+ }\r
+\r
+ testLister.add(new TestTranspose<T>("TestTranspose_VGA", src, 640, 480));\r
+ testLister.add(new TestTranspose<T>("TestTranspose_HD1080", src, 1920, 1080));\r
+}\r
+\r
+\r
+template <class T>\r
+void generateDrawRectsTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<T> &src, NCVTestSourceProvider<Ncv32u> &src32u,\r
+ Ncv32u maxWidth, Ncv32u maxHeight)\r
+{\r
+ for (Ncv32f _i=16.0; _i<maxWidth; _i*=1.1f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ Ncv32u j = maxHeight * i / maxWidth;\r
+ if (!j) continue;\r
+ char testName[80];\r
+ sprintf(testName, "DrawRectsW%dH%d", i, j);\r
+\r
+ if (sizeof(T) == sizeof(Ncv32u))\r
+ {\r
+ testLister.add(new TestDrawRects<T>(testName, src, src32u, i, j, i*j/1000+1, (T)0xFFFFFFFF));\r
+ }\r
+ else if (sizeof(T) == sizeof(Ncv8u))\r
+ {\r
+ testLister.add(new TestDrawRects<T>(testName, src, src32u, i, j, i*j/1000+1, (T)0xFF));\r
+ }\r
+ else\r
+ {\r
+ ncvAssertPrintCheck(false, "Attempted to instantiate non-existing DrawRects test suite");\r
+ }\r
+ }\r
+\r
+ //test VGA\r
+ testLister.add(new TestDrawRects<T>("DrawRects_VGA", src, src32u, 640, 480, 640*480/1000, (T)0xFF));\r
+\r
+ //TODO: add tests of various resolutions up to 4096x4096\r
+}\r
+\r
+\r
+void generateVectorTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)\r
+{\r
+ //growth\r
+ for (Ncv32f _i=10.0; _i<maxLength; _i*=1.1f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "VectorGrow%d", i);\r
+ testLister.add(new TestHypothesesGrow(testName, src, 20, 20, 2.2f, i, i/2, i, i/4));\r
+ testLister.add(new TestHypothesesGrow(testName, src, 10, 42, 1.2f, i, i, i, 0));\r
+ }\r
+ testLister.add(new TestHypothesesGrow("VectorGrow01b", src, 10, 42, 1.2f, 10, 0, 10, 1));\r
+ testLister.add(new TestHypothesesGrow("VectorGrow11b", src, 10, 42, 1.2f, 10, 1, 10, 1));\r
+ testLister.add(new TestHypothesesGrow("VectorGrow10b", src, 10, 42, 1.2f, 10, 1, 10, 0));\r
+ testLister.add(new TestHypothesesGrow("VectorGrow00b", src, 10, 42, 1.2f, 10, 0, 10, 0));\r
+}\r
+\r
+\r
+void generateHypothesesFiltrationTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv32u> &src, Ncv32u maxLength)\r
+{\r
+ for (Ncv32f _i=1.0; _i<maxLength; _i*=1.1f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "HypFilter%d", i);\r
+ testLister.add(new TestHypothesesFilter(testName, src, i, 3, 0.2f));\r
+ testLister.add(new TestHypothesesFilter(testName, src, i, 0, 0.2f));\r
+ testLister.add(new TestHypothesesFilter(testName, src, i, 1, 0.1f));\r
+ }\r
+}\r
+\r
+\r
+void generateHaarLoaderTests(NCVAutoTestLister &testLister)\r
+{\r
+ testLister.add(new TestHaarCascadeLoader("haarcascade_eye.xml", "haarcascade_eye.xml"));\r
+ testLister.add(new TestHaarCascadeLoader("haarcascade_frontalface_alt.xml", "haarcascade_frontalface_alt.xml"));\r
+ testLister.add(new TestHaarCascadeLoader("haarcascade_frontalface_alt2.xml", "haarcascade_frontalface_alt2.xml"));\r
+ testLister.add(new TestHaarCascadeLoader("haarcascade_frontalface_alt_tree.xml", "haarcascade_frontalface_alt_tree.xml"));\r
+ testLister.add(new TestHaarCascadeLoader("haarcascade_eye_tree_eyeglasses.xml", "haarcascade_eye_tree_eyeglasses.xml"));\r
+}\r
+\r
+\r
+void generateHaarApplicationTests(NCVAutoTestLister &testLister, NCVTestSourceProvider<Ncv8u> &src,\r
+ Ncv32u maxWidth, Ncv32u maxHeight)\r
+{\r
+ for (Ncv32u i=20; i<512; i+=11)\r
+ {\r
+ for (Ncv32u j=20; j<128; j+=5)\r
+ {\r
+ char testName[80];\r
+ sprintf(testName, "HaarAppl%d_%d", i, j);\r
+ testLister.add(new TestHaarCascadeApplication(testName, src, "haarcascade_frontalface_alt.xml", j, i));\r
+ }\r
+ }\r
+ for (Ncv32f _i=20.0; _i<maxWidth; _i*=1.1f)\r
+ {\r
+ Ncv32u i = (Ncv32u)_i;\r
+ char testName[80];\r
+ sprintf(testName, "HaarAppl%d", i);\r
+ testLister.add(new TestHaarCascadeApplication(testName, src, "haarcascade_frontalface_alt.xml", i, i));\r
+ }\r
+}\r
+\r
+\r
+static void devNullOutput(const char *msg)\r
+{\r
+}\r
+\r
+\r
+bool main_nvidia()\r
+{\r
+ printf("Testing NVIDIA Computer Vision SDK\n");\r
+ printf("==================================\n");\r
+\r
+ ncvSetDebugOutputHandler(devNullOutput);\r
+\r
+ NCVAutoTestLister testListerII("NPPST Integral Image" );//,,true, false);\r
+ NCVAutoTestLister testListerSII("NPPST Squared Integral Image" );//,,true, false);\r
+ NCVAutoTestLister testListerRStdDev("NPPST RectStdDev" );//,,true, false);\r
+ NCVAutoTestLister testListerResize("NPPST Resize" );//,,true, false);\r
+ NCVAutoTestLister testListerNPPSTVectorOperations("NPPST Vector Operations" );//,,true, false);\r
+ NCVAutoTestLister testListerTranspose("NPPST Transpose" );//,,true, false);\r
+\r
+ NCVAutoTestLister testListerVectorOperations("Vector Operations" );//,,true, false);\r
+ NCVAutoTestLister testListerHaarLoader("Haar Cascade Loader" );//,,true, false);\r
+ NCVAutoTestLister testListerHaarAppl("Haar Cascade Application" );//,,true, false);\r
+ NCVAutoTestLister testListerHypFiltration("Hypotheses Filtration" );//,,true, false);\r
+ NCVAutoTestLister testListerVisualize("Visualization" );//,,true, false);\r
+\r
+ printf("Initializing data source providers\n");\r
+ NCVTestSourceProvider<Ncv32u> testSrcRandom_32u(2010, 0, 0xFFFFFFFF, 4096, 4096);\r
+ NCVTestSourceProvider<Ncv8u> testSrcRandom_8u(2010, 0, 255, 4096, 4096);\r
+ NCVTestSourceProvider<Ncv64u> testSrcRandom_64u(2010, 0, 0xFFFFFFFFFFFFFFFF, 4096, 4096);\r
+ NCVTestSourceProvider<Ncv8u> testSrcFacesVGA_8u("../../data/group_1_640x480_VGA.pgm");\r
+ NCVTestSourceProvider<Ncv32f> testSrcRandom_32f(2010, -1.0f, 1.0f, 4096, 4096);\r
+\r
+ printf("Generating NPPST test suites\n");\r
+ generateIntegralTests<Ncv8u, Ncv32u>(testListerII, testSrcRandom_8u, 4096, 4096);\r
+ generateIntegralTests<Ncv32f, Ncv32f>(testListerII, testSrcRandom_32f, 4096, 4096);\r
+ generateSquaredIntegralTests(testListerSII, testSrcRandom_8u, 4096, 4096);\r
+ generateRectStdDevTests(testListerRStdDev, testSrcRandom_8u, 4096, 4096);\r
+ generateResizeTests(testListerResize, testSrcRandom_32u);\r
+ generateResizeTests(testListerResize, testSrcRandom_64u);\r
+ generateNPPSTVectorTests(testListerNPPSTVectorOperations, testSrcRandom_32u, 4096*4096);\r
+ generateTransposeTests(testListerTranspose, testSrcRandom_32u);\r
+ generateTransposeTests(testListerTranspose, testSrcRandom_64u);\r
+\r
+ printf("Generating NCV test suites\n");\r
+ generateDrawRectsTests(testListerVisualize, testSrcRandom_8u, testSrcRandom_32u, 4096, 4096);\r
+ generateDrawRectsTests(testListerVisualize, testSrcRandom_32u, testSrcRandom_32u, 4096, 4096);\r
+ generateVectorTests(testListerVectorOperations, testSrcRandom_32u, 4096*4096);\r
+ generateHypothesesFiltrationTests(testListerHypFiltration, testSrcRandom_32u, 1024);\r
+ generateHaarLoaderTests(testListerHaarLoader);\r
+ generateHaarApplicationTests(testListerHaarAppl, testSrcFacesVGA_8u, 1280, 720);\r
+\r
+ // Indicate if at least one test failed\r
+ bool passed = true;\r
+\r
+ // Invoke all tests\r
+ passed &= testListerII.invoke();\r
+ passed &= testListerSII.invoke();\r
+ passed &= testListerRStdDev.invoke();\r
+ passed &= testListerResize.invoke();\r
+ passed &= testListerNPPSTVectorOperations.invoke();\r
+ passed &= testListerTranspose.invoke();\r
+ passed &= testListerVisualize.invoke();\r
+ passed &= testListerVectorOperations.invoke();\r
+ passed &= testListerHypFiltration.invoke();\r
+ passed &= testListerHaarLoader.invoke();\r
+ passed &= testListerHaarAppl.invoke();\r
+\r
+ return passed;\r
+}\r
TEST(warpAffine, accuracy) { CV_GpuNppImageWarpAffineTest test; test.safe_run(); }\r
TEST(warpPerspective, accuracy) { CV_GpuNppImageWarpPerspectiveTest test; test.safe_run(); }\r
TEST(integral, accuracy) { CV_GpuNppImageIntegralTest test; test.safe_run(); }\r
-//TEST(canny, accuracy) { CV_GpuNppImageCannyTest test; test.safe_run(); }\r
TEST(cvtColor, accuracy) { CV_GpuCvtColorTest test; test.safe_run(); }\r
TEST(histograms, accuracy) { CV_GpuHistogramsTest test; test.safe_run(); }\r
TEST(cornerHearris, accuracy) { CV_GpuCornerHarrisTest test; test.safe_run(); }\r
\r
// Run test with --gtest_catch_exceptions flag to avoid runtime errors in \r
// the case when there is no GPU\r
-\r
-// TODO Add NVIDIA tests\r
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include "test_precomp.hpp"\r
+#include "cvconfig.h"\r
+\r
+class CV_NVidiaTestsCaller : public cvtest::BaseTest\r
+{\r
+public:\r
+ CV_NVidiaTestsCaller() {}\r
+ virtual ~CV_NVidiaTestsCaller() {}\r
+\r
+protected:\r
+ \r
+ void run( int )\r
+ { \r
+#if defined(HAVE_CUDA)\r
+ bool main_nvidia();\r
+\r
+ // Invoke all NVIDIA Staging tests and obtain the result\r
+ bool passed = main_nvidia();\r
+\r
+ if (passed)\r
+ ts->set_failed_test_info(cvtest::TS::OK);\r
+ else\r
+ ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);\r
+\r
+#else\r
+ ts->set_failed_test_info(cvtest::TS::SKIPPED);\r
+#endif\r
+ } \r
+};\r
+\r
+TEST(NPP_Staging, DISABLED_multitest) { CV_NVidiaTestsCaller test; test.safe_run(); }\r