updated gpu accuracy tests
author     Vladislav Vinogradov <vlad.vinogradov@itseez.com>
           Thu, 16 Aug 2012 12:23:27 +0000 (16:23 +0400)
committer  Vladislav Vinogradov <vlad.vinogradov@itseez.com>
           Thu, 16 Aug 2012 12:23:27 +0000 (16:23 +0400)
added possibility to specify the device on which tests will be executed

modules/gpu/test/main.cpp
modules/gpu/test/precomp.hpp
modules/gpu/test/utility.cpp
modules/gpu/test/utility.hpp
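
Note: the test runner now accepts a device option (default -1, i.e. run on every compatible device) and a print_info_only flag that prints the system/GPU summary and exits. With the cv::CommandLineParser keys shown in main.cpp below, the invocation is presumably something like opencv_test_gpu --device=0 or opencv_test_gpu --print_info_only=true; the exact binary name and flag syntax depend on the local build and parser version.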

modules/gpu/test/main.cpp
index 6a8c67d..6df7db0 100644
@@ -49,35 +49,39 @@ using namespace cv::gpu;
 using namespace cvtest;\r
 using namespace testing;\r
 \r
-void print_info()\r
+void printInfo()\r
 {\r
-    printf("\n");\r
 #if defined _WIN32\r
 #   if defined _WIN64\r
-        puts("OS: Windows 64");\r
+        puts("OS: Windows x64");\r
 #   else\r
-        puts("OS: Windows 32");\r
+        puts("OS: Windows x32");\r
 #   endif\r
 #elif defined linux\r
 #   if defined _LP64\r
-        puts("OS: Linux 64");\r
+        puts("OS: Linux x64");\r
 #   else\r
-        puts("OS: Linux 32");\r
+        puts("OS: Linux x32");\r
 #   endif\r
 #elif defined __APPLE__\r
 #   if defined _LP64\r
-        puts("OS: Apple 64");\r
+        puts("OS: Apple x64");\r
 #   else\r
-        puts("OS: Apple 32");\r
+        puts("OS: Apple x32");\r
 #   endif\r
 #endif\r
 \r
-    int deviceCount = getCudaEnabledDeviceCount();\r
     int driver;\r
     cudaDriverGetVersion(&driver);\r
 \r
     printf("CUDA Driver  version: %d\n", driver);\r
     printf("CUDA Runtime version: %d\n", CUDART_VERSION);\r
+\r
+    puts("GPU module was compiled for the following GPU archs:");\r
+    printf("    BIN: %s\n", CUDA_ARCH_BIN);\r
+    printf("    PTX: %s\n\n", CUDA_ARCH_PTX);\r
+\r
+    int deviceCount = getCudaEnabledDeviceCount();\r
     printf("CUDA device count: %d\n\n", deviceCount);\r
 \r
     for (int i = 0; i < deviceCount; ++i)\r
@@ -87,17 +91,13 @@ void print_info()
         printf("Device %d:\n", i);\r
         printf("    Name: %s\n", info.name().c_str());\r
         printf("    Compute capability version: %d.%d\n", info.majorVersion(), info.minorVersion());\r
+        printf("    Multi Processor Count: %d\n", info.multiProcessorCount());\r
         printf("    Total memory: %d Mb\n", static_cast<int>(static_cast<int>(info.totalMemory() / 1024.0) / 1024.0));\r
         printf("    Free  memory: %d Mb\n", static_cast<int>(static_cast<int>(info.freeMemory() / 1024.0) / 1024.0));\r
-        if (info.isCompatible())\r
-            puts("    This device is compatible with current GPU module build\n");\r
-        else\r
-            puts("    This device is NOT compatible with current GPU module build\n");\r
+        if (!info.isCompatible())\r
+            puts("    !!! This device is NOT compatible with current GPU module build\n");\r
+        printf("\n");\r
     }\r
-\r
-    puts("GPU module was compiled for the following GPU archs:");\r
-    printf("    BIN: %s\n", CUDA_ARCH_BIN);\r
-    printf("    PTX: %s\n\n", CUDA_ARCH_PTX);\r
 }\r
 \r
 enum OutputLevel\r
@@ -111,25 +111,56 @@ extern OutputLevel nvidiaTestOutputLevel;
 \r
 int main(int argc, char** argv)\r
 {\r
-    TS::ptr()->init("gpu");\r
-    InitGoogleTest(&argc, argv);\r
-\r
-    const char* keys ="{ nvtest_output_level | nvtest_output_level | compact | NVidia test verbosity level }";\r
+    try\r
+    {\r
+        CommandLineParser parser(argc, (const char**)argv,\r
+                                 "{ print_info_only | print_info_only | false | Print information about system and exit }"\r
+                                 "{ device | device | -1 | Device on which tests will be executed (-1 means all devices) }"\r
+                                 "{ nvtest_output_level | nvtest_output_level | compact | NVidia test verbosity level }");\r
+\r
+        printInfo();\r
+\r
+        if (parser.get<bool>("print_info_only"))\r
+            return 0;\r
+\r
+        int device = parser.get<int>("device");\r
+        if (device < 0)\r
+        {\r
+            DeviceManager::instance().loadAll();\r
+            std::cout << "Run tests on all supported devices\n" << std::endl;\r
+        }\r
+        else\r
+        {\r
+            DeviceManager::instance().load(device);\r
+            std::cout << "Run tests on device " << device << '\n' << std::endl;\r
+        }\r
 \r
-    CommandLineParser parser(argc, (const char**)argv, keys);\r
+        string outputLevel = parser.get<string>("nvtest_output_level");\r
 \r
-    string outputLevel = parser.get<string>("nvtest_output_level", "none");\r
+        if (outputLevel == "none")\r
+            nvidiaTestOutputLevel = OutputLevelNone;\r
+        else if (outputLevel == "compact")\r
+            nvidiaTestOutputLevel = OutputLevelCompact;\r
+        else if (outputLevel == "full")\r
+            nvidiaTestOutputLevel = OutputLevelFull;\r
 \r
-    if (outputLevel == "none")\r
-        nvidiaTestOutputLevel = OutputLevelNone;\r
-    else if (outputLevel == "compact")\r
-        nvidiaTestOutputLevel = OutputLevelCompact;\r
-    else if (outputLevel == "full")\r
-        nvidiaTestOutputLevel = OutputLevelFull;\r
+        TS::ptr()->init("gpu");\r
+        InitGoogleTest(&argc, argv);\r
 \r
-    print_info();\r
+        return RUN_ALL_TESTS();\r
+    }\r
+    catch (const exception& e)\r
+    {\r
+        cerr << e.what() << endl;\r
+        return -1;\r
+    }\r
+    catch (...)\r
+    {\r
+        cerr << "Unknown error" << endl;\r
+        return -1;\r
+    }\r
 \r
-    return RUN_ALL_TESTS();\r
+    return 0;\r
 }\r
 \r
 #else // HAVE_CUDA\r
modules/gpu/test/precomp.hpp
index afc3be8..753367c 100644
@@ -56,6 +56,7 @@
 #include <limits>\r
 #include <algorithm>\r
 #include <iterator>\r
+#include <stdexcept>\r
 \r
 #include "cvconfig.h"\r
 #include "opencv2/core/core.hpp"\r
modules/gpu/test/utility.cpp
index bc73d30..148c9d2 100644
@@ -46,6 +46,7 @@ using namespace cv;
 using namespace cv::gpu;\r
 using namespace cvtest;\r
 using namespace testing;\r
+using namespace testing::internal;\r
 \r
 //////////////////////////////////////////////////////////////////////\r
 // random generators\r
@@ -108,12 +109,12 @@ GpuMat loadMat(const Mat& m, bool useRoi)
 //////////////////////////////////////////////////////////////////////\r
 // Image load\r
 \r
-Mat readImage(const string& fileName, int flags)\r
+Mat readImage(const std::string& fileName, int flags)\r
 {\r
-    return imread(string(cvtest::TS::ptr()->get_data_path()) + fileName, flags);\r
+    return imread(TS::ptr()->get_data_path() + fileName, flags);\r
 }\r
 \r
-Mat readImageType(const string& fname, int type)\r
+Mat readImageType(const std::string& fname, int type)\r
 {\r
     Mat src = readImage(fname, CV_MAT_CN(type) == 1 ? IMREAD_GRAYSCALE : IMREAD_COLOR);\r
     if (CV_MAT_CN(type) == 4)\r
@@ -134,50 +135,150 @@ bool supportFeature(const DeviceInfo& info, FeatureSet feature)
     return TargetArchs::builtWith(feature) && info.supports(feature);\r
 }\r
 \r
-const vector<DeviceInfo>& devices()\r
+DeviceManager& DeviceManager::instance()\r
 {\r
-    static vector<DeviceInfo> devs;\r
-    static bool first = true;\r
+    static DeviceManager obj;\r
+    return obj;\r
+}\r
 \r
-    if (first)\r
-    {\r
-        int deviceCount = getCudaEnabledDeviceCount();\r
+void DeviceManager::load(int i)\r
+{\r
+    devices_.clear();\r
+    devices_.reserve(1);\r
 \r
-        devs.reserve(deviceCount);\r
+    ostringstream msg;\r
 \r
-        for (int i = 0; i < deviceCount; ++i)\r
-        {\r
-            DeviceInfo info(i);\r
-            if (info.isCompatible())\r
-                devs.push_back(info);\r
-        }\r
+    if (i < 0 || i >= getCudaEnabledDeviceCount())\r
+    {\r
+        msg << "Incorrect device number - " << i;\r
+        throw runtime_error(msg.str());\r
+    }\r
+\r
+    DeviceInfo info(i);\r
 \r
-        first = false;\r
+    if (!info.isCompatible())\r
+    {\r
+        msg << "Device " << i << " [" << info.name() << "] is NOT compatible with current GPU module build";\r
+        throw runtime_error(msg.str());\r
     }\r
 \r
-    return devs;\r
+    devices_.push_back(info);\r
 }\r
 \r
-vector<DeviceInfo> devices(FeatureSet feature)\r
+void DeviceManager::loadAll()\r
 {\r
-    const vector<DeviceInfo>& d = devices();\r
+    int deviceCount = getCudaEnabledDeviceCount();\r
 \r
-    vector<DeviceInfo> devs_filtered;\r
+    devices_.clear();\r
+    devices_.reserve(deviceCount);\r
 \r
-    if (TargetArchs::builtWith(feature))\r
+    for (int i = 0; i < deviceCount; ++i)\r
     {\r
-        devs_filtered.reserve(d.size());\r
-\r
-        for (size_t i = 0, size = d.size(); i < size; ++i)\r
+        DeviceInfo info(i);\r
+        if (info.isCompatible())\r
         {\r
-            const DeviceInfo& info = d[i];\r
-\r
-            if (info.supports(feature))\r
-                devs_filtered.push_back(info);\r
+            devices_.push_back(info);\r
         }\r
     }\r
+}\r
 \r
-    return devs_filtered;\r
+class DevicesGenerator : public ParamGeneratorInterface<DeviceInfo>\r
+{\r
+public:\r
+    ~DevicesGenerator();\r
+\r
+    ParamIteratorInterface<DeviceInfo>* Begin() const;\r
+    ParamIteratorInterface<DeviceInfo>* End() const;\r
+\r
+private:\r
+    class Iterator : public ParamIteratorInterface<DeviceInfo>\r
+    {\r
+    public:\r
+        Iterator(const ParamGeneratorInterface<DeviceInfo>* base, vector<DeviceInfo>::const_iterator iterator);\r
+\r
+        virtual ~Iterator();\r
+\r
+        virtual const ParamGeneratorInterface<DeviceInfo>* BaseGenerator() const;\r
+\r
+        virtual void Advance();\r
+\r
+        virtual ParamIteratorInterface<DeviceInfo>* Clone() const;\r
+\r
+        virtual const DeviceInfo* Current() const;\r
+\r
+        virtual bool Equals(const ParamIteratorInterface<DeviceInfo>& other) const;\r
+\r
+    private:\r
+        Iterator(const Iterator& other);\r
+\r
+        const ParamGeneratorInterface<DeviceInfo>* const base_;\r
+        vector<DeviceInfo>::const_iterator iterator_;\r
+\r
+        mutable DeviceInfo value_;\r
+    };\r
+};\r
+\r
+DevicesGenerator::~DevicesGenerator()\r
+{\r
+}\r
+\r
+ParamIteratorInterface<DeviceInfo>* DevicesGenerator::Begin() const\r
+{\r
+    return new Iterator(this, DeviceManager::instance().values().begin());\r
+}\r
+\r
+ParamIteratorInterface<DeviceInfo>* DevicesGenerator::End() const\r
+{\r
+    return new Iterator(this, DeviceManager::instance().values().end());\r
+}\r
+\r
+DevicesGenerator::Iterator::Iterator(const ParamGeneratorInterface<DeviceInfo>* base, vector<DeviceInfo>::const_iterator iterator)\r
+    : base_(base), iterator_(iterator)\r
+{\r
+}\r
+\r
+DevicesGenerator::Iterator::~Iterator()\r
+{\r
+}\r
+\r
+const ParamGeneratorInterface<DeviceInfo>* DevicesGenerator::Iterator::BaseGenerator() const\r
+{\r
+    return base_;\r
+}\r
+\r
+void DevicesGenerator::Iterator::Advance()\r
+{\r
+    ++iterator_;\r
+}\r
+\r
+ParamIteratorInterface<DeviceInfo>* DevicesGenerator::Iterator::Clone() const\r
+{\r
+    return new Iterator(*this);\r
+}\r
+\r
+const DeviceInfo* DevicesGenerator::Iterator::Current() const\r
+{\r
+    value_ = *iterator_;\r
+    return &value_;\r
+}\r
+\r
+bool DevicesGenerator::Iterator::Equals(const ParamIteratorInterface<DeviceInfo>& other) const\r
+{\r
+    GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())\r
+        << "The program attempted to compare iterators "\r
+        << "from different generators." << endl;\r
+\r
+    return iterator_ == CheckedDowncastToActualType<const Iterator>(&other)->iterator_;\r
+}\r
+\r
+DevicesGenerator::Iterator::Iterator(const Iterator& other) :\r
+    ParamIteratorInterface<DeviceInfo>(), base_(other.base_), iterator_(other.iterator_)\r
+{\r
+}\r
+\r
+ParamGenerator<DeviceInfo> DevicesGenerator_()\r
+{\r
+  return ParamGenerator<DeviceInfo>(new DevicesGenerator);\r
 }\r
 \r
 //////////////////////////////////////////////////////////////////////\r
@@ -250,7 +351,7 @@ void minMaxLocGold(const Mat& src, double* minVal_, double* maxVal_, Point* minL
 \r
 namespace\r
 {\r
-    template <typename T, typename OutT> string printMatValImpl(const Mat& m, Point p)\r
+    template <typename T, typename OutT> std::string printMatValImpl(const Mat& m, Point p)\r
     {\r
         const int cn = m.channels();\r
 \r
@@ -269,9 +370,9 @@ namespace
         return ostr.str();\r
     }\r
 \r
-    string printMatVal(const Mat& m, Point p)\r
+    std::string printMatVal(const Mat& m, Point p)\r
     {\r
-        typedef string (*func_t)(const Mat& m, Point p);\r
+        typedef std::string (*func_t)(const Mat& m, Point p);\r
 \r
         static const func_t funcs[] =\r
         {\r
modules/gpu/test/utility.hpp
index 3ad02de..b36f177 100644
@@ -80,14 +80,23 @@ cv::Mat readImageType(const std::string& fname, int type);
 //! return true if device supports specified feature and gpu module was built with support the feature.\r
 bool supportFeature(const cv::gpu::DeviceInfo& info, cv::gpu::FeatureSet feature);\r
 \r
-//! return all devices compatible with current gpu module build.\r
-const std::vector<cv::gpu::DeviceInfo>& devices();\r
+class DeviceManager\r
+{\r
+public:\r
+    static DeviceManager& instance();\r
+\r
+    void load(int i);\r
+    void loadAll();\r
+\r
+    const std::vector<cv::gpu::DeviceInfo>& values() const { return devices_; }\r
+\r
+private:\r
+    std::vector<cv::gpu::DeviceInfo> devices_;\r
+};\r
 \r
-//! return all devices compatible with current gpu module build which support specified feature.\r
-std::vector<cv::gpu::DeviceInfo> devices(cv::gpu::FeatureSet feature);\r
+testing::internal::ParamGenerator<cv::gpu::DeviceInfo> DevicesGenerator_();\r
 \r
-#define ALL_DEVICES testing::ValuesIn(devices())\r
-#define DEVICES(feature) testing::ValuesIn(devices(feature))\r
+#define ALL_DEVICES DevicesGenerator_()\r
 \r
 //////////////////////////////////////////////////////////////////////\r
 // Additional assertion\r
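
For context, ALL_DEVICES now expands to the lazy DevicesGenerator_() generator declared above, so it plugs directly into Google Test's value-parameterized test macros. Below is a minimal sketch of how a device-parameterized test consumes it, assuming the utility.hpp declarations are visible through precomp.hpp; the fixture and test names are illustrative and not part of this commit.

    #include "precomp.hpp"

    // Hypothetical smoke test; each instantiation receives one DeviceInfo
    // drawn from DeviceManager::instance().values().
    struct DeviceSmokeTest : testing::TestWithParam<cv::gpu::DeviceInfo>
    {
        virtual void SetUp()
        {
            // Make the parameter device current before the test body runs.
            cv::gpu::setDevice(GetParam().deviceID());
        }
    };

    TEST_P(DeviceSmokeTest, AllocateMatrix)
    {
        cv::gpu::GpuMat m(64, 64, CV_8UC1);
        ASSERT_FALSE(m.empty());
    }

    // ALL_DEVICES is evaluated when the parameterized tests are registered,
    // so it reflects whatever main() loaded via --device / loadAll().
    INSTANTIATE_TEST_CASE_P(GpuUtility, DeviceSmokeTest, ALL_DEVICES);

Because DevicesGenerator reads DeviceManager::instance().values() only when Begin()/End() are called, the set of instantiated tests follows the device selection made in main() rather than a list fixed at static-initialization time.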