\r
/////////////////////////// Multi GPU Manager //////////////////////////////\r
\r
- // Provides functionality for working with many GPUs. Object of this\r
- // class must be created before any OpenCV GPU call and no call must\r
- // be done after its destruction.\r
+ // Provides functionality for working with many GPUs\r
class CV_EXPORTS MultiGpuMgr\r
{\r
public:\r
MultiGpuMgr();\r
~MultiGpuMgr();\r
\r
+ // Must be called before any other GPU calls\r
+ void init();\r
+\r
// Makes the given GPU active\r
void gpuOn(int gpu_id);\r
\r
// Stub implementations used when GPU support is unavailable: every
// entry point raises via throw_nogpu() — presumably the no-CUDA build
// path (TODO confirm against the enclosing #if guards, not visible here).
class MultiGpuMgr::Impl {};
MultiGpuMgr::MultiGpuMgr() { throw_nogpu(); }
+void MultiGpuMgr::init() { throw_nogpu(); }
void MultiGpuMgr::gpuOn(int) { throw_nogpu(); }
void MultiGpuMgr::gpuOff() { throw_nogpu(); }
void gpuOn(int gpu_id)
{
+ if (gpu_id < 0 || gpu_id >= num_devices_)
+ CV_Error(CV_StsBadArg, "MultiGpuMgr::gpuOn: GPU ID is out of range");
cuSafeCall(cuCtxPushCurrent(contexts_[gpu_id]));
}
num_devices_ = getCudaEnabledDeviceCount();
contexts_.resize(num_devices_);
+ cuSafeCall(cuInit(0));
+
CUdevice device;
CUcontext prev_context;
for (int i = 0; i < num_devices_; ++i)
}
-MultiGpuMgr::MultiGpuMgr(): impl_(new Impl()) {}
+MultiGpuMgr::MultiGpuMgr() {}
MultiGpuMgr::~MultiGpuMgr() {}
+void MultiGpuMgr::init()
+{
+ impl_ = Ptr<Impl>(new Impl());
+}
+
+
void MultiGpuMgr::gpuOn(int gpu_id)
{
+ if (impl_.empty())
+ CV_Error(CV_StsNullPtr, "MultiGpuMgr::gpuOn: must be initialized before any calls");
impl_->gpuOn(gpu_id);
}
void MultiGpuMgr::gpuOff()
{
+ if (impl_.empty())
+ CV_Error(CV_StsNullPtr, "MultiGpuMgr::gpuOff: must be initialized before any calls");
impl_->gpuOff();
}
// Init CUDA Driver API\r
safeCall(cuInit(0));\r
\r
- // Create context for the first GPU\r
+ // Create context for GPU #0\r
CUdevice device;\r
safeCall(cuDeviceGet(&device, 0));\r
safeCall(cuCtxCreate(&contexts[0], 0, device));\r
contextOff();\r
\r
- // Create context for the second GPU\r
+ // Create context for GPU #1\r
safeCall(cuDeviceGet(&device, 1));\r
safeCall(cuCtxCreate(&contexts[1], 0, device));\r
contextOff();\r
\r
- // Split source images for processing on the first GPU\r
+ // Split source images for processing on GPU #0\r
contextOn(0);\r
d_left[0].upload(left.rowRange(0, left.rows / 2));\r
d_right[0].upload(right.rowRange(0, right.rows / 2));\r
bm[0] = new StereoBM_GPU();\r
contextOff();\r
\r
- // Split source images for processing on the second GPU\r
+ // Split source images for processing on the GPU #1\r
contextOn(1);\r
d_left[1].upload(left.rowRange(left.rows / 2, left.rows));\r
d_right[1].upload(right.rowRange(right.rows / 2, right.rows));\r
--- /dev/null
+/* This sample demonstrates the way you can perform independent tasks
+   on different GPUs */
+\r
+// Disable some warnings which are caused by CUDA headers
+#if defined(_MSC_VER)\r
+#pragma warning(disable: 4201 4408 4100)\r
+#endif\r
+\r
+#include <iostream>\r
+#include <cvconfig.h>\r
+#include <opencv2/core/core.hpp>\r
+#include <opencv2/gpu/gpu.hpp>\r
+\r
+#if !defined(HAVE_CUDA) || !defined(HAVE_TBB)\r
+\r
// Fallback entry point for builds without CUDA and/or TBB support:
// reports which dependency is missing and exits.
// NOTE: the `using namespace std;` at the bottom of this file lives in
// the other branch of the #if, so cout must be qualified here.
int main()
{
#if !defined(HAVE_CUDA)
    std::cout << "CUDA support is required (CMake key 'WITH_CUDA' must be true).\n";
#endif

#if !defined(HAVE_TBB)
    std::cout << "TBB support is required (CMake key 'WITH_TBB' must be true).\n";
#endif

    return 0;
}
+\r
+#else\r
+\r
+#include "opencv2/core/internal.hpp" // For TBB wrappers\r
+\r
+using namespace std;\r
+using namespace cv;\r
+using namespace cv::gpu;\r
+\r
// Functor executed by parallel_do: runs the per-GPU task for the given device.
struct Worker { void operator()(int device_id) const; };

// Global manager; init() must be called before any gpuOn/gpuOff calls.
MultiGpuMgr multi_gpu_mgr;
+\r
+int main()\r
+{\r
+ int num_devices = getCudaEnabledDeviceCount();\r
+ if (num_devices < 2)\r
+ {\r
+ cout << "Two or more GPUs are required\n";\r
+ return -1;\r
+ }\r
+ for (int i = 0; i < num_devices; ++i)\r
+ {\r
+ DeviceInfo dev_info(i);\r
+ if (!dev_info.isCompatible())\r
+ {\r
+ cout << "GPU module isn't built for GPU #" << i << " ("\r
+ << dev_info.name() << ", CC " << dev_info.majorVersion()\r
+ << dev_info.minorVersion() << "\n";\r
+ return -1;\r
+ }\r
+ }\r
+\r
+ multi_gpu_mgr.init();\r
+\r
+ // Execute calculation in two threads using two GPUs\r
+ int devices[] = {0, 2};\r
+ parallel_do(devices, devices + 2, Worker());\r
+\r
+ return 0;\r
+}\r
+\r
+\r
+void Worker::operator()(int device_id) const\r
+{\r
+ multi_gpu_mgr.gpuOn(device_id);\r
+\r
+ Mat src(1000, 1000, CV_32F);\r
+ Mat dst;\r
+\r
+ RNG rng(0);\r
+ rng.fill(src, RNG::UNIFORM, 0, 1);\r
+\r
+ // CPU works\r
+ transpose(src, dst);\r
+\r
+ // GPU works\r
+ GpuMat d_src(src);\r
+ GpuMat d_dst;\r
+ transpose(d_src, d_dst);\r
+\r
+ // Check results\r
+ bool passed = norm(dst - Mat(d_dst), NORM_INF) < 1e-3;\r
+ cout << "GPU #" << device_id << " (" << DeviceInfo().name() << "): "\r
+ << (passed ? "passed" : "FAILED") << endl;\r
+\r
+ // Deallocate data here, otherwise deallocation will be performed\r
+ // after context is extracted from the stack\r
+ d_src.release();\r
+ d_dst.release();\r
+\r
+ multi_gpu_mgr.gpuOff();\r
+}\r
+\r
+#endif\r
--- /dev/null
+/* This sample demonstrates working on one piece of data using two GPUs.\r
+ It splits input into two parts and processes them separately on different\r
+ GPUs. */\r
+\r
+// Disable some warnings which are caused by CUDA headers
+#if defined(_MSC_VER)\r
+#pragma warning(disable: 4201 4408 4100)\r
+#endif\r
+\r
+#include <iostream>\r
+#include <cvconfig.h>\r
+#include <opencv2/core/core.hpp>\r
+#include <opencv2/highgui/highgui.hpp>\r
+#include <opencv2/gpu/gpu.hpp>\r
+\r
+#if !defined(HAVE_CUDA) || !defined(HAVE_TBB)\r
+\r
// Fallback entry point for builds without CUDA and/or TBB support:
// reports which dependency is missing and exits.
// NOTE: the `using namespace std;` at the bottom of this file lives in
// the other branch of the #if, so cout must be qualified here.
int main()
{
#if !defined(HAVE_CUDA)
    std::cout << "CUDA support is required (CMake key 'WITH_CUDA' must be true).\n";
#endif

#if !defined(HAVE_TBB)
    std::cout << "TBB support is required (CMake key 'WITH_TBB' must be true).\n";
#endif

    return 0;
}
+\r
+#else\r
+\r
+#include "opencv2/core/internal.hpp" // For TBB wrappers\r
+\r
+using namespace std;\r
+using namespace cv;\r
+using namespace cv::gpu;\r
+\r
// Functor executed by parallel_do: runs stereo matching on the given device.
struct Worker { void operator()(int device_id) const; };

// Global manager; init() must be called before any gpuOn/gpuOff calls.
MultiGpuMgr multi_gpu_mgr;

// GPUs data
// Per-device halves of the input pair and the matcher that processes them;
// index is the device ID (0 or 1).
GpuMat d_left[2];
GpuMat d_right[2];
StereoBM_GPU* bm[2];
GpuMat d_result[2];

// CPU result
Mat result;
+\r
+int main(int argc, char** argv)\r
+{\r
+ if (argc < 3)\r
+ {\r
+ cout << "Usage: stereo_multi_gpu <left_image> <right_image>\n";\r
+ return -1;\r
+ }\r
+\r
+ int num_devices = getCudaEnabledDeviceCount();\r
+ if (num_devices < 2)\r
+ {\r
+ cout << "Two or more GPUs are required\n";\r
+ return -1;\r
+ }\r
+ for (int i = 0; i < num_devices; ++i)\r
+ {\r
+ DeviceInfo dev_info(i);\r
+ if (!dev_info.isCompatible())\r
+ {\r
+ cout << "GPU module isn't built for GPU #" << i << " ("\r
+ << dev_info.name() << ", CC " << dev_info.majorVersion()\r
+ << dev_info.minorVersion() << "\n";\r
+ return -1;\r
+ }\r
+ }\r
+\r
+ // Load input data\r
+ Mat left = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);\r
+ Mat right = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);\r
+ if (left.empty())\r
+ {\r
+ cout << "Cannot open '" << argv[1] << "'\n";\r
+ return -1;\r
+ }\r
+ if (right.empty())\r
+ {\r
+ cout << "Cannot open '" << argv[2] << "'\n";\r
+ return -1;\r
+ }\r
+\r
+ multi_gpu_mgr.init();\r
+\r
+ // Split source images for processing on the GPU #0\r
+ multi_gpu_mgr.gpuOn(0);\r
+ d_left[0].upload(left.rowRange(0, left.rows / 2));\r
+ d_right[0].upload(right.rowRange(0, right.rows / 2));\r
+ bm[0] = new StereoBM_GPU();\r
+ multi_gpu_mgr.gpuOff();\r
+\r
+ // Split source images for processing on the GPU #1\r
+ multi_gpu_mgr.gpuOn(1);\r
+ d_left[1].upload(left.rowRange(left.rows / 2, left.rows));\r
+ d_right[1].upload(right.rowRange(right.rows / 2, right.rows));\r
+ bm[1] = new StereoBM_GPU();\r
+ multi_gpu_mgr.gpuOff();\r
+\r
+ // Execute calculation in two threads using two GPUs\r
+ int devices[] = {0, 1};\r
+ parallel_do(devices, devices + 2, Worker());\r
+\r
+ // Release the first GPU resources\r
+ multi_gpu_mgr.gpuOn(0);\r
+ imshow("GPU #0 result", Mat(d_result[0]));\r
+ d_left[0].release();\r
+ d_right[0].release();\r
+ d_result[0].release();\r
+ delete bm[0];\r
+ multi_gpu_mgr.gpuOff();\r
+\r
+ // Release the second GPU resources\r
+ multi_gpu_mgr.gpuOn(1);\r
+ imshow("GPU #1 result", Mat(d_result[1]));\r
+ d_left[1].release();\r
+ d_right[1].release();\r
+ d_result[1].release();\r
+ delete bm[1];\r
+ multi_gpu_mgr.gpuOff();\r
+\r
+ waitKey();\r
+ return 0;\r
+}\r
+\r
+\r
+void Worker::operator()(int device_id) const\r
+{\r
+ multi_gpu_mgr.gpuOn(device_id);\r
+\r
+ bm[device_id]->operator()(d_left[device_id], d_right[device_id],\r
+ d_result[device_id]);\r
+\r
+ cout << "GPU #" << device_id << " (" << DeviceInfo().name()\r
+ << "): finished\n";\r
+\r
+ multi_gpu_mgr.gpuOff();\r
+}\r
+\r
+#endif\r