+/* This sample demonstrates the way you can perform independent tasks
+   on the different GPUs */
+\r
// Disable some warnings which are caused with CUDA headers\r
#pragma warning(disable: 4201 4408 4100)\r
\r
struct Worker { void operator()(int device_id) const; };\r
void destroyContexts();\r
\r
// Checks a CUDA driver API call; on failure prints the error code with its
// location, tears down both contexts and terminates the process.
// Wrapped in do { } while (0) so it behaves as a single statement (safe in
// if/else without braces), and the argument is captured into a local so an
// expression like cuInit(0) is evaluated exactly once.
#define safeCall(expr) do { \
        CUresult safe_call_code = (expr); \
        if (safe_call_code != CUDA_SUCCESS) { \
            cout << "CUDA driver API error: code " << safe_call_code \
                 << ", file " << __FILE__ << ", line " << __LINE__ << endl; \
            destroyContexts(); \
            exit(-1); \
        } \
    } while (0)
\r
-\r
// Each GPU is associated with its own context\r
CUcontext contexts[2];\r
\r
-\r
int main()\r
{\r
- if (getCudaEnabledDeviceCount() < 2)\r
+ int num_devices = getCudaEnabledDeviceCount();\r
+\r
+ if (num_devices < 2)\r
{\r
cout << "Two or more GPUs are required\n";\r
return -1;\r
}\r
\r
- cuSafeCall(cuInit(0));\r
+ for (int i = 0; i < num_devices; ++i)\r
+ {\r
+ if (!DeviceInfo(i).isCompatible())\r
+ {\r
+ cout << "GPU module isn't built for GPU #" << i << " (" << DeviceInfo(i).name() << ")";\r
+ return -1;\r
+ }\r
+ }\r
+\r
+ safeCall(cuInit(0));\r
\r
- // Create context for the first GPU\r
+ // Create context for GPU #0\r
CUdevice device;\r
- cuSafeCall(cuDeviceGet(&device, 0));\r
- cuSafeCall(cuCtxCreate(&contexts[0], 0, device));\r
+ safeCall(cuDeviceGet(&device, 0));\r
+ safeCall(cuCtxCreate(&contexts[0], 0, device));\r
\r
CUcontext prev_context;\r
- cuSafeCall(cuCtxPopCurrent(&prev_context));\r
+ safeCall(cuCtxPopCurrent(&prev_context));\r
\r
- // Create context for the second GPU\r
- cuSafeCall(cuDeviceGet(&device, 1));\r
- cuSafeCall(cuCtxCreate(&contexts[1], 0, device));\r
+ // Create context for GPU #1\r
+ safeCall(cuDeviceGet(&device, 1));\r
+ safeCall(cuCtxCreate(&contexts[1], 0, device));\r
\r
- cuSafeCall(cuCtxPopCurrent(&prev_context));\r
+ safeCall(cuCtxPopCurrent(&prev_context));\r
\r
// Execute calculation in two threads using two GPUs\r
int devices[] = {0, 1};\r
\r
void Worker::operator()(int device_id) const\r
{\r
- // Set proper context\r
- cuSafeCall(cuCtxPushCurrent(contexts[device_id]));\r
+ // Set the proper context\r
+ safeCall(cuCtxPushCurrent(contexts[device_id]));\r
\r
Mat src(1000, 1000, CV_32F);\r
Mat dst;\r
// CPU works\r
transpose(src, dst);\r
\r
+ // GPU works\r
GpuMat d_src(src);\r
GpuMat d_dst;\r
-\r
- // GPU works\r
transpose(d_src, d_dst);\r
\r
// Check results\r
bool passed = norm(dst - Mat(d_dst), NORM_INF) < 1e-3;\r
- cout << "GPU #" << device_id << ": "<< (passed ? "passed" : "FAILED") << endl;\r
+ cout << "GPU #" << device_id << " (" << DeviceInfo().name() << "): "\r
+ << (passed ? "passed" : "FAILED") << endl;\r
\r
// Deallocate data here, otherwise deallocation will be performed\r
// after context is extracted from the stack\r
d_dst.release();\r
\r
CUcontext prev_context;\r
- cuSafeCall(cuCtxPopCurrent(&prev_context));\r
+ safeCall(cuCtxPopCurrent(&prev_context));\r
}\r
\r
\r
void destroyContexts()\r
{\r
- cuSafeCall(cuCtxDestroy(contexts[0]));\r
- cuSafeCall(cuCtxDestroy(contexts[1]));\r
+ safeCall(cuCtxDestroy(contexts[0]));\r
+ safeCall(cuCtxDestroy(contexts[1]));\r
}\r
\r
#endif
\ No newline at end of file
--- /dev/null
+/* This sample demonstrates working on one piece of data using two GPUs.\r
+ It splits input into two parts and processes them separately on different\r
+ GPUs. */\r
+\r
+// Disable some warnings which are caused with CUDA headers\r
+#pragma warning(disable: 4201 4408 4100)\r
+\r
+#include <iostream>\r
+#include <cvconfig.h>\r
+#include <opencv2/core/core.hpp>\r
+#include <opencv2/highgui/highgui.hpp>\r
+#include <opencv2/gpu/gpu.hpp>\r
+\r
+#if !defined(HAVE_CUDA) || !defined(HAVE_TBB)\r
+\r
// Fallback entry point used when the sample is built without CUDA and/or TBB
// support: prints which prerequisite is missing and exits successfully.
// Note: `using namespace std;` is declared only in the full-featured branch
// below, so `cout` must be qualified here for this branch to compile.
int main()
{
#if !defined(HAVE_CUDA)
    std::cout << "CUDA support is required (CMake key 'WITH_CUDA' must be true).\n";
#endif

#if !defined(HAVE_TBB)
    std::cout << "TBB support is required (CMake key 'WITH_TBB' must be true).\n";
#endif

    return 0;
}
+\r
+#else\r
+\r
+#include <cuda.h>\r
+#include <cuda_runtime.h>\r
+#include "opencv2/core/internal.hpp" // For TBB wrappers\r
+\r
+using namespace std;\r
+using namespace cv;\r
+using namespace cv::gpu;\r
+\r
+struct Worker { void operator()(int device_id) const; };\r
+void destroyContexts();\r
+\r
// Checks a CUDA driver API call; on failure prints the error code with its
// location, tears down both contexts and terminates the process.
// Wrapped in do { } while (0) so it behaves as a single statement (safe in
// if/else without braces), and the argument is captured into a local so an
// expression like cuInit(0) is evaluated exactly once.
#define safeCall(expr) do { \
        CUresult safe_call_code = (expr); \
        if (safe_call_code != CUDA_SUCCESS) { \
            cout << "CUDA driver API error: code " << safe_call_code \
                 << ", file " << __FILE__ << ", line " << __LINE__ << endl; \
            destroyContexts(); \
            exit(-1); \
        } \
    } while (0)
+\r
+// Each GPU is associated with its own context\r
+CUcontext contexts[2];\r
+\r
+void inline contextOn(int id) \r
+{\r
+ safeCall(cuCtxPushCurrent(contexts[id]));\r
+}\r
+\r
+void inline contextOff() \r
+{\r
+ CUcontext prev_context;\r
+ safeCall(cuCtxPopCurrent(&prev_context));\r
+}\r
+\r
+GpuMat d_left[2];\r
+GpuMat d_right[2];\r
+StereoBM_GPU* bm[2];\r
+GpuMat d_result[2];\r
+Mat result;\r
+\r
+int main(int argc, char** argv)\r
+{\r
+ if (argc < 3)\r
+ {\r
+ cout << "Usage: stereo_multi_gpu <left_image> <right_image>\n";\r
+ return -1;\r
+ }\r
+\r
+ int num_devices = getCudaEnabledDeviceCount();\r
+\r
+ if (num_devices < 2)\r
+ {\r
+ cout << "Two or more GPUs are required\n";\r
+ return -1;\r
+ }\r
+\r
+ for (int i = 0; i < num_devices; ++i)\r
+ {\r
+ if (!DeviceInfo(i).isCompatible())\r
+ {\r
+ cout << "GPU module isn't built for GPU #" << i << " (" << DeviceInfo(i).name() << ")";\r
+ return -1;\r
+ }\r
+ }\r
+\r
+ // Load input data\r
+ Mat left = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);\r
+ Mat right = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);\r
+ if (left.empty())\r
+ {\r
+ cout << "Cannot open '" << argv[1] << "'\n";\r
+ return -1;\r
+ }\r
+ if (right.empty())\r
+ {\r
+ cout << "Cannot open '" << argv[2] << "'\n";\r
+ return -1;\r
+ }\r
+\r
+ safeCall(cuInit(0));\r
+\r
+ // Create context for the first GPU\r
+ CUdevice device;\r
+ safeCall(cuDeviceGet(&device, 0));\r
+ safeCall(cuCtxCreate(&contexts[0], 0, device));\r
+ contextOff();\r
+\r
+ // Create context for the second GPU\r
+ safeCall(cuDeviceGet(&device, 1));\r
+ safeCall(cuCtxCreate(&contexts[1], 0, device));\r
+ contextOff();\r
+\r
+ // Split source images for processing on the first GPU\r
+ contextOn(0);\r
+ d_left[0].upload(left.rowRange(0, left.rows / 2));\r
+ d_right[0].upload(right.rowRange(0, right.rows / 2));\r
+ bm[0] = new StereoBM_GPU();\r
+ contextOff();\r
+\r
+ // Split source images for processing on the second GPU\r
+ contextOn(1);\r
+ d_left[1].upload(left.rowRange(left.rows / 2, left.rows));\r
+ d_right[1].upload(right.rowRange(right.rows / 2, right.rows));\r
+ bm[1] = new StereoBM_GPU();\r
+ contextOff();\r
+\r
+ // Execute calculation in two threads using two GPUs\r
+ int devices[] = {0, 1};\r
+ parallel_do(devices, devices + 2, Worker());\r
+\r
+ // Release the first GPU resources\r
+ contextOn(0);\r
+ imshow("GPU #0 result", Mat(d_result[0]));\r
+ d_left[0].release();\r
+ d_right[0].release();\r
+ d_result[0].release();\r
+ delete bm[0];\r
+ contextOff();\r
+\r
+ // Release the second GPU resources\r
+ contextOn(1);\r
+ imshow("GPU #1 result", Mat(d_result[1]));\r
+ d_left[1].release();\r
+ d_right[1].release();\r
+ d_result[1].release();\r
+ delete bm[1];\r
+ contextOff();\r
+\r
+ waitKey();\r
+ destroyContexts();\r
+ return 0;\r
+}\r
+\r
+\r
+void Worker::operator()(int device_id) const\r
+{\r
+ contextOn(device_id);\r
+\r
+ bm[device_id]->operator()(d_left[device_id], d_right[device_id],\r
+ d_result[device_id]);\r
+\r
+ cout << "GPU #" << device_id << " (" << DeviceInfo().name()\r
+ << "): finished\n";\r
+\r
+ contextOff();\r
+}\r
+\r
+\r
+void destroyContexts()\r
+{\r
+ safeCall(cuCtxDestroy(contexts[0]));\r
+ safeCall(cuCtxDestroy(contexts[1]));\r
+}\r
+\r
+#endif\r