[nvptx-arch] Dynamically load the CUDA runtime if not found during the build
author Joseph Huber <jhuber6@vols.utk.edu>
Mon, 16 Jan 2023 17:18:54 +0000 (11:18 -0600)
committer Joseph Huber <jhuber6@vols.utk.edu>
Mon, 16 Jan 2023 19:13:47 +0000 (13:13 -0600)
Much like the changes in D141859, this patch allows the `nvptx-arch`
tool to be built and shipped with every distribution of LLVM / Clang.
This makes it more reliable for our toolchains to depend on. The
changes here configure a version that dynamically loads the CUDA
driver library at runtime if it was not found at build time.
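
Concretely, the dynamic path is the usual dlopen/dlsym pattern via
llvm::sys::DynamicLibrary. As a rough illustration only (not the
committed code, and with a placeholder CUresult typedef standing in
for the real cuda.h enum), resolving a single driver entry point looks
roughly like this:

  // Sketch: resolve one CUDA driver entry point at runtime instead of
  // linking against libcuda.so at build time.
  #include "llvm/Support/DynamicLibrary.h"
  #include <cstdio>
  #include <string>

  typedef int CUresult;                    // stand-in for the cuda.h enum
  static CUresult (*cuInit)(unsigned int); // resolved at runtime

  int main() {
    std::string ErrMsg;
    auto Lib = llvm::sys::DynamicLibrary::getPermanentLibrary(
        "libcuda.so", &ErrMsg);
    if (!Lib.isValid()) {
      fprintf(stderr, "failed to load libcuda.so: %s\n", ErrMsg.c_str());
      return 1;
    }
    // Look up the symbol by name and cast it to the expected type.
    cuInit = reinterpret_cast<decltype(cuInit)>(
        Lib.getAddressOfSymbol("cuInit"));
    return (cuInit && cuInit(0) == 0) ? 0 : 1;
  }

When cuda.h and libcuda.so are found at build time, the tool instead
links against the library directly and loadCUDA() becomes a no-op.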

Reviewed By: tianshilei1992

Differential Revision: https://reviews.llvm.org/D141861

clang/tools/nvptx-arch/CMakeLists.txt
clang/tools/nvptx-arch/NVPTXArch.cpp

index 94f544a..1b6362c 100644
@@ -6,6 +6,8 @@
 # //
 # //===--------------------------------------------------------------------===//
 
+set(LLVM_LINK_COMPONENTS Support)
+add_clang_tool(nvptx-arch NVPTXArch.cpp)
 
 # TODO: This is deprecated. Since CMake 3.17 we can use FindCUDAToolkit instead.
 find_package(CUDA QUIET)
@@ -15,14 +17,8 @@ if (NOT cuda-library AND CUDA_FOUND)
   find_library(cuda-library NAMES cuda HINTS "${CUDA_LIBDIR}/stubs")
 endif()
 
-if (NOT CUDA_FOUND OR NOT cuda-library)
-  message(STATUS "Not building nvptx-arch: cuda runtime not found")
-  return()
+# If we found the CUDA library directly we just dynamically link against it.
+if (CUDA_FOUND AND cuda-library)
+  target_include_directories(nvptx-arch PRIVATE ${CUDA_INCLUDE_DIRS})
+  target_link_libraries(nvptx-arch PRIVATE ${cuda-library})
 endif()
-
-add_clang_tool(nvptx-arch NVPTXArch.cpp)
-
-set_target_properties(nvptx-arch PROPERTIES INSTALL_RPATH_USE_LINK_PATH ON)
-target_include_directories(nvptx-arch PRIVATE ${CUDA_INCLUDE_DIRS})
-
-target_link_libraries(nvptx-arch PRIVATE ${cuda-library})
index f70acf9..acde975 100644
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <cstdio>
+#include <memory>
+
 #if defined(__has_include)
 #if __has_include("cuda.h")
 #include "cuda.h"
 #endif
 
 #if !CUDA_HEADER_FOUND
-int main() { return 1; }
-#else
+typedef enum cudaError_enum {
+  CUDA_SUCCESS = 0,
+  CUDA_ERROR_NO_DEVICE = 100,
+} CUresult;
 
-#include <cstdint>
-#include <cstdio>
+typedef enum CUdevice_attribute_enum {
+  CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75,
+  CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76,
+} CUdevice_attribute;
+
+typedef uint32_t CUdevice;
+
+CUresult (*cuInit)(unsigned int);
+CUresult (*cuDeviceGetCount)(int *);
+CUresult (*cuGetErrorString)(CUresult, const char **);
+CUresult (*cuDeviceGet)(CUdevice *, int);
+CUresult (*cuDeviceGetAttribute)(int *, CUdevice_attribute, CUdevice);
+
+constexpr const char *DynamicCudaPath = "libcuda.so";
+
+llvm::Error loadCUDA() {
+  std::string ErrMsg;
+  auto DynlibHandle = std::make_unique<llvm::sys::DynamicLibrary>(
+      llvm::sys::DynamicLibrary::getPermanentLibrary(DynamicCudaPath, &ErrMsg));
+  if (!DynlibHandle->isValid()) {
+    return llvm::createStringError(llvm::inconvertibleErrorCode(),
+                                   "Failed to 'dlopen' %s\n", DynamicCudaPath);
+  }
+#define DYNAMIC_INIT(SYMBOL)                                                   \
+  {                                                                            \
+    void *SymbolPtr = DynlibHandle->getAddressOfSymbol(#SYMBOL);               \
+    if (!SymbolPtr)                                                            \
+      return llvm::createStringError(llvm::inconvertibleErrorCode(),           \
+                                     "Failed to 'dlsym' " #SYMBOL);            \
+    SYMBOL = reinterpret_cast<decltype(SYMBOL)>(SymbolPtr);                    \
+  }
+  DYNAMIC_INIT(cuInit);
+  DYNAMIC_INIT(cuDeviceGetCount);
+  DYNAMIC_INIT(cuGetErrorString);
+  DYNAMIC_INIT(cuDeviceGet);
+  DYNAMIC_INIT(cuDeviceGetAttribute);
+#undef DYNAMIC_INIT
+  return llvm::Error::success();
+}
+#else
+llvm::Error loadCUDA() { return llvm::Error::success(); }
+#endif
 
 static int handleError(CUresult Err) {
   const char *ErrStr = nullptr;
@@ -38,7 +86,13 @@ static int handleError(CUresult Err) {
   return EXIT_FAILURE;
 }
 
-int main() {
+int main(int argc, char *argv[]) {
+  // Attempt to load the NVPTX driver runtime.
+  if (llvm::Error Err = loadCUDA()) {
+    logAllUnhandledErrors(std::move(Err), llvm::errs());
+    return EXIT_FAILURE;
+  }
+
   if (CUresult Err = cuInit(0)) {
     if (Err == CUDA_ERROR_NO_DEVICE)
       return EXIT_SUCCESS;
@@ -68,5 +122,3 @@ int main() {
   }
   return EXIT_SUCCESS;
 }
-
-#endif