Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18362
ghimport-source-id: 374b7ab97e2d6a894368007133201f510539296f
Stack from [ghstack](https://github.com/ezyang/ghstack):
* #18242 Test running a CUDA build on CPU machine.
* **#18362 Add ability to query if built with CUDA and MKL-DNN.**
Fixes #18108.
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Differential Revision: D14584430
fbshipit-source-id: 7605a1ac4e8f2a7c70d52e5a43ad7f03f0457473
#endif
}
+// Reports whether this build of ATen was compiled with MKL-DNN support.
+// Purely a compile-time property (decided by AT_MKLDNN_ENABLED()); it does
+// not probe the runtime environment.
+bool Context::hasMKLDNN() const {
+#if AT_MKLDNN_ENABLED()
+ return true;
+#else
+ return false;
+#endif
+}
+
bool Context::hasOpenMP() const {
#ifdef _OPENMP
return true;
bool hasOpenMP() const;
bool hasMKL() const;
bool hasLAPACK() const;
+ bool hasMKLDNN() const;
bool hasMAGMA() const {
return detail::getCUDAHooks().hasMAGMA();
}
return globalContext().hasMAGMA();
}
+// Convenience free-function wrapper: forwards to the global Context's
+// hasMKLDNN() build-support query.
+static inline bool hasMKLDNN() {
+ return globalContext().hasMKLDNN();
+}
+
static inline void manual_seed(uint64_t seed) {
globalContext().defaultGenerator(DeviceType::CPU).manualSeed(seed);
// NB: Sometimes we build with CUDA, but we don't have any GPUs
import shutil
import torch
import torch.cuda
+import torch.backends.cuda
import tempfile
import unittest
import warnings
self.assertTrue(grid_b2.equal(expected_grid_b))
self.assertTrue(grid_c2.equal(expected_grid_c))
- @unittest.skipIf(torch.cuda.is_available() or IS_SANDCASTLE, "CUDA is available, can't test CUDA not built error")
+ # NB: we must not be built with CUDA; if we are built with CUDA but no CUDA
+ # is available, we get a different error.
+ @unittest.skipIf(torch.backends.cuda.is_built() or IS_SANDCASTLE, "CUDA is built, can't test CUDA not built error")
def test_cuda_not_built(self):
msg = "Torch not compiled with CUDA enabled"
self.assertRaisesRegex(AssertionError, msg, lambda: torch.cuda.current_device())
import torch
+def is_built():
+ r"""Returns whether PyTorch is built with CUDA support. Note that this
+ doesn't necessarily mean CUDA is available; just that if this PyTorch
+ binary were run on a machine with working CUDA drivers and devices, we
+ would be able to use it."""
+ return torch._C.has_cuda
+
+
class ContextProp(object):
def __init__(self, getter, setter):
self.getter = getter
--- /dev/null
+import torch
+
+
+def is_available():
+ r"""Returns whether PyTorch is built with MKL-DNN support. This is a
+ build-time property (it reads a flag set when the binary was compiled);
+ it does not check the runtime environment."""
+ return torch._C.has_mkldnn
ASSERT_TRUE(set_module_attr("has_mkl", at::hasMKL() ? Py_True : Py_False));
ASSERT_TRUE(set_module_attr("has_lapack", at::hasLAPACK() ? Py_True : Py_False));
+#ifdef USE_CUDA
+ PyObject *has_cuda = Py_True;
+#else
+ PyObject *has_cuda = Py_False;
+#endif
+ ASSERT_TRUE(set_module_attr("has_cuda", has_cuda));
+
+ ASSERT_TRUE(set_module_attr("has_mkldnn", at::hasMKLDNN() ? Py_True : Py_False));
+
#ifdef _GLIBCXX_USE_CXX11_ABI
ASSERT_TRUE(set_module_attr("_GLIBCXX_USE_CXX11_ABI", _GLIBCXX_USE_CXX11_ABI ? Py_True : Py_False));
#else