Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18826
ghimport-source-id: 7ffa3bc7ef7402a6d6eb6ba5849e197019d77bf8
Stack from [ghstack](https://github.com/ezyang/ghstack):
* **#18826 [jit] run cpp tests for non-cuda builds in test_jit.py**
We did all the work of nicely separating our cpp tests that don't require
CUDA, but they aren't run from test_jit.py if CUDA is missing.
Reviewed By: ZolotukhinM
Differential Revision: D14766287
fbshipit-source-id: 9326b3a5c90f6c20fc8cfaf1a1885a363b91f30a
#endif
#define JIT_TEST(name) test##name();
-void runJITCPPTests() {
+void runJITCPPTests(bool runCuda) {
TH_FORALL_TESTS(JIT_TEST)
- TH_FORALL_TESTS_CUDA(JIT_TEST)
+ if (runCuda) {
+ TH_FORALL_TESTS_CUDA(JIT_TEST)
+ }
// This test is special since it requires prior setup in python.
// So it's included here but not in the pure cpp gtest suite
for node in g.nodes():
self.assertTrue(g2.findNode(node.kind()) is not None)
+ @unittest.skipIf(IS_WINDOWS, "NYI: JIT tests not yet supported on windows")
+ @unittest.skipIf(IS_SANDCASTLE, "gtest runs these in sandcastle")
+ @unittest.skipIf(RUN_CUDA, "covered by test_cpp_cuda")
+ @skipIfRocm
+ def test_cpp(self):
+ from cpp.jit import tests_setup
+ tests_setup.setup()
+ torch._C._jit_run_cpp_tests(run_cuda=False)
+ tests_setup.shutdown()
+
@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
@skipIfRocm
def test_cpp_cuda(self):
from cpp.jit import tests_setup
tests_setup.setup()
- torch._C._jit_run_cpp_tests()
+ torch._C._jit_run_cpp_tests(run_cuda=True)
tests_setup.shutdown()
def test_batchnorm(self):
} // anonymous namespace
#if defined(_WIN32)
-void runJITCPPTests() {
+void runJITCPPTests(bool runCuda) {
AT_ERROR("JIT tests not yet supported on Windows");
}
#else
-void runJITCPPTests();
+void runJITCPPTests(bool runCuda);
#endif
void initJITBindings(PyObject* module) {
[](std::shared_ptr<Graph> graph) { CreateAutodiffSubgraphs(graph); })
.def(
"_jit_run_cpp_tests",
- [] {
+ [](bool runCuda) {
// We have to release the GIL inside this method, because if we
// happen to initialize the autograd engine in these tests, the
// newly spawned worker threads will try to initialize their
// PyThreadState*, and they need the GIL for this.
AutoNoGIL _no_gil;
- return runJITCPPTests();
- })
+ return runJITCPPTests(runCuda);
+ },
+ py::arg("run_cuda"))
.def(
"_jit_flatten",
[](py::handle& obj) {