From: Michael Suo
Date: Thu, 4 Apr 2019 05:18:09 +0000 (-0700)
Subject: run cpp tests for non-cuda builds in test_jit.py (#18826)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~429
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0a4117a36e4f6f6fa5928094000a868dc3784e77;p=platform%2Fupstream%2Fpytorch.git

run cpp tests for non-cuda builds in test_jit.py (#18826)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18826
ghimport-source-id: 7ffa3bc7ef7402a6d6eb6ba5849e197019d77bf8

Stack from [ghstack](https://github.com/ezyang/ghstack):
* **#18826 [jit] run cpp tests for non-cuda builds in test_jit.py**

We did all the work of nicely separating our cpp tests that don't require
CUDA, but they aren't run from test_jit.py if CUDA is missing.

Reviewed By: ZolotukhinM

Differential Revision: D14766287

fbshipit-source-id: 9326b3a5c90f6c20fc8cfaf1a1885a363b91f30a
---

diff --git a/test/cpp/jit/test.cpp b/test/cpp/jit/test.cpp
index 20cab91..cc5b26f 100644
--- a/test/cpp/jit/test.cpp
+++ b/test/cpp/jit/test.cpp
@@ -90,9 +90,11 @@ TH_FORALL_TESTS_CUDA(JIT_GTEST_CUDA)
 #endif
 
 #define JIT_TEST(name) test##name();
-void runJITCPPTests() {
+void runJITCPPTests(bool runCuda) {
   TH_FORALL_TESTS(JIT_TEST)
-  TH_FORALL_TESTS_CUDA(JIT_TEST)
+  if (runCuda) {
+    TH_FORALL_TESTS_CUDA(JIT_TEST)
+  }
 
   // This test is special since it requires prior setup in python.
   // So it's included here but not in the pure cpp gtest suite
diff --git a/test/test_jit.py b/test/test_jit.py
index 5763645..98d8bd4 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -1484,13 +1484,23 @@ class TestJit(JitTestCase):
         for node in g.nodes():
             self.assertTrue(g2.findNode(node.kind()) is not None)
 
+    @unittest.skipIf(IS_WINDOWS, "NYI: JIT tests not yet supported on windows")
+    @unittest.skipIf(IS_SANDCASTLE, "gtest runs these in sandcastle")
+    @unittest.skipIf(RUN_CUDA, "covered by test_cpp_cuda")
+    @skipIfRocm
+    def test_cpp(self):
+        from cpp.jit import tests_setup
+        tests_setup.setup()
+        torch._C._jit_run_cpp_tests(run_cuda=False)
+        tests_setup.shutdown()
+
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
     @skipIfRocm
     def test_cpp_cuda(self):
         from cpp.jit import tests_setup
         tests_setup.setup()
-        torch._C._jit_run_cpp_tests()
+        torch._C._jit_run_cpp_tests(run_cuda=True)
         tests_setup.shutdown()
 
     def test_batchnorm(self):
diff --git a/torch/csrc/jit/init.cpp b/torch/csrc/jit/init.cpp
index e693a15..922873f 100644
--- a/torch/csrc/jit/init.cpp
+++ b/torch/csrc/jit/init.cpp
@@ -79,11 +79,11 @@ bool loadPythonClasses() {
 } // anonymous namespace
 
 #if defined(_WIN32)
-void runJITCPPTests() {
+void runJITCPPTests(bool runCuda) {
   AT_ERROR("JIT tests not yet supported on Windows");
 }
 #else
-void runJITCPPTests();
+void runJITCPPTests(bool runCuda);
 #endif
 
 void initJITBindings(PyObject* module) {
@@ -182,14 +182,15 @@ void initJITBindings(PyObject* module) {
           [](std::shared_ptr<Graph> graph) { CreateAutodiffSubgraphs(graph); })
       .def(
           "_jit_run_cpp_tests",
-          [] {
+          [](bool runCuda) {
            // We have to release the GIL inside this method, because if we
            // happen to initialize the autograd engine in these tests, the
            // newly spawned worker threads will try to initialize their
            // PyThreadState*, and they need the GIL for this.
            AutoNoGIL _no_gil;
-            return runJITCPPTests();
-          })
+            return runJITCPPTests(runCuda);
+          },
+          py::arg("run_cuda"))
       .def(
           "_jit_flatten",
           [](py::handle& obj) {
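
For readers unfamiliar with the macro machinery the test.cpp hunk touches, below is a minimal, self-contained C++ sketch of the X-macro registration pattern that runJITCPPTests(bool runCuda) relies on. The test names (Foo, Bar, CudaOnly) and their bodies are hypothetical stand-ins; the real TH_FORALL_TESTS / TH_FORALL_TESTS_CUDA lists and test functions live in test/cpp/jit/ and are not shown in this diff.

// Minimal sketch of the X-macro test registration pattern (test names are hypothetical).
#include <iostream>

void testFoo()      { std::cout << "Foo ran\n"; }
void testBar()      { std::cout << "Bar ran\n"; }
void testCudaOnly() { std::cout << "CudaOnly ran\n"; }

// One list for CUDA-free tests, one for tests that need a GPU.
#define TH_FORALL_TESTS(_) \
  _(Foo)                   \
  _(Bar)

#define TH_FORALL_TESTS_CUDA(_) \
  _(CudaOnly)

// Each registered name expands to a call of the matching test function.
#define JIT_TEST(name) test##name();

void runJITCPPTests(bool runCuda) {
  TH_FORALL_TESTS(JIT_TEST)
  if (runCuda) {
    TH_FORALL_TESTS_CUDA(JIT_TEST)
  }
}

int main() {
  runJITCPPTests(/*runCuda=*/false); // roughly what test_jit.py's new test_cpp exercises
  runJITCPPTests(/*runCuda=*/true);  // roughly what test_cpp_cuda exercises
}

With this structure, the patch only has to gate the expansion of the CUDA list behind the new runCuda flag, and the Python binding forwards run_cuda straight through to it.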