From 7bad9ac78ae430b60745543cffa6002afb85e415 Mon Sep 17 00:00:00 2001
From: Victor Quach
Date: Tue, 17 Aug 2021 08:55:25 -0700
Subject: [PATCH] Fix flaky test for dp saved tensor hooks (#63324)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63324

Fix for https://www.internalfb.com/tasks/?t=98258963

`catch_warnings` seems to trigger only once in certain cases where it should
trigger twice. This test is only meant to check whether the hooks are
triggered or not, so changing the assertion to `self.assertGreater` is fine.

Test Plan: Imported from OSS

Reviewed By: albanD

Differential Revision: D30340833

Pulled By: Varal7

fbshipit-source-id: 1bfb9437befe9e8ab8f95efe5f513337fa9bdc5c
---
 test/test_autograd.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/test_autograd.py b/test/test_autograd.py
index 2b7db29..7200bd5 100644
--- a/test/test_autograd.py
+++ b/test/test_autograd.py
@@ -9373,7 +9373,7 @@ class TestMultithreadAutograd(TestCase):
                     else:
                         # DataParallel only uses one thread
                         # so hooks should be called here
-                        _self.assertEqual(len(w), 2)
+                        _self.assertGreater(len(w), 0)

         x = torch.ones(5, 5, requires_grad=True)
         model = torch.nn.DataParallel(Model())
@@ -9383,7 +9383,7 @@ class TestMultithreadAutograd(TestCase):
             with warnings.catch_warnings(record=True) as w:
                 y = x * x
                 # hooks should be called here
-                _self.assertEqual(len(w), 2)
+                _self.assertGreater(len(w), 0)

     def test_python_thread_in_middle(self):
         # User might write a network that starts on one CPU thread, then runs its second half
--
2.7.4
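
A minimal sketch of why an exact count of two recorded warnings is unreliable while "at least one" is stable. It assumes the torch.autograd.graph.saved_tensors_hooks context manager and a warning-emitting pack hook similar in spirit to the one used in the test; it is not the test itself.

    import warnings
    import torch

    def pack(tensor):
        # Hypothetical pack hook: warn every time autograd saves a tensor for backward.
        warnings.warn("pack hook called")
        return tensor

    def unpack(tensor):
        return tensor

    x = torch.ones(5, 5, requires_grad=True)
    with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
        with warnings.catch_warnings(record=True) as w:
            # mul saves both of its inputs for backward, so the pack hook runs
            # twice, but the default warning filter may collapse the duplicate
            # message into a single recorded entry.
            y = x * x
    # Robust check, mirroring the patched assertion: at least one warning fired.
    assert len(w) > 0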