From 3cd0a4ac153ea8404f0e960ddacf00098689e600 Mon Sep 17 00:00:00 2001
From: Erjia Guan
Date: Thu, 2 Sep 2021 12:25:15 -0700
Subject: [PATCH] Fix test_ind_worker_queue by setting max_num_worker based on
 system resource (#63779)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63779

Fixes #63657

Test Plan: Imported from OSS

Reviewed By: gchanan

Differential Revision: D30494185

Pulled By: ejguan

fbshipit-source-id: d1bd24299b25d589889604aaf18ad347bdff4df4
---
 test/test_dataloader.py | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/test/test_dataloader.py b/test/test_dataloader.py
index c768246..5050fec 100644
--- a/test/test_dataloader.py
+++ b/test/test_dataloader.py
@@ -2320,9 +2320,24 @@ class TestIndividualWorkerQueue(TestCase):
     current_worker_idx = 0
 
     def test_ind_worker_queue(self):
+        max_num_workers = None
+        if hasattr(os, 'sched_getaffinity'):
+            try:
+                max_num_workers = len(os.sched_getaffinity(0))
+            except Exception:
+                pass
+        if max_num_workers is None:
+            cpu_count = os.cpu_count()
+            if cpu_count is not None:
+                # Use half number of CPUs
+                max_num_workers = cpu_count // 2
+
+        if max_num_workers is None:
+            max_num_workers = 1
+
         for batch_size in (8, 16, 32, 64):
-            for num_workers in range(1, 6):
-                self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers)
+            for num_workers in range(0, min(6, max_num_workers)):
+                self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers + 1)
 
 
 class SetAffinityDataset(IterableDataset):
-- 
2.7.4