From: Brennan Vincent
Date: Wed, 12 Dec 2018 17:58:54 +0000 (-0800)
Subject: fix infinite loop when get_max_threads is nonzero but num_threads is 1
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~2296
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9d2955c39c1266e5071119b52d28c458393228a6;p=platform%2Fupstream%2Fpytorch.git

fix infinite loop when get_max_threads is nonzero but num_threads is 1

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15114

Differential Revision: D13431891

Pulled By: umanwizard

fbshipit-source-id: f968b8e50cf776c346d4a28d72b12e7856c95839
---

diff --git a/aten/src/ATen/native/TensorIteratorReduce.cpp b/aten/src/ATen/native/TensorIteratorReduce.cpp
index 41c0aa0..b5898c1 100644
--- a/aten/src/ATen/native/TensorIteratorReduce.cpp
+++ b/aten/src/ATen/native/TensorIteratorReduce.cpp
@@ -125,7 +125,7 @@ void TensorIterator::foreach_reduced_elt(const loop_subiter_t &loop) {
   if (tensor(0).numel() == 1) {
     loop(*this);
   }
-  else if (numel() < at::internal::GRAIN_SIZE || at::get_max_threads() == 1 || at::in_parallel_region()) {
+  else if (numel() < at::internal::GRAIN_SIZE || at::get_num_threads() <= 1 || at::in_parallel_region()) {
    auto reduce_dims = num_reduce_dims();
    auto non_reduced_shape = shape.slice(reduce_dims, shape.size() - reduce_dims);
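
For context, a minimal standalone sketch of the guard change, with stubbed thread-count functions standing in for the real `at::get_max_threads()` / `at::get_num_threads()`. The stub return values and the `foreach_reduced_elt_guard` helper are illustrative assumptions for the scenario named in the commit title, not the actual ATen implementation:

```cpp
#include <cstdio>

// Stand-ins for at::get_max_threads() / at::get_num_threads().
// Assumed scenario from the commit title: the maximum thread count
// is nonzero (here > 1) but only 1 thread is actually configured.
static int get_max_threads() { return 8; }  // threads available
static int get_num_threads() { return 1; }  // threads configured

// Illustrative guard mirroring the changed condition in
// TensorIterator::foreach_reduced_elt (GRAIN_SIZE and
// in_parallel_region checks elided for brevity).
static void foreach_reduced_elt_guard() {
  // Old guard: only max_threads == 1 forced the serial path, so a
  // build with several available threads but num_threads == 1 still
  // entered the parallel reduction, which never finishes with a
  // single thread (the infinite loop the commit fixes).
  if (get_max_threads() == 1) {
    std::puts("old guard: serial path");
  } else {
    std::puts("old guard: parallel path (hangs when num_threads == 1)");
  }

  // New guard: check the configured thread count instead, using <= 1
  // so one thread (or fewer) always falls back to the serial path.
  if (get_num_threads() <= 1) {
    std::puts("new guard: serial path");
  } else {
    std::puts("new guard: parallel path");
  }
}

int main() {
  foreach_reduced_elt_guard();
  return 0;
}
```

Note that besides swapping `get_max_threads()` for `get_num_threads()`, the diff also loosens the comparison from `== 1` to `<= 1`, so any configured thread count of one or fewer routes to the serial branch.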