From: Igor Fedan
Date: Tue, 2 Apr 2019 19:32:52 +0000 (-0700)
Subject: Fix flake8 issues in gragrad test
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~474
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=36237c489373e907803354852d2001828339a51a;p=platform%2Fupstream%2Fpytorch.git

Fix flake8 issues in gragrad test

Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/18727

Differential Revision: D14724887

Pulled By: ifedan

fbshipit-source-id: 8c1db6460303e746e4aea0142302b8d61277c067
---

diff --git a/test/test_autograd.py b/test/test_autograd.py
index 7bbb868..08b5530 100644
--- a/test/test_autograd.py
+++ b/test/test_autograd.py
@@ -2292,7 +2292,8 @@ class TestAutograd(TestCase):
             return output
 
         f_args_variable = torch.ones(S, S, requires_grad=True)
-        self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero', lambda: gradcheck(autograd_fn, f_args_variable, eps=1e-6, atol=PRECISION))
+        self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero',
+                               lambda: gradcheck(autograd_fn, f_args_variable, eps=1e-6, atol=PRECISION))
 
     def test_variable_traverse(self):
         def get_out_and_unrefed_cycle():
diff --git a/torch/autograd/gradcheck.py b/torch/autograd/gradcheck.py
index 6dd4cc7..4615464 100644
--- a/torch/autograd/gradcheck.py
+++ b/torch/autograd/gradcheck.py
@@ -203,8 +203,7 @@ def gradcheck(func, inputs, eps=1e-6, atol=1e-5, rtol=1e-3, raise_exception=True
     tupled_inputs = _as_tuple(inputs)
 
     if any(t.is_sparse for t in tupled_inputs if isinstance(t, torch.Tensor)) and not check_sparse_nnz:
-        return fail_test('gradcheck expects all tensor inputs '
-                         'are dense when check_sparse_nnz is set to False.')
+        return fail_test('gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False.')
 
     # Make sure that gradients are saved for all inputs
     any_input_requiring_grad = False
diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py
index 775485b..1fea9d5 100644
--- a/torch/distributed/distributed_c10d.py
+++ b/torch/distributed/distributed_c10d.py
@@ -12,7 +12,6 @@ from . import BroadcastOptions, AllreduceOptions, ReduceOptions, \
 from . import ReduceOp
 from . import PrefixStore
 from . import ProcessGroupGloo
-from . import Reducer
 
 _MPI_AVAILABLE = True
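
For context on the gradcheck.py hunk: the message joined onto one line there is what gradcheck reports when it is handed a sparse tensor while check_sparse_nnz is left at its default of False. Below is a minimal sketch of both the passing and the failing path, assuming a PyTorch build contemporary with this commit (the check_sparse_nnz keyword has since been deprecated in newer releases, where the message differs):

    import torch
    from torch.autograd import gradcheck

    # gradcheck compares the analytical Jacobian of fn against a
    # finite-difference estimate; double precision keeps the numerical
    # comparison stable.
    def fn(x):
        return (x * x).sum()

    dense = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
    print(gradcheck(fn, (dense,), eps=1e-6, atol=1e-5))  # prints: True

    # A sparse input with check_sparse_nnz=False (the default) takes the
    # fail_test branch edited above; with raise_exception=True (also the
    # default) that surfaces as a RuntimeError carrying the message shown
    # in the diff.
    sparse = dense.detach().to_sparse().requires_grad_(True)
    try:
        gradcheck(lambda t: t.to_dense().sum(), (sparse,))
    except RuntimeError as err:
        print(err)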