From 173f224570017b4b1a3a1a13d0bff280a54d9cd9 Mon Sep 17 00:00:00 2001
From: Edward Yang
Date: Sat, 30 Mar 2019 08:58:10 -0700
Subject: [PATCH] Turn on F401: Unused import warning. (#18598)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18598
ghimport-source-id: c74597e5e7437e94a43c163cee0639b20d0d0c6a

Stack from [ghstack](https://github.com/ezyang/ghstack):
* **#18598 Turn on F401: Unused import warning.**

This was requested by someone at Facebook; this lint is turned on for
Facebook by default. "Sure, why not."

I had to noqa a number of imports in __init__. Hypothetically we're
supposed to use __all__ in this case, but I was too lazy to fix it.
Left for future work.

Be careful! flake8-2 and flake8-3 behave differently with respect to
import resolution for # type: comments. flake8-3 will report an import
as unused; flake8-2 will not. For now, I just noqa'd all these sites.

All the changes were done by hand.
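To make the __init__ situation concrete, here is a sketch of a
hypothetical package __init__.py (not one of the files touched here),
showing the noqa approach taken in this patch next to the __all__
approach left for future work:

```python
# Hypothetical package __init__.py that re-exports a submodule symbol.

# What this patch does: keep the "unused" re-export and silence F401.
from .grad_mode import no_grad  # noqa: F401

# The alternative left for future work: list the name in __all__ instead,
# which also tells pyflakes the import is an intentional re-export.
__all__ = ['no_grad']
```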
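The # type: comment case looks like this (again a hypothetical snippet):
under flake8-3 the import below is reported as unused even though the
type comment references it; under flake8-2 it is not flagged. For now
such sites just get a noqa:

```python
from typing import Optional  # noqa: F401 (only referenced from the type comment)


def first_or_default(xs, default):
    # type: (Optional[list], int) -> int
    return xs[0] if xs else default
```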
Signed-off-by: Edward Z. Yang
Differential Revision: D14687478
fbshipit-source-id: 30d532381e914091aadfa0d2a5a89404819663e3
---
 .circleci/cimodel/lib/visualization.py | 2 +-
 .flake8 | 2 +-
 .../pytorch/perf_test/compare_with_baseline.py | 1 -
 benchmarks/fastrnns/__init__.py | 4 +--
 benchmarks/fastrnns/profile.py | 1 -
 benchmarks/fastrnns/test.py | 1 -
 docs/cpp/source/conf.py | 3 ---
 docs/source/conf.py | 2 +-
 setup.py | 3 +--
 test/common_methods_invocations.py | 6 ++---
 test/common_nn.py | 2 +-
 test/custom_operator/test_custom_ops.py | 1 -
 test/onnx/debug_embed_params.py | 2 --
 test/onnx/export_onnx_tests_filter.py | 3 ---
 test/onnx/export_onnx_tests_generator.py | 1 -
 test/onnx/model_defs/__init__.py | 8 +++---
 test/onnx/model_defs/squeezenet.py | 1 -
 test/onnx/model_defs/super_resolution.py | 1 -
 test/onnx/model_defs/word_language_model.py | 1 -
 test/onnx/test_models.py | 11 +-------
 test/onnx/test_operators.py | 4 +--
 test/onnx/test_pytorch_common.py | 2 +-
 test/onnx/test_pytorch_helper.py | 1 -
 test/onnx/test_pytorch_onnx_caffe2.py | 1 -
 test/optim/test.py | 1 -
 test/test_autograd.py | 13 ++++-----
 test/test_cuda.py | 6 ++---
 test/test_dataloader.py | 3 ---
 test/test_distributed.py | 2 --
 test/test_docs_coverage.py | 2 --
 test/test_indexing.py | 1 -
 test/test_jit.py | 15 ++---------
 test/test_jit_fuser.py | 7 +----
 test/test_module/future_div.py | 2 +-
 test/test_module/no_future_div.py | 2 +-
 test/test_multiprocessing.py | 1 -
 test/test_nn.py | 8 +++---
 test/test_quantized.py | 6 +----
 test/test_sparse.py | 1 -
 test/test_thd_distributed.py | 1 -
 test/test_torch.py | 6 +----
 test/test_type_hints.py | 4 +--
 test/test_utils.py | 9 +++----
 tools/amd_build/build_amd.py | 1 -
 tools/autograd/gen_variable_type.py | 1 -
 tools/build_libtorch.py | 3 ---
 tools/build_pytorch_libs.py | 12 ++++-----
 tools/clang_format.py | 2 --
 tools/cwrap/__init__.py | 2 +-
 tools/cwrap/plugins/ArgumentReferences.py | 1 -
 tools/cwrap/plugins/CuDNNPlugin.py | 2 --
 tools/cwrap/plugins/GILRelease.py | 1 -
 tools/cwrap/plugins/OptionalArguments.py | 3 ---
 tools/cwrap/plugins/__init__.py | 24 ++++++++---------
 tools/download_mnist.py | 1 -
 tools/jit/gen_jit_dispatch.py | 7 ++---
 tools/nnwrap/__init__.py | 2 +-
 tools/nnwrap/generate_wrappers.py | 3 +--
 tools/pyi/gen_pyi.py | 4 ---
 tools/setup_helpers/dist_check.py | 3 +--
 tools/setup_helpers/miopen.py | 5 +---
 tools/setup_helpers/nccl.py | 2 --
 tools/setup_helpers/nvtoolext.py | 1 -
 tools/shared/__init__.py | 5 ++--
 torch/__init__.py | 8 +++---
 torch/_six.py | 4 +--
 torch/_tensor_str.py | 4 +--
 torch/_thnn/utils.py | 4 ---
 torch/autograd/__init__.py | 10 +++----
 torch/autograd/_functions/__init__.py | 2 +-
 torch/autograd/_functions/utils.py | 1 -
 torch/autograd/gradcheck.py | 1 -
 torch/autograd/profiler.py | 5 ----
 torch/cuda/__init__.py | 8 +++---
 torch/cuda/comm.py | 5 ++--
 torch/distributed/__init__.py | 4 +--
 torch/distributed/distributed_c10d.py | 5 +++-
 torch/distributed/launch.py | 3 ---
 torch/distributions/__init__.py | 2 ++
 torch/distributions/categorical.py | 2 +-
 torch/distributions/chi2.py | 1 -
 torch/distributions/fishersnedecor.py | 1 -
 torch/distributions/gamma.py | 2 +-
 torch/distributions/half_cauchy.py | 1 -
 torch/distributions/half_normal.py | 1 -
 torch/distributions/kl.py | 1 -
 torch/distributions/log_normal.py | 1 -
 torch/distributions/logistic_normal.py | 2 +-
 torch/distributions/pareto.py | 1 -
 torch/distributions/studentT.py | 1 -
 torch/distributions/utils.py | 1 -
 torch/for_onnx/__init__.py | 2 +-
 torch/hub.py | 6 +----
 torch/jit/__init__.py | 14 ++++------
 torch/jit/_pickle.py | 2 --
 torch/jit/annotations.py | 31 +++++++++++++++++++++-
 torch/jit/frontend.py | 2 --
 torch/jit/quantized.py | 6 +----
 torch/multiprocessing/__init__.py | 8 +++---
 torch/multiprocessing/reductions.py | 1 -
 torch/nn/__init__.py | 10 +++----
 torch/nn/_functions/thnn/__init__.py | 6 ++---
 torch/nn/_functions/thnn/auto.py | 2 +-
 torch/nn/_reduction.py | 1 -
 torch/nn/functional.py | 5 +---
 torch/nn/init.py | 1 -
 torch/nn/modules/distance.py | 1 -
 torch/nn/modules/loss.py | 3 ---
 torch/nn/modules/normalization.py | 1 -
 torch/nn/modules/pooling.py | 2 --
 torch/nn/modules/rnn.py | 1 -
 torch/nn/modules/sparse.py | 2 +-
 torch/nn/modules/upsampling.py | 3 ---
 torch/nn/parallel/__init__.py | 2 +-
 torch/nn/parallel/deprecated/distributed.py | 1 -
 torch/nn/parallel/distributed.py | 1 -
 torch/nn/utils/__init__.py | 12 ++++-----
 torch/nn/utils/spectral_norm.py | 1 -
 torch/onnx/__init__.py | 3 ---
 torch/onnx/symbolic.py | 7 +----
 torch/onnx/utils.py | 3 ---
 torch/optim/__init__.py | 24 ++++++++---------
 torch/optim/lr_scheduler.py | 1 -
 torch/optim/rprop.py | 1 -
 torch/random.py | 1 -
 torch/utils/bottleneck/__main__.py | 3 ---
 torch/utils/collect_env.py | 2 --
 torch/utils/data/__init__.py | 8 +++---
 torch/utils/data/_utils/__init__.py | 2 +-
 torch/utils/data/_utils/pin_memory.py | 2 +-
 torch/utils/data/_utils/signal_handling.py | 4 +--
 131 files changed, 175 insertions(+), 333 deletions(-)

diff --git a/.circleci/cimodel/lib/visualization.py b/.circleci/cimodel/lib/visualization.py index c842bf5..c583fe4 100644 --- a/.circleci/cimodel/lib/visualization.py +++ b/.circleci/cimodel/lib/visualization.py @@ -22,7 +22,7 @@ def handle_missing_graphviz(f): calls to the draw() method of the returned object to do nothing. """ try: - import pygraphviz + import pygraphviz # noqa: F401 return f except ModuleNotFoundError:
diff --git a/.flake8 b/.flake8 index 8105e91..68441f7 100644 --- a/.flake8 +++ b/.flake8 @@ -7,7 +7,7 @@ max-line-length = 120 # C408 ignored because we like the dict keyword argument syntax # E501 is not flexible enough, we're using B950 instead ignore = - E203,E305,E402,E501,E721,E741,F401,F403,F405,F821,F841,F999,W503,W504,C408, + E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408, # these ignores are from flake8-bugbear; please fix! B007,B008, # these ignores are from flake8-comprehensions; please fix!
diff --git a/.jenkins/pytorch/perf_test/compare_with_baseline.py b/.jenkins/pytorch/perf_test/compare_with_baseline.py index e4f6001..95f60ed 100644 --- a/.jenkins/pytorch/perf_test/compare_with_baseline.py +++ b/.jenkins/pytorch/perf_test/compare_with_baseline.py @@ -1,7 +1,6 @@ import sys import json import math -import numpy import argparse parser = argparse.ArgumentParser() diff --git a/benchmarks/fastrnns/__init__.py b/benchmarks/fastrnns/__init__.py index f66d75a..743ceae 100644 --- a/benchmarks/fastrnns/__init__.py +++ b/benchmarks/fastrnns/__init__.py @@ -1,5 +1,5 @@ -from .cells import * -from .factory import * +from .cells import * # noqa: F401 +from .factory import * # noqa: F401 # (output, next_state) = cell(input, state) seqLength = 100 diff --git a/benchmarks/fastrnns/profile.py b/benchmarks/fastrnns/profile.py index 049287c..3ba4c03 100644 --- a/benchmarks/fastrnns/profile.py +++ b/benchmarks/fastrnns/profile.py @@ -1,5 +1,4 @@ import argparse -import os import subprocess import sys import time diff --git a/benchmarks/fastrnns/test.py b/benchmarks/fastrnns/test.py index 6c8e4e1..6cc68cc 100644 --- a/benchmarks/fastrnns/test.py +++ b/benchmarks/fastrnns/test.py @@ -2,7 +2,6 @@ import argparse import torch import torch.nn as nn -from .cells import lstm_cell from .factory import pytorch_lstm_creator, varlen_pytorch_lstm_creator from .runner import get_nn_runners diff --git a/docs/cpp/source/conf.py b/docs/cpp/source/conf.py index 5569a39..35051ed 100644 --- a/docs/cpp/source/conf.py +++ b/docs/cpp/source/conf.py @@ -20,11 +20,8 @@ import os # sys.path.insert(0, os.path.abspath('.')) -import sys import textwrap -import pytorch_sphinx_theme - # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. 
diff --git a/docs/source/conf.py b/docs/source/conf.py index 97d4ade..a479fc0 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -25,7 +25,7 @@ import os import torch try: - import torchvision + import torchvision # noqa: F401 except ImportError: import warnings warnings.warn('unable to load "torchvision" package') diff --git a/setup.py b/setup.py index d501c54..baa5db5 100644 --- a/setup.py +++ b/setup.py @@ -142,7 +142,7 @@ # we will search for libraries in these paths from __future__ import print_function -from setuptools import setup, Extension, distutils, Command, find_packages +from setuptools import setup, Extension, distutils, find_packages from distutils import core, dir_util from distutils.core import Distribution from distutils.errors import DistutilsArgError @@ -151,7 +151,6 @@ import setuptools.command.install import distutils.command.clean import distutils.sysconfig import filecmp -import platform import subprocess import shutil import sys diff --git a/test/common_methods_invocations.py b/test/common_methods_invocations.py index abb7856..43ef996 100644 --- a/test/common_methods_invocations.py +++ b/test/common_methods_invocations.py @@ -1,9 +1,9 @@ import torch -from torch._six import inf, nan, istuple -from functools import reduce, wraps +from torch._six import inf, istuple +from functools import reduce from operator import mul, itemgetter import collections -from torch.autograd import Variable, Function, detect_anomaly +from torch.autograd import Variable from torch.testing import make_non_contiguous from common_utils import (skipIfNoLapack, prod_single_zero, random_square_matrix_of_rank, diff --git a/test/common_nn.py b/test/common_nn.py index 08903bf..f7eeebd 100644 --- a/test/common_nn.py +++ b/test/common_nn.py @@ -13,7 +13,7 @@ import torch.nn as nn import torch.nn.functional as F from torch.nn.functional import _Reduction from common_utils import TestCase, to_gpu, freeze_rng_state, is_iterable, \ - TEST_WITH_ROCM, skipIfRocm + TEST_WITH_ROCM from common_cuda import TEST_CUDA from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors from torch.autograd import Variable diff --git a/test/custom_operator/test_custom_ops.py b/test/custom_operator/test_custom_ops.py index a7e7824..7b73592 100644 --- a/test/custom_operator/test_custom_ops.py +++ b/test/custom_operator/test_custom_ops.py @@ -1,4 +1,3 @@ -import argparse import os.path import tempfile import unittest diff --git a/test/onnx/debug_embed_params.py b/test/onnx/debug_embed_params.py index e5c3e34..1447031 100644 --- a/test/onnx/debug_embed_params.py +++ b/test/onnx/debug_embed_params.py @@ -4,12 +4,10 @@ from __future__ import print_function from __future__ import unicode_literals import sys -import itertools import torch import torch.jit from torch.autograd import Variable -import torch.autograd.function as function import onnx import caffe2.python.onnx.backend as c2 diff --git a/test/onnx/export_onnx_tests_filter.py b/test/onnx/export_onnx_tests_filter.py index 2e80c58..9ff82e8 100644 --- a/test/onnx/export_onnx_tests_filter.py +++ b/test/onnx/export_onnx_tests_filter.py @@ -5,12 +5,9 @@ from __future__ import unicode_literals import argparse import glob -import numpy as np import onnx.backend.test -import caffe2.python.onnx.backend as c2 import os import shutil -from onnx import numpy_helper from test_caffe2_common import run_generated_test import google.protobuf.text_format import test_onnx_common diff --git a/test/onnx/export_onnx_tests_generator.py 
b/test/onnx/export_onnx_tests_generator.py index a04dc42..79d525b 100644 --- a/test/onnx/export_onnx_tests_generator.py +++ b/test/onnx/export_onnx_tests_generator.py @@ -13,7 +13,6 @@ import shutil import torch import traceback -import test_pytorch_common import test_onnx_common from common_nn import module_tests from test_nn import new_module_tests diff --git a/test/onnx/model_defs/__init__.py b/test/onnx/model_defs/__init__.py index 8f07b0a..46a551c 100644 --- a/test/onnx/model_defs/__init__.py +++ b/test/onnx/model_defs/__init__.py @@ -1,4 +1,4 @@ -from .squeezenet import * -from .super_resolution import * -from .op_test import * -from .srresnet import * +from .squeezenet import * # noqa: F401 +from .super_resolution import * # noqa: F401 +from .op_test import * # noqa: F401 +from .srresnet import * # noqa: F401 diff --git a/test/onnx/model_defs/squeezenet.py b/test/onnx/model_defs/squeezenet.py index 2ee956b..984f724 100644 --- a/test/onnx/model_defs/squeezenet.py +++ b/test/onnx/model_defs/squeezenet.py @@ -1,4 +1,3 @@ -import math import torch import torch.nn as nn import torch.nn.init as init diff --git a/test/onnx/model_defs/super_resolution.py b/test/onnx/model_defs/super_resolution.py index 619d5f4..958d2f9 100644 --- a/test/onnx/model_defs/super_resolution.py +++ b/test/onnx/model_defs/super_resolution.py @@ -1,4 +1,3 @@ -import torch import torch.nn as nn import torch.nn.init as init diff --git a/test/onnx/model_defs/word_language_model.py b/test/onnx/model_defs/word_language_model.py index 34dc84c..ba400bc 100644 --- a/test/onnx/model_defs/word_language_model.py +++ b/test/onnx/model_defs/word_language_model.py @@ -3,7 +3,6 @@ import torch import torch.nn as nn -from torch.autograd import Variable class RNNModel(nn.Module): diff --git a/test/onnx/test_models.py b/test/onnx/test_models.py index f9bbc10..04aca5a 100644 --- a/test/onnx/test_models.py +++ b/test/onnx/test_models.py @@ -5,7 +5,6 @@ from torchvision.models.resnet import resnet50 from torchvision.models.vgg import vgg16, vgg16_bn, vgg19, vgg19_bn from model_defs.mnist import MNIST -from model_defs.word_language_model import RNNModel from model_defs.squeezenet import SqueezeNet from model_defs.super_resolution import SuperResolutionNet from model_defs.srresnet import SRResNet @@ -17,17 +16,9 @@ from test_pytorch_common import TestCase, run_tests, skipIfNoLapack import torch import torch.onnx import torch.onnx.utils -from torch.autograd import Variable, Function -from torch.nn import Module +from torch.autograd import Variable from torch.onnx import OperatorExportTypes -import onnx -import onnx.checker -import onnx.helper - -import google.protobuf.text_format - -import io import unittest import caffe2.python.onnx.backend as backend diff --git a/test/onnx/test_operators.py b/test/onnx/test_operators.py index 17f81eb..061c3b0 100644 --- a/test/onnx/test_operators.py +++ b/test/onnx/test_operators.py @@ -1,4 +1,4 @@ -from test_pytorch_common import TestCase, run_tests, skipIfNoLapack, flatten +from test_pytorch_common import TestCase, run_tests, flatten import torch import torch.onnx @@ -10,11 +10,9 @@ import itertools import io import unittest import inspect -import argparse import glob import os import shutil -import sys import common_utils as common diff --git a/test/onnx/test_pytorch_common.py b/test/onnx/test_pytorch_common.py index ce61b5c..4780187 100644 --- a/test/onnx/test_pytorch_common.py +++ b/test/onnx/test_pytorch_common.py @@ -13,7 +13,7 @@ import torch.autograd.function as function pytorch_test_dir = 
os.path.dirname(os.path.dirname(os.path.realpath(__file__))) sys.path.insert(-1, pytorch_test_dir) -from common_utils import * +from common_utils import * # noqa: F401 torch.set_default_tensor_type('torch.FloatTensor') diff --git a/test/onnx/test_pytorch_helper.py b/test/onnx/test_pytorch_helper.py index 70687fa..b4e7856 100644 --- a/test/onnx/test_pytorch_helper.py +++ b/test/onnx/test_pytorch_helper.py @@ -1,7 +1,6 @@ # Some standard imports import numpy as np from torch import nn -from torch.autograd import Variable import torch.onnx import torch.nn.init as init from caffe2.python.model_helper import ModelHelper diff --git a/test/onnx/test_pytorch_onnx_caffe2.py b/test/onnx/test_pytorch_onnx_caffe2.py index 45d243d..92c7efb 100644 --- a/test/onnx/test_pytorch_onnx_caffe2.py +++ b/test/onnx/test_pytorch_onnx_caffe2.py @@ -3,7 +3,6 @@ from __future__ import division from __future__ import print_function from __future__ import unicode_literals -from functools import wraps import numpy as np import sys import unittest diff --git a/test/optim/test.py b/test/optim/test.py index 459bc0f..58e0377 100644 --- a/test/optim/test.py +++ b/test/optim/test.py @@ -1,7 +1,6 @@ import json import torch import torch.legacy.optim as optim -from pprint import pprint def rosenbrock(tensor): diff --git a/test/test_autograd.py b/test/test_autograd.py index aad83df..d966685 100644 --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -8,8 +8,8 @@ import warnings from copy import deepcopy from collections import OrderedDict from itertools import product -from operator import mul, itemgetter -from functools import reduce, wraps +from operator import mul +from functools import reduce from torch._six import inf, nan, istuple from torch.autograd.gradcheck import gradgradcheck, gradcheck from torch.autograd.function import once_differentiable @@ -17,14 +17,11 @@ from torch.autograd.profiler import profile from torch.utils.checkpoint import checkpoint from common_utils import (TEST_MKL, TestCase, run_tests, skipIfNoLapack, suppress_warnings, skipIfRocm, - prod_single_zero, random_square_matrix_of_rank, - random_symmetric_matrix, random_symmetric_psd_matrix, - random_symmetric_pd_matrix, make_nonzero_det, - random_fullrank_matrix_distinct_singular_value, load_tests) + load_tests) from common_cuda import TEST_CUDA from torch.autograd import Variable, Function, detect_anomaly from torch.autograd.function import InplaceFunction -from torch.testing import make_non_contiguous, randn_like +from torch.testing import randn_like from common_methods_invocations import (method_tests, create_input, unpack_variables, EXCLUDE_FUNCTIONAL, EXCLUDE_GRADCHECK, @@ -32,7 +29,7 @@ from common_methods_invocations import (method_tests, EXCLUDE_GRADGRADCHECK_BY_TEST_NAME, exclude_tensor_method, mask_not_all_zeros, - L, S) + S) # load_tests from common_utils is used to automatically filter tests for # sharding on sandcastle. 
This line silences flake warnings diff --git a/test/test_cuda.py b/test/test_cuda.py index 3d42369..0665421 100644 --- a/test/test_cuda.py +++ b/test/test_cuda.py @@ -1,7 +1,5 @@ import io -import math import tempfile -import re import unittest import sys from itertools import repeat @@ -19,9 +17,9 @@ from torch._six import inf, nan from test_torch import _TestTorchMixin from common_methods_invocations import tri_tests_args, tri_large_tests_args, \ - run_additional_tri_tests, _compare_trilu_indices, _compare_large_trilu_indices + _compare_trilu_indices, _compare_large_trilu_indices from common_utils import TestCase, get_gpu_type, to_gpu, freeze_rng_state, run_tests, \ - PY3, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, skipIfRocm, TEST_NUMPY, TEST_WITH_ROCM, load_tests, iter_indices + PY3, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, skipIfRocm, TEST_NUMPY, TEST_WITH_ROCM, load_tests # load_tests from common_utils is used to automatically filter tests for # sharding on sandcastle. This line silences flake warnings diff --git a/test/test_dataloader.py b/test/test_dataloader.py index 56053fa..fc5a158 100644 --- a/test/test_dataloader.py +++ b/test/test_dataloader.py @@ -3,13 +3,10 @@ import sys import errno import os import ctypes -import signal import torch import gc import time -import traceback import unittest -import subprocess import itertools import warnings from torch import multiprocessing as mp diff --git a/test/test_distributed.py b/test/test_distributed.py index 879b927..ffcdacb 100644 --- a/test/test_distributed.py +++ b/test/test_distributed.py @@ -16,11 +16,9 @@ import torch.cuda import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F -import torch.optim as optim from common_utils import TestCase, run_tests from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT -import common_utils as common BACKEND = os.environ["BACKEND"] TEMP_DIR = os.environ["TEMP_DIR"] diff --git a/test/test_docs_coverage.py b/test/test_docs_coverage.py index 3b565c3..8fff3f5 100644 --- a/test/test_docs_coverage.py +++ b/test/test_docs_coverage.py @@ -2,8 +2,6 @@ import torch import unittest import os import re -import ast -import _ast import textwrap diff --git a/test/test_indexing.py b/test/test_indexing.py index adfa778..fcc65bc 100644 --- a/test/test_indexing.py +++ b/test/test_indexing.py @@ -1,6 +1,5 @@ from common_utils import TestCase, run_tests import torch -import warnings from torch import tensor import unittest diff --git a/test/test_jit.py b/test/test_jit.py index 3f33683..379594a 100644 --- a/test/test_jit.py +++ b/test/test_jit.py @@ -12,9 +12,6 @@ from contextlib import contextmanager from itertools import product, chain import torch.jit.frontend from torch.autograd import Variable, Function -from torch.nn import Module -from torch.autograd.function import traceable -from torch.testing import assert_allclose from torch.onnx import OperatorExportTypes from torch._six import inf, PY2, builtins, StringIO from common_utils import TestCase, run_tests, IS_WINDOWS, TEST_WITH_UBSAN, \ @@ -25,7 +22,6 @@ from textwrap import dedent from functools import wraps import os import io -import itertools import sys import unittest import inspect @@ -46,14 +42,13 @@ from common_methods_invocations import method_tests as autograd_method_tests from common_methods_invocations import create_input, unpack_variables, \ exclude_tensor_method, non_differentiable, EXCLUDE_GRADCHECK, EXCLUDE_FUNCTIONAL from torch.testing import 
FileCheck -from torch._C import TensorType, TupleType, FloatType, IntType, \ - ListType, StringType, DictType +from torch._C import TensorType from copy import deepcopy import random from typing import List, Dict, Optional, Tuple from torch.jit.frontend import NotSupportedError from torch import Tensor -from torch.jit.annotations import BroadcastingList2, BroadcastingList3 +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 # For testing truediv in python 2 from test_module.future_div import div_int_future, div_float_future @@ -6727,8 +6722,6 @@ a") @unittest.skipIf(not PY35, "Python 3.5 needed") def test_type_annotation_py3(self): - import importlib.util - code = dedent(""" import torch from torch import Tensor @@ -9349,8 +9342,6 @@ a") foo(torch.ones([123])) # wrong size def test_builtin_error_messsage(self): - from torch.nn.modules.utils import _single, _pair, _triple, _quadruple - with self.assertRaisesRegex(RuntimeError, "arguments for call are not valid"): @torch.jit.script def close_match(x): @@ -11020,8 +11011,6 @@ class TestEndToEndHybridFrontendModels(JitTestCase): @staticmethod def _test_super_resolution(self, device, check_export_import=True): - import torch.nn.init as init - class Net(nn.Module): def __init__(self, upscale_factor): diff --git a/test/test_jit_fuser.py b/test/test_jit_fuser.py index 7a3c6cb..c435f8f 100644 --- a/test/test_jit_fuser.py +++ b/test/test_jit_fuser.py @@ -3,17 +3,12 @@ from __future__ import division from __future__ import print_function from __future__ import unicode_literals -import functools -import os import unittest -import sys import torch -import torch.autograd.function as function from torch import Tensor -from common_utils import TestCase, run_tests, IS_WINDOWS, \ +from common_utils import IS_WINDOWS, \ skipIfRocm, IS_SANDCASTLE -from typing import List, Dict, Optional, Tuple from test_jit import JitTestCase, enable_cpu_fuser, RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, \ backward_graph diff --git a/test/test_module/future_div.py b/test/test_module/future_div.py index 4329365..2cdbf9c 100644 --- a/test/test_module/future_div.py +++ b/test/test_module/future_div.py @@ -1,5 +1,5 @@ from __future__ import division -import torch +import torch # noqa: F401 def div_int_future(): diff --git a/test/test_module/no_future_div.py b/test/test_module/no_future_div.py index 1b1b9f0..32e008e 100644 --- a/test/test_module/no_future_div.py +++ b/test/test_module/no_future_div.py @@ -1,4 +1,4 @@ -import torch +import torch # noqa: F401 def div_int_nofuture(): diff --git a/test/test_multiprocessing.py b/test/test_multiprocessing.py index ec989c8..ce764b0 100644 --- a/test/test_multiprocessing.py +++ b/test/test_multiprocessing.py @@ -13,7 +13,6 @@ import torch.utils.hooks from torch.nn import Parameter from common_utils import (TestCase, run_tests, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ASAN, load_tests, slowTest) -from multiprocessing.reduction import ForkingPickler # load_tests from common_utils is used to automatically filter tests for # sharding on sandcastle. 
This line silences flake warnings diff --git a/test/test_nn.py b/test/test_nn.py index 09dceee..25fb4fd 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -11,8 +11,6 @@ from itertools import repeat, product from functools import wraps, reduce from operator import mul from collections import OrderedDict -import hashlib -import os import threading import torch @@ -29,9 +27,9 @@ from torch.autograd import Variable, gradcheck from torch.autograd.gradcheck import gradgradcheck from torch.nn import Parameter from torch.nn.parallel._functions import Broadcast -from common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, TEST_WITH_ROCM, \ - TEST_NUMPY, TEST_SCIPY, IS_WINDOWS, download_file, PY3, PY34, to_gpu, \ - get_function_arglist, skipCUDAMemoryLeakCheckIf, load_tests +from common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \ + TEST_NUMPY, TEST_SCIPY, download_file, PY3, PY34, to_gpu, \ + get_function_arglist, load_tests from common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, \ TEST_CUDNN_VERSION from common_nn import NNTestCase, ModuleTest, CriterionTest, TestBase, \ diff --git a/test/test_quantized.py b/test/test_quantized.py index 648be98..1ddc24b 100644 --- a/test/test_quantized.py +++ b/test/test_quantized.py @@ -1,13 +1,9 @@ import torch import torch.jit -import torch.nn as nn -import torch.nn.functional as F import numpy as np import unittest from caffe2.python import core -from common_utils import TestCase, run_tests, IS_WINDOWS, TEST_WITH_UBSAN, \ - skipIfRocm, skipIfNoLapack, suppress_warnings, load_tests, IS_SANDCASTLE, \ - freeze_rng_state, set_rng_seed +from common_utils import TestCase, run_tests def canonical(graph): diff --git a/test/test_sparse.py b/test/test_sparse.py index fd1db27..e2a5dfd 100644 --- a/test/test_sparse.py +++ b/test/test_sparse.py @@ -1,5 +1,4 @@ import torch -from torch import sparse import itertools import functools diff --git a/test/test_thd_distributed.py b/test/test_thd_distributed.py index 9c3c500..7af4c94 100644 --- a/test/test_thd_distributed.py +++ b/test/test_thd_distributed.py @@ -15,7 +15,6 @@ import torch.cuda import torch.distributed.deprecated as dist import torch.nn as nn import torch.nn.functional as F -import torch.optim as optim from common_utils import TestCase, run_tests from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR diff --git a/test/test_torch.py b/test/test_torch.py index 7ae8627..2abde8e 100644 --- a/test/test_torch.py +++ b/test/test_torch.py @@ -3,7 +3,6 @@ import io import os import math import random -import operator import copy import shutil import torch @@ -17,7 +16,7 @@ import gzip import types import textwrap import re -from torch._utils_internal import get_file_path, get_file_path_2 +from torch._utils_internal import get_file_path_2 from torch.utils.dlpack import from_dlpack, to_dlpack from torch._utils import _rebuild_tensor from torch._six import inf, nan, string_classes, istuple @@ -2032,7 +2031,6 @@ class _TestTorchMixin(object): def _test_int_pow(self, cast): if not TEST_NUMPY: return - import numpy as np def check_against_np(tensor, exp): tensor_np = tensor.cpu().numpy() @@ -4669,7 +4667,6 @@ class _TestTorchMixin(object): # Test non-contiguous inputs. 
if not TEST_NUMPY: return - import numpy from numpy.linalg import solve A = cast(random_fullrank_matrix_distinct_singular_value(2, 2)).permute(1, 0, 2) b = cast(torch.randn(2, 2, 2)).permute(2, 1, 0) @@ -6218,7 +6215,6 @@ class _TestTorchMixin(object): # Test non-contiguous inputs. if not TEST_NUMPY: return - import numpy from numpy.linalg import solve A = random_symmetric_pd_matrix(2, 2) b = torch.randn(2, 2, 2) diff --git a/test/test_type_hints.py b/test/test_type_hints.py index c2df94d..c244bcc 100644 --- a/test/test_type_hints.py +++ b/test/test_type_hints.py @@ -1,6 +1,6 @@ from __future__ import print_function import unittest -from common_utils import TestCase, run_tests, download_file +from common_utils import TestCase, run_tests import tempfile import torch import re @@ -10,7 +10,7 @@ import subprocess import inspect try: - import mypy + import mypy # noqa: F401 HAVE_MYPY = True except ImportError: HAVE_MYPY = False diff --git a/test/test_utils.py b/test/test_utils.py index d1ad25f..d042be1 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -2,22 +2,19 @@ from __future__ import print_function import sys import os import re -import math import shutil import random import tempfile import unittest -import traceback import torch import torch.nn as nn import torch.utils.data import torch.cuda -import warnings from torch.utils.checkpoint import checkpoint, checkpoint_sequential import torch.hub as hub from torch.autograd._functions.utils import prepare_onnx_paddings from torch.autograd._functions.utils import check_onnx_broadcast -from common_utils import IS_WINDOWS, IS_PPC, skipIfRocm, load_tests +from common_utils import skipIfRocm, load_tests # load_tests from common_utils is used to automatically filter tests for # sharding on sandcastle. This line silences flake warnings @@ -34,7 +31,7 @@ skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision") HAS_CUDA = torch.cuda.is_available() -from common_utils import TestCase, run_tests, download_file +from common_utils import TestCase, run_tests class RandomDatasetMock(object): @@ -326,7 +323,7 @@ test_dir = os.path.abspath(os.path.dirname(str(__file__))) class TestFFI(TestCase): def test_deprecated(self): with self.assertRaisesRegex(ImportError, "torch.utils.ffi is deprecated. Please use cpp extensions instead."): - from torch.utils.ffi import create_extension + from torch.utils.ffi import create_extension # noqa: F401 @unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ.keys(), 'SKIP_TEST_BOTTLENECK is set') diff --git a/tools/amd_build/build_amd.py b/tools/amd_build/build_amd.py index a33559a..1e3d882 100644 --- a/tools/amd_build/build_amd.py +++ b/tools/amd_build/build_amd.py @@ -2,7 +2,6 @@ from __future__ import absolute_import, division, print_function import os -import sys import subprocess import argparse from functools import reduce diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py index 777c9c0..f0753e0 100644 --- a/tools/autograd/gen_variable_type.py +++ b/tools/autograd/gen_variable_type.py @@ -23,7 +23,6 @@ # differentiable subcomponents. 
# from __future__ import print_function -import os import sys from .utils import CodeTemplate, nested_dict, write, uninplace_api_name from .gen_autograd import VIEW_FUNCTIONS diff --git a/tools/build_libtorch.py b/tools/build_libtorch.py index 1704d9e..e8a71c8 100644 --- a/tools/build_libtorch.py +++ b/tools/build_libtorch.py @@ -1,8 +1,5 @@ import argparse -import os from os.path import dirname, abspath -import shlex -import subprocess import sys # By appending pytorch_root to sys.path, this module can import other torch diff --git a/tools/build_pytorch_libs.py b/tools/build_pytorch_libs.py index 5a0c416..6c163f5 100644 --- a/tools/build_pytorch_libs.py +++ b/tools/build_pytorch_libs.py @@ -1,22 +1,20 @@ -from .setup_helpers.env import (IS_64BIT, IS_ARM, IS_DARWIN, IS_LINUX, IS_PPC, IS_WINDOWS, +from .setup_helpers.env import (IS_64BIT, IS_DARWIN, IS_WINDOWS, DEBUG, REL_WITH_DEB_INFO, USE_MKLDNN, - check_env_flag, check_negative_env_flag, hotpatch_build_env_vars) + check_env_flag, check_negative_env_flag) import os import sys import distutils import distutils.sysconfig -from distutils.file_util import copy_file -from distutils.dir_util import copy_tree -from subprocess import check_call, call, check_output +from subprocess import check_call, check_output from distutils.version import LooseVersion from .setup_helpers.cuda import USE_CUDA, CUDA_HOME from .setup_helpers.dist_check import USE_DISTRIBUTED, USE_GLOO_IBVERBS from .setup_helpers.nccl import USE_SYSTEM_NCCL, NCCL_INCLUDE_DIR, NCCL_ROOT_DIR, NCCL_SYSTEM_LIB, USE_NCCL -from .setup_helpers.rocm import ROCM_HOME, ROCM_VERSION, USE_ROCM +from .setup_helpers.rocm import USE_ROCM from .setup_helpers.nnpack import USE_NNPACK from .setup_helpers.qnnpack import USE_QNNPACK -from .setup_helpers.cudnn import CUDNN_INCLUDE_DIR, CUDNN_LIB_DIR, CUDNN_LIBRARY, USE_CUDNN +from .setup_helpers.cudnn import CUDNN_INCLUDE_DIR, CUDNN_LIBRARY, USE_CUDNN from pprint import pprint diff --git a/tools/clang_format.py b/tools/clang_format.py index 18b862a..1a43a27 100644 --- a/tools/clang_format.py +++ b/tools/clang_format.py @@ -12,9 +12,7 @@ Only files that are in CLANG_FORMAT_WHITELIST are checked. import subprocess import os import argparse -import fnmatch import difflib -import sys import re diff --git a/tools/cwrap/__init__.py b/tools/cwrap/__init__.py index 4fa8e29..e6a3054 100644 --- a/tools/cwrap/__init__.py +++ b/tools/cwrap/__init__.py @@ -1 +1 @@ -from .cwrap import cwrap +from .cwrap import cwrap # noqa: F401 diff --git a/tools/cwrap/plugins/ArgumentReferences.py b/tools/cwrap/plugins/ArgumentReferences.py index ab341b8..f374bdc 100644 --- a/tools/cwrap/plugins/ArgumentReferences.py +++ b/tools/cwrap/plugins/ArgumentReferences.py @@ -1,5 +1,4 @@ from . import CWrapPlugin -from string import Template class ArgumentReferences(CWrapPlugin): diff --git a/tools/cwrap/plugins/CuDNNPlugin.py b/tools/cwrap/plugins/CuDNNPlugin.py index 8c32e98..b026476 100644 --- a/tools/cwrap/plugins/CuDNNPlugin.py +++ b/tools/cwrap/plugins/CuDNNPlugin.py @@ -1,8 +1,6 @@ from string import Template import copy -from copy import deepcopy from . import CWrapPlugin -from itertools import product class CuDNNPlugin(CWrapPlugin): diff --git a/tools/cwrap/plugins/GILRelease.py b/tools/cwrap/plugins/GILRelease.py index 3a3e5a5..8860340 100644 --- a/tools/cwrap/plugins/GILRelease.py +++ b/tools/cwrap/plugins/GILRelease.py @@ -1,5 +1,4 @@ from . 
import CWrapPlugin -from string import Template class GILRelease(CWrapPlugin): diff --git a/tools/cwrap/plugins/OptionalArguments.py b/tools/cwrap/plugins/OptionalArguments.py index 0f51e22..527f81d 100644 --- a/tools/cwrap/plugins/OptionalArguments.py +++ b/tools/cwrap/plugins/OptionalArguments.py @@ -1,7 +1,4 @@ -import os -from copy import deepcopy from . import CWrapPlugin -from itertools import product from ...shared import cwrap_common diff --git a/tools/cwrap/plugins/__init__.py b/tools/cwrap/plugins/__init__.py index 9d956b8..cb1227f 100644 --- a/tools/cwrap/plugins/__init__.py +++ b/tools/cwrap/plugins/__init__.py @@ -420,15 +420,15 @@ class CWrapPlugin(object): return template -from .NNExtension import NNExtension -from .NullableArguments import NullableArguments -from .OptionalArguments import OptionalArguments -from .ArgcountChecker import ArgcountChecker -from .ArgumentReferences import ArgumentReferences -from .BeforeAfterCall import BeforeAfterCall -from .ConstantArguments import ConstantArguments -from .ReturnArguments import ReturnArguments -from .GILRelease import GILRelease -from .AutoGPU import AutoGPU -from .CuDNNPlugin import CuDNNPlugin -from .WrapDim import WrapDim +from .NNExtension import NNExtension # noqa: F401 +from .NullableArguments import NullableArguments # noqa: F401 +from .OptionalArguments import OptionalArguments # noqa: F401 +from .ArgcountChecker import ArgcountChecker # noqa: F401 +from .ArgumentReferences import ArgumentReferences # noqa: F401 +from .BeforeAfterCall import BeforeAfterCall # noqa: F401 +from .ConstantArguments import ConstantArguments # noqa: F401 +from .ReturnArguments import ReturnArguments # noqa: F401 +from .GILRelease import GILRelease # noqa: F401 +from .AutoGPU import AutoGPU # noqa: F401 +from .CuDNNPlugin import CuDNNPlugin # noqa: F401 +from .WrapDim import WrapDim # noqa: F401 diff --git a/tools/download_mnist.py b/tools/download_mnist.py index 2a5068f..80ada61 100644 --- a/tools/download_mnist.py +++ b/tools/download_mnist.py @@ -5,7 +5,6 @@ import argparse import gzip import os import sys -import urllib try: from urllib.error import URLError diff --git a/tools/jit/gen_jit_dispatch.py b/tools/jit/gen_jit_dispatch.py index d3016f8..149832b 100644 --- a/tools/jit/gen_jit_dispatch.py +++ b/tools/jit/gen_jit_dispatch.py @@ -12,14 +12,11 @@ generated. 
In the full build system, OUTPUT_DIR is torch/csrc/jit/generated/ """ -import os import argparse -import re import copy -from itertools import count, combinations, groupby -from ..autograd.utils import CodeTemplate, write, uninplace_api_name +from itertools import groupby +from ..autograd.utils import CodeTemplate, write from ..autograd.gen_autograd import load_aten_declarations -from collections import OrderedDict from ..autograd.gen_autograd import RETURNS_VIEWS_OF_INPUT # JIT has a type system of diff --git a/tools/nnwrap/__init__.py b/tools/nnwrap/__init__.py index d6457a5..e43ed37 100644 --- a/tools/nnwrap/__init__.py +++ b/tools/nnwrap/__init__.py @@ -1 +1 @@ -from .generate_wrappers import generate_wrappers, wrap_function, import_module +from .generate_wrappers import generate_wrappers, wrap_function, import_module # noqa: F401 diff --git a/tools/nnwrap/generate_wrappers.py b/tools/nnwrap/generate_wrappers.py index 966db73..930584d 100644 --- a/tools/nnwrap/generate_wrappers.py +++ b/tools/nnwrap/generate_wrappers.py @@ -1,6 +1,5 @@ import os -import sys -from string import Template, ascii_lowercase +from string import Template from ..cwrap import cwrap from ..cwrap.plugins import NNExtension, NullableArguments, AutoGPU from ..shared import import_module diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py index f9206eb..cd95a25 100644 --- a/tools/pyi/gen_pyi.py +++ b/tools/pyi/gen_pyi.py @@ -1,11 +1,7 @@ from __future__ import print_function -import multiprocessing -import sys import os -import inspect import collections import yaml -import types import re import argparse diff --git a/tools/setup_helpers/dist_check.py b/tools/setup_helpers/dist_check.py index 8859fe1..51bc166 100644 --- a/tools/setup_helpers/dist_check.py +++ b/tools/setup_helpers/dist_check.py @@ -2,8 +2,7 @@ import os import subprocess import glob -from .env import IS_CONDA, IS_LINUX, IS_WINDOWS, CONDA_DIR, check_env_flag, check_negative_env_flag, gather_paths -from .cuda import USE_CUDA +from .env import IS_CONDA, IS_WINDOWS, CONDA_DIR, check_env_flag, check_negative_env_flag, gather_paths # On ROCm, RCCL development isn't complete. 
https://github.com/ROCmSoftwarePlatform/rccl USE_DISTRIBUTED = not check_negative_env_flag("USE_DISTRIBUTED") and not IS_WINDOWS and not check_env_flag("USE_ROCM") diff --git a/tools/setup_helpers/miopen.py b/tools/setup_helpers/miopen.py index 59ca3b9..1de3a26 100644 --- a/tools/setup_helpers/miopen.py +++ b/tools/setup_helpers/miopen.py @@ -1,7 +1,4 @@ -import os -import glob - -from .env import IS_WINDOWS, IS_CONDA, CONDA_DIR, check_env_flag, gather_paths +from .env import check_env_flag from .rocm import USE_ROCM, ROCM_HOME diff --git a/tools/setup_helpers/nccl.py b/tools/setup_helpers/nccl.py index 5fbbf77..f76cd57 100644 --- a/tools/setup_helpers/nccl.py +++ b/tools/setup_helpers/nccl.py @@ -1,7 +1,5 @@ import os import glob -import warnings -from itertools import chain from .env import IS_WINDOWS, IS_DARWIN, IS_CONDA, CONDA_DIR, check_negative_env_flag, \ gather_paths diff --git a/tools/setup_helpers/nvtoolext.py b/tools/setup_helpers/nvtoolext.py index c22c2a6..27b5dc6 100644 --- a/tools/setup_helpers/nvtoolext.py +++ b/tools/setup_helpers/nvtoolext.py @@ -1,7 +1,6 @@ import os import platform import ctypes.util -from subprocess import Popen, PIPE from .cuda import USE_CUDA diff --git a/tools/shared/__init__.py b/tools/shared/__init__.py index 8494cde..44b8a46 100644 --- a/tools/shared/__init__.py +++ b/tools/shared/__init__.py @@ -1,3 +1,2 @@ -from .module_loader import import_module -from .cwrap_common import set_declaration_defaults, \ - sort_by_number_of_options, enumerate_options_due_to_default +from .module_loader import import_module # noqa: F401 +from .cwrap_common import set_declaration_defaults, sort_by_number_of_options, enumerate_options_due_to_default # noqa: F401 diff --git a/torch/__init__.py b/torch/__init__.py index b4c50e9..802925b 100644 --- a/torch/__init__.py +++ b/torch/__init__.py @@ -13,7 +13,7 @@ import sys import platform from ._utils import _import_dotted_name from ._utils_internal import get_file_path, prepare_multiprocessing_environment -from .version import __version__ +from .version import __version__ # noqa: F401 from ._six import string_classes as _string_classes __all__ = [ @@ -39,7 +39,7 @@ import os as _dl_flags # if we have numpy, it *must* be imported before the call to setdlopenflags() # or there is risk that later c modules will segfault when importing numpy try: - import numpy as _np + import numpy as _np # noqa: F401 except ImportError: pass @@ -281,7 +281,7 @@ del BoolStorageBase import torch.cuda import torch.autograd -from torch.autograd import no_grad, enable_grad, set_grad_enabled +from torch.autograd import no_grad, enable_grad, set_grad_enabled # noqa: F401 import torch.nn import torch.optim import torch.multiprocessing @@ -309,7 +309,7 @@ def compiled_with_cxx11_abi(): # Import the ops "namespace" -from torch._ops import ops +from torch._ops import ops # noqa: F401 # Import the quasi random sampler import torch.quasirandom diff --git a/torch/_six.py b/torch/_six.py index b062114..9a68821 100644 --- a/torch/_six.py +++ b/torch/_six.py @@ -53,9 +53,9 @@ else: if PY2: - import Queue as queue + import Queue as queue # noqa: F401 else: - import queue + import queue # noqa: F401 def with_metaclass(meta, *bases): diff --git a/torch/_tensor_str.py b/torch/_tensor_str.py index 50ac694..68ea13c 100644 --- a/torch/_tensor_str.py +++ b/torch/_tensor_str.py @@ -1,8 +1,6 @@ import math import torch -from functools import reduce -from sys import float_info -from torch._six import inf, nan +from torch._six import inf class __PrinterOptions(object): 
diff --git a/torch/_thnn/utils.py b/torch/_thnn/utils.py index 2c73b3c..55456d1 100644 --- a/torch/_thnn/utils.py +++ b/torch/_thnn/utils.py @@ -1,7 +1,3 @@ -import os -import itertools -import importlib - try: # when compiling a cffi extension, this works. When compiling # torch itself, it doesn't work because the parent module can't diff --git a/torch/autograd/__init__.py b/torch/autograd/__init__.py index 0fe63a8..f55ed63 100644 --- a/torch/autograd/__init__.py +++ b/torch/autograd/__init__.py @@ -8,11 +8,11 @@ import torch import warnings from .variable import Variable -from .function import Function, NestedIOFunction -from .gradcheck import gradcheck, gradgradcheck -from .grad_mode import no_grad, enable_grad, set_grad_enabled -from .anomaly_mode import detect_anomaly, set_detect_anomaly -from . import profiler +from .function import Function, NestedIOFunction # noqa: F401 +from .gradcheck import gradcheck, gradgradcheck # noqa: F401 +from .grad_mode import no_grad, enable_grad, set_grad_enabled # noqa: F401 +from .anomaly_mode import detect_anomaly, set_detect_anomaly # noqa: F401 +from . import profiler # noqa: F401 __all__ = ['Variable', 'Function', 'backward', 'grad_mode'] diff --git a/torch/autograd/_functions/__init__.py b/torch/autograd/_functions/__init__.py index c041700..be41919 100644 --- a/torch/autograd/_functions/__init__.py +++ b/torch/autograd/_functions/__init__.py @@ -1 +1 @@ -from .tensor import * +from .tensor import * # noqa: F401 diff --git a/torch/autograd/_functions/utils.py b/torch/autograd/_functions/utils.py index 55c85e7..cb571d1 100644 --- a/torch/autograd/_functions/utils.py +++ b/torch/autograd/_functions/utils.py @@ -1,4 +1,3 @@ -import torch from functools import reduce diff --git a/torch/autograd/gradcheck.py b/torch/autograd/gradcheck.py index 22cd8e2..20ea632 100644 --- a/torch/autograd/gradcheck.py +++ b/torch/autograd/gradcheck.py @@ -1,7 +1,6 @@ import torch from torch._six import container_abcs, istuple import torch.testing -import sys from itertools import product import warnings diff --git a/torch/autograd/profiler.py b/torch/autograd/profiler.py index 5670286..1feb171 100644 --- a/torch/autograd/profiler.py +++ b/torch/autograd/profiler.py @@ -1,12 +1,7 @@ -import subprocess -import re -import os -import sys import itertools from collections import defaultdict, namedtuple import torch -from torch._six import FileNotFoundError class range(object): diff --git a/torch/cuda/__init__.py b/torch/cuda/__init__.py index da7149e..c9e4076 100644 --- a/torch/cuda/__init__.py +++ b/torch/cuda/__init__.py @@ -648,7 +648,7 @@ torch._storage_classes.add(ByteStorage) torch._storage_classes.add(HalfStorage) torch._storage_classes.add(BoolStorage) -from . import sparse -from . import profiler -from . import nvtx -from .streams import Stream, Event +from . import sparse # noqa: F401 +from . import profiler # noqa: F401 +from . import nvtx # noqa: F401 +from .streams import Stream, Event # noqa: F401 diff --git a/torch/cuda/comm.py b/torch/cuda/comm.py index 63e29c4..985d1dd 100644 --- a/torch/cuda/comm.py +++ b/torch/cuda/comm.py @@ -1,8 +1,7 @@ import torch from . 
import nccl -from torch._utils import _accumulate, _take_tensors, _flatten_dense_tensors, \ - _flatten_sparse_tensors, _unflatten_dense_tensors, \ - _unflatten_sparse_tensors, _reorder_tensors_as +from torch._utils import _take_tensors, _flatten_dense_tensors, \ + _unflatten_dense_tensors, _reorder_tensors_as def broadcast(tensor, devices): diff --git a/torch/distributed/__init__.py b/torch/distributed/__init__.py index 2a7b004..9987df3 100644 --- a/torch/distributed/__init__.py +++ b/torch/distributed/__init__.py @@ -10,8 +10,8 @@ if is_available() and not torch._C._c10d_init(): if is_available(): - from .distributed_c10d import * + from .distributed_c10d import * # noqa: F401 # Variables prefixed with underscore are not auto imported # See the comment in `distributed_c10d.py` above `_backend` on why we expose # this. - from .distributed_c10d import _backend + from .distributed_c10d import _backend # noqa: F401 diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py index 1461469..1fea9d5 100644 --- a/torch/distributed/distributed_c10d.py +++ b/torch/distributed/distributed_c10d.py @@ -3,7 +3,10 @@ import warnings from torch._six import string_classes from datetime import timedelta -from .rendezvous import rendezvous, register_rendezvous_handler +# This module is wildcard imported from torch.distributed. +# TODO: specify __all__ + +from .rendezvous import rendezvous, register_rendezvous_handler # noqa: F401 from . import BroadcastOptions, AllreduceOptions, ReduceOptions, \ ScatterOptions, GatherOptions from . import ReduceOp diff --git a/torch/distributed/launch.py b/torch/distributed/launch.py index 0504a6c..677bbb9 100644 --- a/torch/distributed/launch.py +++ b/torch/distributed/launch.py @@ -140,11 +140,8 @@ will not pass ``--local_rank`` when you specify this flag. 
import sys import subprocess import os -import socket from argparse import ArgumentParser, REMAINDER -import torch - def parse_args(): """ diff --git a/torch/distributions/__init__.py b/torch/distributions/__init__.py index 6727377..51bdf08 100644 --- a/torch/distributions/__init__.py +++ b/torch/distributions/__init__.py @@ -124,6 +124,8 @@ __all__ = [ 'Gamma', 'Geometric', 'Gumbel', + 'HalfCauchy', + 'HalfNormal', 'Independent', 'Laplace', 'LogNormal', diff --git a/torch/distributions/categorical.py b/torch/distributions/categorical.py index fcfea47..bde112c 100644 --- a/torch/distributions/categorical.py +++ b/torch/distributions/categorical.py @@ -2,7 +2,7 @@ import torch from torch._six import nan from torch.distributions import constraints from torch.distributions.distribution import Distribution -from torch.distributions.utils import probs_to_logits, logits_to_probs, lazy_property, broadcast_all +from torch.distributions.utils import probs_to_logits, logits_to_probs, lazy_property class Categorical(Distribution): diff --git a/torch/distributions/chi2.py b/torch/distributions/chi2.py index 7fdc5e8..781fd70 100644 --- a/torch/distributions/chi2.py +++ b/torch/distributions/chi2.py @@ -1,4 +1,3 @@ -import torch from torch.distributions import constraints from torch.distributions.gamma import Gamma diff --git a/torch/distributions/fishersnedecor.py b/torch/distributions/fishersnedecor.py index 1071d53..5fd523e 100644 --- a/torch/distributions/fishersnedecor.py +++ b/torch/distributions/fishersnedecor.py @@ -1,6 +1,5 @@ from numbers import Number import torch -import math from torch._six import nan from torch.distributions import constraints from torch.distributions.distribution import Distribution diff --git a/torch/distributions/gamma.py b/torch/distributions/gamma.py index d4641db..4c1c7f9 100644 --- a/torch/distributions/gamma.py +++ b/torch/distributions/gamma.py @@ -3,7 +3,7 @@ from numbers import Number import torch from torch.distributions import constraints from torch.distributions.exp_family import ExponentialFamily -from torch.distributions.utils import broadcast_all, lazy_property +from torch.distributions.utils import broadcast_all def _standard_gamma(concentration): diff --git a/torch/distributions/half_cauchy.py b/torch/distributions/half_cauchy.py index 916cb47..bc4bbb6 100644 --- a/torch/distributions/half_cauchy.py +++ b/torch/distributions/half_cauchy.py @@ -1,6 +1,5 @@ import math -import torch from torch._six import inf from torch.distributions import constraints from torch.distributions.transforms import AbsTransform diff --git a/torch/distributions/half_normal.py b/torch/distributions/half_normal.py index 00d0015..048703b 100644 --- a/torch/distributions/half_normal.py +++ b/torch/distributions/half_normal.py @@ -1,6 +1,5 @@ import math -import torch from torch._six import inf from torch.distributions import constraints from torch.distributions.transforms import AbsTransform diff --git a/torch/distributions/kl.py b/torch/distributions/kl.py index 9b77d33..ad86b0e 100644 --- a/torch/distributions/kl.py +++ b/torch/distributions/kl.py @@ -19,7 +19,6 @@ from .gumbel import Gumbel from .half_normal import HalfNormal from .independent import Independent from .laplace import Laplace -from .logistic_normal import LogisticNormal from .lowrank_multivariate_normal import (LowRankMultivariateNormal, _batch_lowrank_logdet, _batch_lowrank_mahalanobis) from .multivariate_normal import (MultivariateNormal, _batch_mahalanobis) diff --git a/torch/distributions/log_normal.py 
b/torch/distributions/log_normal.py
index 38fe532..5301e08 100644
--- a/torch/distributions/log_normal.py
+++ b/torch/distributions/log_normal.py
@@ -1,4 +1,3 @@
-import torch
 from torch.distributions import constraints
 from torch.distributions.transforms import ExpTransform
 from torch.distributions.normal import Normal
diff --git a/torch/distributions/logistic_normal.py b/torch/distributions/logistic_normal.py
index c1be0d2..4fb259e 100644
--- a/torch/distributions/logistic_normal.py
+++ b/torch/distributions/logistic_normal.py
@@ -2,7 +2,7 @@ import torch
 from torch.distributions import constraints
 from torch.distributions.normal import Normal
 from torch.distributions.transformed_distribution import TransformedDistribution
-from torch.distributions.transforms import ComposeTransform, ExpTransform, StickBreakingTransform
+from torch.distributions.transforms import StickBreakingTransform
 
 
 class LogisticNormal(TransformedDistribution):
diff --git a/torch/distributions/pareto.py b/torch/distributions/pareto.py
index c860f07..363180b 100644
--- a/torch/distributions/pareto.py
+++ b/torch/distributions/pareto.py
@@ -1,4 +1,3 @@
-import torch
 from torch.distributions import constraints
 from torch.distributions.exponential import Exponential
 from torch.distributions.transformed_distribution import TransformedDistribution
diff --git a/torch/distributions/studentT.py b/torch/distributions/studentT.py
index 11c632c..85124af 100644
--- a/torch/distributions/studentT.py
+++ b/torch/distributions/studentT.py
@@ -1,5 +1,4 @@
 import math
-from numbers import Number
 
 import torch
 from torch._six import inf, nan
diff --git a/torch/distributions/utils.py b/torch/distributions/utils.py
index 0f5a2f1..ed2492c 100644
--- a/torch/distributions/utils.py
+++ b/torch/distributions/utils.py
@@ -1,6 +1,5 @@
 from functools import update_wrapper
 from numbers import Number
-import math
 
 import torch
 import torch.nn.functional as F
diff --git a/torch/for_onnx/__init__.py b/torch/for_onnx/__init__.py
index f55446b..c1e799c 100644
--- a/torch/for_onnx/__init__.py
+++ b/torch/for_onnx/__init__.py
@@ -1 +1 @@
-from .onnx import *
+from .onnx import *  # noqa: F401
diff --git a/torch/hub.py b/torch/hub.py
index 77b1444..5d9ed2d 100644
--- a/torch/hub.py
+++ b/torch/hub.py
@@ -2,7 +2,6 @@ import importlib
 import os
 import shutil
 import sys
-import tempfile
 import zipfile
 
 if sys.version_info[0] == 2:
@@ -10,10 +9,7 @@ if sys.version_info[0] == 2:
     from urllib2 import urlopen  # noqa f811
 else:
     from urllib.request import urlopen
-    from urllib.parse import urlparse
-
-import torch
-import torch.utils.model_zoo as model_zoo
+    from urllib.parse import urlparse  # noqa: F401
 
 MASTER_BRANCH = 'master'
 ENV_TORCH_HUB_DIR = 'TORCH_HUB_DIR'
diff --git a/torch/jit/__init__.py b/torch/jit/__init__.py
index 2671432..4032f78 100644
--- a/torch/jit/__init__.py
+++ b/torch/jit/__init__.py
@@ -1,35 +1,31 @@
 import torch._C
-from torch import Tensor
 from torch.autograd import Variable, function
 from torch.serialization import validate_cuda_device
-from torch.nn import Module, ModuleList, ParameterList, Parameter, Sequential
+from torch.nn import Module, ModuleList, Parameter, Sequential
 from torch.jit.frontend import get_jit_class_def, get_jit_def, get_default_args
 import torch.backends.cudnn as cudnn
 import torch.jit.annotations
 import torch._jit_internal as _jit_internal
-from torch._six import raise_from, with_metaclass, get_function_from_type, \
+from torch._six import with_metaclass, get_function_from_type, \
     string_classes
-from torch._jit_internal import ignore
-from torch.jit._pickle import Unpickler
+from torch._jit_internal import ignore  # noqa: F401
+from torch.jit._pickle import Unpickler  # noqa: F401
 from ..nn.modules.utils import _single, _pair, _triple, _quadruple, \
     _list_with_default
 import torch.testing
 
 import math
-from collections import defaultdict, OrderedDict, namedtuple
+from collections import OrderedDict, namedtuple
 import textwrap
 import sys
 import warnings
-import itertools
 import weakref
 import types
 import contextlib
 import os
 import functools
 import copy
-import numbers
 import collections
-import re
 import inspect
 import pickle
 if sys.version_info[0] > 2:
diff --git a/torch/jit/_pickle.py b/torch/jit/_pickle.py
index 24e7bf3..5643b45 100644
--- a/torch/jit/_pickle.py
+++ b/torch/jit/_pickle.py
@@ -1,5 +1,3 @@
-import torch
-import functools
 import pickle
 
 
diff --git a/torch/jit/annotations.py b/torch/jit/annotations.py
index b318539..44985f2 100644
--- a/torch/jit/annotations.py
+++ b/torch/jit/annotations.py
@@ -1,4 +1,3 @@
-import re
 import sys
 import ast
 import inspect
@@ -181,3 +180,33 @@ def ann_to_type(ann):
     elif ann is str:
         return StringType.get()
     raise ValueError("Unknown type annotation: '{}'".format(ann.__name__))
+
+
+__all__ = [
+    'List',
+    'BroadcastingList1',
+    'BroadcastingList2',
+    'BroadcastingList3',
+    'Tuple',
+    'is_tuple',
+    'is_list',
+    'Dict',
+    'is_dict',
+    'TensorType',
+    'TupleType',
+    'FloatType',
+    'IntType',
+    'ListType',
+    'StringType',
+    'DictType',
+    'Module',
+    # TODO: Consider not exporting these during wildcard import (reserve
+    # that for the types; for idiomatic typing code.)
+    'get_signature',
+    'get_num_params',
+    'parse_type_line',
+    'get_type_line',
+    'split_type_line',
+    'try_real_annotations',
+    'ann_to_type',
+]
diff --git a/torch/jit/frontend.py b/torch/jit/frontend.py
index 539d883..435e593 100644
--- a/torch/jit/frontend.py
+++ b/torch/jit/frontend.py
@@ -5,8 +5,6 @@ import ast
 import inspect
 import string
 from textwrap import dedent
-from functools import partial
-from collections import namedtuple
 from torch._six import PY2
 from torch._C._jit_tree_views import *
 
diff --git a/torch/jit/quantized.py b/torch/jit/quantized.py
index b5775b9..fe79bab 100644
--- a/torch/jit/quantized.py
+++ b/torch/jit/quantized.py
@@ -1,11 +1,7 @@
 import torch
-import copy
-import numbers
-from typing import Tuple, Optional
+from typing import Tuple, Optional  # noqa: F401
 from torch import Tensor
 
-from torch.jit import ScriptModule
-from torch.nn.utils.rnn import PackedSequence
 from torch.nn import _VF
 
 
diff --git a/torch/multiprocessing/__init__.py b/torch/multiprocessing/__init__.py
index 7ec5355..d17c7ec 100644
--- a/torch/multiprocessing/__init__.py
+++ b/torch/multiprocessing/__init__.py
@@ -22,7 +22,7 @@ __all__ = ['set_sharing_strategy', 'get_sharing_strategy',
            'get_all_sharing_strategies']
 
 
-from multiprocessing import *
+from multiprocessing import *  # noqa: F401
 
 
 __all__ += multiprocessing.__all__
@@ -36,13 +36,13 @@ torch._C._multiprocessing_init()
 if sys.version_info < (3, 3):
     """Override basic classes in Python 2.7 and Python 3.3 to use ForkingPickler
     for serialization. Later versions of Python already use ForkingPickler."""
-    from .queue import Queue, SimpleQueue
-    from .pool import Pool
+    from .queue import Queue, SimpleQueue  # noqa: F401
+    from .pool import Pool  # noqa: F401
 
 
 """Add helper function to spawn N processes and wait for completion of any of
 them. This depends `mp.get_context` which was added in Python 3.4."""
-from .spawn import spawn, SpawnContext
+from .spawn import spawn, SpawnContext  # noqa: F401
 
 
 if sys.platform == 'darwin' or sys.platform == 'win32':
diff --git a/torch/multiprocessing/reductions.py b/torch/multiprocessing/reductions.py
index 1bf2268..f93054c 100644
--- a/torch/multiprocessing/reductions.py
+++ b/torch/multiprocessing/reductions.py
@@ -1,7 +1,6 @@
 import torch
 import torch.utils.hooks
 import os
-import weakref
 import threading
 import multiprocessing
 from multiprocessing.reduction import ForkingPickler
diff --git a/torch/nn/__init__.py b/torch/nn/__init__.py
index b870a55..391ef6a 100644
--- a/torch/nn/__init__.py
+++ b/torch/nn/__init__.py
@@ -1,5 +1,5 @@
-from .modules import *
-from .parameter import Parameter
-from .parallel import DataParallel
-from . import init
-from . import utils
+from .modules import *  # noqa: F401
+from .parameter import Parameter  # noqa: F401
+from .parallel import DataParallel  # noqa: F401
+from . import init  # noqa: F401
+from . import utils  # noqa: F401
diff --git a/torch/nn/_functions/thnn/__init__.py b/torch/nn/_functions/thnn/__init__.py
index fe68c9a..7336673 100644
--- a/torch/nn/_functions/thnn/__init__.py
+++ b/torch/nn/_functions/thnn/__init__.py
@@ -1,5 +1,5 @@
 _all_functions = []
 
-from .auto import *
-from .normalization import *
-from .sparse import *
+from .auto import *  # noqa: F401
+from .normalization import *  # noqa: F401
+from .sparse import *  # noqa: F401
diff --git a/torch/nn/_functions/thnn/auto.py b/torch/nn/_functions/thnn/auto.py
index 177b072..011a219 100644
--- a/torch/nn/_functions/thnn/auto.py
+++ b/torch/nn/_functions/thnn/auto.py
@@ -3,7 +3,7 @@ from collections import defaultdict
 
 import torch
 from torch._thnn.utils import parse_header, THNN_H_PATH
-from torch.autograd.function import Function, InplaceFunction, once_differentiable
+from torch.autograd.function import Function, InplaceFunction
 from torch._thnn import type2backend
 from .auto_double_backwards import double_backwards_fns
 from .auto_symbolic import symbolic_fns
diff --git a/torch/nn/_reduction.py b/torch/nn/_reduction.py
index 3ba7a47..5e7f5ad 100644
--- a/torch/nn/_reduction.py
+++ b/torch/nn/_reduction.py
@@ -1,6 +1,5 @@
 import warnings
 from .._jit_internal import weak_script
-import torch
 
 
 # NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h
diff --git a/torch/nn/functional.py b/torch/nn/functional.py
index 3517699..a286c22 100644
--- a/torch/nn/functional.py
+++ b/torch/nn/functional.py
@@ -3,16 +3,14 @@ from __future__ import division
 
 import warnings
 import math
-import types
 
 import torch
 from torch._C import _infer_size, _add_docstr
 from . import _reduction as _Reduction
-from . import _functions
 from .modules import utils
 from ._functions import vision
 from .modules.utils import _single, _pair, _triple, _list_with_default
-from . import grad
+from . import grad  # noqa: F401
 from . import _VF
 from .._jit_internal import weak_script, List
 
@@ -2476,7 +2474,6 @@ def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corne
 
     .. include:: cuda_deterministic_backward.rst
     """
-    from numbers import Integral
     from .modules.utils import _ntuple
 
     def _check_size_scale_factor(dim):
diff --git a/torch/nn/init.py b/torch/nn/init.py
index 6ea6f7a..731cd72 100644
--- a/torch/nn/init.py
+++ b/torch/nn/init.py
@@ -1,5 +1,4 @@
 import math
-import random
 import warnings
 
 import torch
diff --git a/torch/nn/modules/distance.py b/torch/nn/modules/distance.py
index 43948b0..f1e8722 100644
--- a/torch/nn/modules/distance.py
+++ b/torch/nn/modules/distance.py
@@ -1,4 +1,3 @@
-import torch
 from .module import Module
 from .. import functional as F
 from ..._jit_internal import weak_module, weak_script_method
diff --git a/torch/nn/modules/loss.py b/torch/nn/modules/loss.py
index 7ba5ffe..e97de27 100644
--- a/torch/nn/modules/loss.py
+++ b/torch/nn/modules/loss.py
@@ -1,9 +1,6 @@
 import warnings
-import torch
 
 from .module import Module
-from .container import Sequential
-from .activation import LogSoftmax
 from .. import functional as F
 from .. import _reduction as _Reduction
 from ..._jit_internal import weak_module, weak_script_method
diff --git a/torch/nn/modules/normalization.py b/torch/nn/modules/normalization.py
index 3437754..bc3e5ee 100644
--- a/torch/nn/modules/normalization.py
+++ b/torch/nn/modules/normalization.py
@@ -2,7 +2,6 @@ import torch
 import numbers
 from torch.nn.parameter import Parameter
 from .module import Module
-from .batchnorm import _BatchNorm
 from .. import functional as F
 from .. import init
 from ..._jit_internal import weak_module, weak_script_method
diff --git a/torch/nn/modules/pooling.py b/torch/nn/modules/pooling.py
index 61082af..e4ed16d 100644
--- a/torch/nn/modules/pooling.py
+++ b/torch/nn/modules/pooling.py
@@ -1,5 +1,3 @@
-import torch
-
 from .module import Module
 from .utils import _single, _pair, _triple
 from .. import functional as F
diff --git a/torch/nn/modules/rnn.py b/torch/nn/modules/rnn.py
index 15745bf..e19892d 100644
--- a/torch/nn/modules/rnn.py
+++ b/torch/nn/modules/rnn.py
@@ -1,7 +1,6 @@
 import math
 import torch
 import warnings
-import itertools
 import numbers
 
 from .module import Module
diff --git a/torch/nn/modules/sparse.py b/torch/nn/modules/sparse.py
index 7f0c494..cdd359e 100644
--- a/torch/nn/modules/sparse.py
+++ b/torch/nn/modules/sparse.py
@@ -4,7 +4,7 @@ from torch.nn.parameter import Parameter
 from .module import Module
 from .. import functional as F
 from .. import init
-from torch._jit_internal import weak_module, weak_script, weak_script_method
+from torch._jit_internal import weak_module, weak_script_method
 
 
 @weak_module
diff --git a/torch/nn/modules/upsampling.py b/torch/nn/modules/upsampling.py
index b41ad73..1b355c5 100644
--- a/torch/nn/modules/upsampling.py
+++ b/torch/nn/modules/upsampling.py
@@ -1,6 +1,3 @@
-from numbers import Integral
-import warnings
-
 from .module import Module
 from .. import functional as F
 from ..._jit_internal import weak_module, weak_script_method
diff --git a/torch/nn/parallel/__init__.py b/torch/nn/parallel/__init__.py
index 066f415..a0eea7c 100644
--- a/torch/nn/parallel/__init__.py
+++ b/torch/nn/parallel/__init__.py
@@ -4,7 +4,7 @@ from .data_parallel import DataParallel, data_parallel
 from .scatter_gather import scatter, gather
 from .distributed import DistributedDataParallel
 from .distributed_cpu import DistributedDataParallelCPU
-import torch.nn.parallel.deprecated
+import torch.nn.parallel.deprecated  # noqa: F401
 
 __all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel',
            'DataParallel', 'DistributedDataParallel', 'DistributedDataParallelCPU']
diff --git a/torch/nn/parallel/deprecated/distributed.py b/torch/nn/parallel/deprecated/distributed.py
index 4e78bc7..9e19a2e 100644
--- a/torch/nn/parallel/deprecated/distributed.py
+++ b/torch/nn/parallel/deprecated/distributed.py
@@ -1,5 +1,4 @@
 import sys
-import math
 import threading
 import copy
 
diff --git a/torch/nn/parallel/distributed.py b/torch/nn/parallel/distributed.py
index d02f88d..af30c9c 100644
--- a/torch/nn/parallel/distributed.py
+++ b/torch/nn/parallel/distributed.py
@@ -3,7 +3,6 @@ import copy
 
 import torch
 from torch.cuda.comm import broadcast_coalesced
-from torch.cuda import nccl
 import torch.distributed as dist
 
 if dist.is_available():
diff --git a/torch/nn/utils/__init__.py b/torch/nn/utils/__init__.py
index 472aaa8..2398766 100644
--- a/torch/nn/utils/__init__.py
+++ b/torch/nn/utils/__init__.py
@@ -1,6 +1,6 @@
-from . import rnn
-from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
-from .weight_norm import weight_norm, remove_weight_norm
-from .convert_parameters import parameters_to_vector, vector_to_parameters
-from .spectral_norm import spectral_norm, remove_spectral_norm
-from .sync_batch_norm import convert_sync_batchnorm
+from . import rnn  # noqa: F401
+from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_  # noqa: F401
+from .weight_norm import weight_norm, remove_weight_norm  # noqa: F401
+from .convert_parameters import parameters_to_vector, vector_to_parameters  # noqa: F401
+from .spectral_norm import spectral_norm, remove_spectral_norm  # noqa: F401
+from .sync_batch_norm import convert_sync_batchnorm  # noqa: F401
diff --git a/torch/nn/utils/spectral_norm.py b/torch/nn/utils/spectral_norm.py
index 242d24f..21bcf4a 100644
--- a/torch/nn/utils/spectral_norm.py
+++ b/torch/nn/utils/spectral_norm.py
@@ -3,7 +3,6 @@ Spectral Normalization from https://arxiv.org/abs/1802.05957
 """
 import torch
 from torch.nn.functional import normalize
-from torch.nn.parameter import Parameter
 
 
 class SpectralNorm(object):
diff --git a/torch/onnx/__init__.py b/torch/onnx/__init__.py
index eac8d8c..5a9451e 100644
--- a/torch/onnx/__init__.py
+++ b/torch/onnx/__init__.py
@@ -1,6 +1,3 @@
-import functools
-import types
-
 import torch._C as _C
 
 TensorProtoDataType = _C._onnx.TensorProtoDataType
diff --git a/torch/onnx/symbolic.py b/torch/onnx/symbolic.py
index 9a1911f..f31c136 100644
--- a/torch/onnx/symbolic.py
+++ b/torch/onnx/symbolic.py
@@ -1,9 +1,6 @@
-import numbers
-
 import torch
-from torch._C import TensorType, ListType, OptionalType
+from torch._C import ListType, OptionalType
 from torch.nn.modules.utils import _single, _pair, _triple
-from torch.nn.utils.rnn import PackedSequence
 import warnings
 
 import torch.onnx
@@ -11,9 +8,7 @@ import torch.onnx
 # ONNX symbolics
 import torch.onnx.utils
 
-from collections import Iterable
 from functools import partial, wraps
-import itertools
 import numpy
 import math
 
diff --git a/torch/onnx/utils.py b/torch/onnx/utils.py
index b6249b2..b0cf121 100644
--- a/torch/onnx/utils.py
+++ b/torch/onnx/utils.py
@@ -13,10 +13,7 @@ from torch._six import container_abcs
 import contextlib
 import numbers
 import warnings
-import functools
-import types
 from torch._six import string_classes
-from torch.autograd import Function, function
 from torch.jit import _unique_state_dict
 from torch.onnx import ONNX_ARCHIVE_MODEL_PROTO_NAME, ExportTypes, OperatorExportTypes
 from torch._C import ListType
diff --git a/torch/optim/__init__.py b/torch/optim/__init__.py
index 03a5a68..6979515 100644
--- a/torch/optim/__init__.py
+++ b/torch/optim/__init__.py
@@ -5,18 +5,18 @@ enough, so that more sophisticated ones can be also easily integrated in the
 future.
 """
 
-from .adadelta import Adadelta
-from .adagrad import Adagrad
-from .adam import Adam
-from .sparse_adam import SparseAdam
-from .adamax import Adamax
-from .asgd import ASGD
-from .sgd import SGD
-from .rprop import Rprop
-from .rmsprop import RMSprop
-from .optimizer import Optimizer
-from .lbfgs import LBFGS
-from . import lr_scheduler
+from .adadelta import Adadelta  # noqa: F401
+from .adagrad import Adagrad  # noqa: F401
+from .adam import Adam  # noqa: F401
+from .sparse_adam import SparseAdam  # noqa: F401
+from .adamax import Adamax  # noqa: F401
+from .asgd import ASGD  # noqa: F401
+from .sgd import SGD  # noqa: F401
+from .rprop import Rprop  # noqa: F401
+from .rmsprop import RMSprop  # noqa: F401
+from .optimizer import Optimizer  # noqa: F401
+from .lbfgs import LBFGS  # noqa: F401
+from . import lr_scheduler  # noqa: F401
 
 del adadelta
 del adagrad
diff --git a/torch/optim/lr_scheduler.py b/torch/optim/lr_scheduler.py
index b4dadb5..e8c391c 100644
--- a/torch/optim/lr_scheduler.py
+++ b/torch/optim/lr_scheduler.py
@@ -1,6 +1,5 @@
 import types
 import math
-import torch
 from torch._six import inf
 from collections import Counter
 from functools import partial
diff --git a/torch/optim/rprop.py b/torch/optim/rprop.py
index d24c407..0fbb9b20 100644
--- a/torch/optim/rprop.py
+++ b/torch/optim/rprop.py
@@ -1,4 +1,3 @@
-import math
 import torch
 
 from .optimizer import Optimizer
diff --git a/torch/random.py b/torch/random.py
index e71c425..628948c 100644
--- a/torch/random.py
+++ b/torch/random.py
@@ -1,4 +1,3 @@
-import torch
 import contextlib
 import warnings
 
diff --git a/torch/utils/bottleneck/__main__.py b/torch/utils/bottleneck/__main__.py
index ae5de6b..0cca3c4 100644
--- a/torch/utils/bottleneck/__main__.py
+++ b/torch/utils/bottleneck/__main__.py
@@ -1,11 +1,8 @@
 import argparse
 import cProfile
 import pstats
-import subprocess
 import sys
 import os
-import re
-import contextlib
 
 import torch
 from torch.autograd import profiler
diff --git a/torch/utils/collect_env.py b/torch/utils/collect_env.py
index 9c28124..2c3f380 100644
--- a/torch/utils/collect_env.py
+++ b/torch/utils/collect_env.py
@@ -4,8 +4,6 @@ from __future__ import absolute_import, division, print_function, unicode_litera
 import re
 import subprocess
 import sys
-import time
-import datetime
 import os
 from collections import namedtuple
 
diff --git a/torch/utils/data/__init__.py b/torch/utils/data/__init__.py
index ee58707..1852aca 100644
--- a/torch/utils/data/__init__.py
+++ b/torch/utils/data/__init__.py
@@ -1,4 +1,4 @@
-from .sampler import Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler, WeightedRandomSampler, BatchSampler
-from .distributed import DistributedSampler
-from .dataset import Dataset, TensorDataset, ConcatDataset, Subset, random_split
-from .dataloader import DataLoader
+from .sampler import Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler, WeightedRandomSampler, BatchSampler  # noqa: F401
+from .distributed import DistributedSampler  # noqa: F401
+from .dataset import Dataset, TensorDataset, ConcatDataset, Subset, random_split  # noqa: F401
+from .dataloader import DataLoader  # noqa: F401
diff --git a/torch/utils/data/_utils/__init__.py b/torch/utils/data/_utils/__init__.py
index 05b2b65..893b716 100644
--- a/torch/utils/data/_utils/__init__.py
+++ b/torch/utils/data/_utils/__init__.py
@@ -58,4 +58,4 @@ def _set_python_exit_flag():
 atexit.register(_set_python_exit_flag)
 
 
-from . import worker, signal_handling, pin_memory, collate
+from . import worker, signal_handling, pin_memory, collate  # noqa: F401
diff --git a/torch/utils/data/_utils/pin_memory.py b/torch/utils/data/_utils/pin_memory.py
index f762aff..979044e 100644
--- a/torch/utils/data/_utils/pin_memory.py
+++ b/torch/utils/data/_utils/pin_memory.py
@@ -7,7 +7,7 @@ static methods.
 
 import torch
 from torch._six import queue, container_abcs, string_classes
-from . import collate, MP_STATUS_CHECK_INTERVAL, ExceptionWrapper
+from . import MP_STATUS_CHECK_INTERVAL, ExceptionWrapper
 
 
 def _pin_memory_loop(in_queue, out_queue, device_id, done_event):
diff --git a/torch/utils/data/_utils/signal_handling.py b/torch/utils/data/_utils/signal_handling.py
index 9364733..3f8a90e 100644
--- a/torch/utils/data/_utils/signal_handling.py
+++ b/torch/utils/data/_utils/signal_handling.py
@@ -32,9 +32,7 @@ multiprocessing data loading robust to errors.
 import signal
 import threading
 
-import torch
-from torch._C import _set_worker_pids, _remove_worker_pids, \
-    _error_if_any_worker_fails, _set_worker_signal_handlers
+from torch._C import _set_worker_pids, _remove_worker_pids, _error_if_any_worker_fails, _set_worker_signal_handlers  # noqa: F401
 
 from . import IS_WINDOWS
 
-- 
2.7.4