From: driazati
Date: Fri, 20 Aug 2021 23:38:42 +0000 (-0700)
Subject: [clang-tidy] Enable more folders (#63380)
X-Git-Tag: accepted/tizen/8.0/unified/20231005.095509~826
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7c0f5b9aa4dbdfefce02b10a07c1928d4ec1a66b;p=platform%2Fupstream%2Fpytorch.git

[clang-tidy] Enable more folders (#63380)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63380

Crosses off some more of #62011; see the test in the stacked PR #63381.

Test Plan: Imported from OSS

Reviewed By: malfet, seemethere

Differential Revision: D30455843

Pulled By: driazati

fbshipit-source-id: d473545d05ffa0b2476968f0b1c55f3a16a2c755
---

diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 720e76c..f036bc1 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -367,7 +367,12 @@ jobs:
           cd "${GITHUB_WORKSPACE}"
           python3 -m tools.linter.clang_tidy \
-            --paths torch/csrc/fx \
+            --paths \
+              torch/csrc/fx \
+              torch/csrc/utils \
+              torch/csrc/generic \
+              torch/csrc/deploy \
+              torch/csrc/tensor \
             --clang-tidy-exe "$(which clang-tidy)" \
             --disable-progress-bar 2>&1 | tee "${GITHUB_WORKSPACE}"/clang-tidy-output.txt
diff --git a/tools/linter/clang_tidy/__main__.py b/tools/linter/clang_tidy/__main__.py
index fc9f2ab..b99c1f5 100644
--- a/tools/linter/clang_tidy/__main__.py
+++ b/tools/linter/clang_tidy/__main__.py
@@ -74,6 +74,7 @@ DEFAULTS = {
         "-torch/csrc/deploy/interpreter/interpreter.h",
         "-torch/csrc/deploy/interpreter/interpreter_impl.h",
         "-torch/csrc/deploy/interpreter/test_main.cpp",
+        "-torch/csrc/deploy/test_deploy_python_ext.cpp",
     ],
     "paths": ["torch/csrc/"],
     "include-dir": ["/usr/lib/llvm-11/include/openmp"] + clang_search_dirs(),
diff --git a/torch/csrc/api/include/torch/nn/functional/pooling.h b/torch/csrc/api/include/torch/nn/functional/pooling.h
index c853885..f06b68b 100644
--- a/torch/csrc/api/include/torch/nn/functional/pooling.h
+++ b/torch/csrc/api/include/torch/nn/functional/pooling.h
@@ -776,7 +776,7 @@ inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
   Tensor _random_samples_ = _random_samples;
   if (!_random_samples_.defined()) {
-    auto n_batch = 1 ? input.dim() == 3 : input.size(0);
+    auto n_batch = input.dim() == 3;
     _random_samples_ = torch::rand({n_batch, input.size(-1), 2},
       torch::TensorOptions().dtype(input.dtype()).device(input.device()));
   }
   return torch::fractional_max_pool2d(input, kernel_size, *output_size_, _random_samples_);
diff --git a/torch/csrc/deploy/example/benchmark.cpp b/torch/csrc/deploy/example/benchmark.cpp
index 348d84f..d2f1142 100644
--- a/torch/csrc/deploy/example/benchmark.cpp
+++ b/torch/csrc/deploy/example/benchmark.cpp
@@ -295,6 +295,7 @@ struct Benchmark {
   std::function run_one_work_item;
 };
 
+// NOLINTNEXTLINE(bugprone-exception-escape)
 int main(int argc, char* argv[]) {
   int max_thread = atoi(argv[1]);
   cuda = std::string(argv[2]) == "cuda";
diff --git a/torch/csrc/deploy/test_deploy.cpp b/torch/csrc/deploy/test_deploy.cpp
index f88a23c..a004db1 100644
--- a/torch/csrc/deploy/test_deploy.cpp
+++ b/torch/csrc/deploy/test_deploy.cpp
@@ -63,7 +63,7 @@ TEST(TorchpyTest, InitTwice) {
 TEST(TorchpyTest, DifferentInterps) {
   torch::deploy::InterpreterManager m(2);
   m.register_module_source("check_none", "check = id(None)\n");
-  int64_t id0, id1;
+  int64_t id0 = 0, id1 = 0;
   {
     auto I = m.all_instances()[0].acquire_session();
     id0 = I.global("check_none", "check").toIValue().toInt();
@@ -312,6 +312,7 @@ TEST(TorchpyTest, SharedLibraryLoad) {
     I.global("sys", "path").attr("append")({"torch/csrc/deploy"});
     I.global("test_deploy_python", "setup")({getenv("PATH")});
   } else {
+    // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
     char buf[PATH_MAX];
     strncpy(buf, test_lib_path, PATH_MAX);
     dirname(buf);
diff --git a/torch/csrc/deploy/test_deploy_python_ext.cpp b/torch/csrc/deploy/test_deploy_python_ext.cpp
index 42700ea..59a04f5 100644
--- a/torch/csrc/deploy/test_deploy_python_ext.cpp
+++ b/torch/csrc/deploy/test_deploy_python_ext.cpp
@@ -7,7 +7,7 @@ bool run() {
   torch::deploy::InterpreterManager m(2);
   m.register_module_source("check_none", "check = id(None)\n");
-  int64_t id0, id1;
+  int64_t id0 = 0, id1 = 0;
   {
     auto I = m.all_instances()[0].acquire_session();
     id0 = I.global("check_none", "check").toIValue().toInt();
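
Usage note: the lint step above can be reproduced outside CI. A minimal sketch, with paths and flags taken from the workflow hunk; it assumes a pytorch checkout as the working directory, a clang-tidy from LLVM 11 on PATH (matching the include-dir default in __main__.py), and writes the log to a local clang-tidy-output.txt rather than "${GITHUB_WORKSPACE}":

    python3 -m tools.linter.clang_tidy \
      --paths \
        torch/csrc/fx \
        torch/csrc/utils \
        torch/csrc/generic \
        torch/csrc/deploy \
        torch/csrc/tensor \
      --clang-tidy-exe "$(which clang-tidy)" \
      --disable-progress-bar 2>&1 | tee clang-tidy-output.txt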