[clang-tidy] Enable more folders (#63380)
author: driazati <driazati@users.noreply.github.com>
Fri, 20 Aug 2021 23:38:42 +0000 (16:38 -0700)
committer: Facebook GitHub Bot <facebook-github-bot@users.noreply.github.com>
Fri, 20 Aug 2021 23:40:42 +0000 (16:40 -0700)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63380

Crosses off some more of #62011, see the test in the stacked PR #63381

Test Plan: Imported from OSS

Reviewed By: malfet, seemethere

Differential Revision: D30455843

Pulled By: driazati

fbshipit-source-id: d473545d05ffa0b2476968f0b1c55f3a16a2c755

.github/workflows/lint.yml
tools/linter/clang_tidy/__main__.py
torch/csrc/api/include/torch/nn/functional/pooling.h
torch/csrc/deploy/example/benchmark.cpp
torch/csrc/deploy/test_deploy.cpp
torch/csrc/deploy/test_deploy_python_ext.cpp

index 720e76c..f036bc1 100644 (.github/workflows/lint.yml)
@@ -367,7 +367,12 @@ jobs:
           cd "${GITHUB_WORKSPACE}"
 
           python3 -m tools.linter.clang_tidy \
-            --paths torch/csrc/fx \
+            --paths \
+              torch/csrc/fx \
+              torch/csrc/utils \
+              torch/csrc/generic \
+              torch/csrc/deploy \
+              torch/csrc/tensor \
             --clang-tidy-exe "$(which clang-tidy)" \
             --disable-progress-bar 2>&1 | tee "${GITHUB_WORKSPACE}"/clang-tidy-output.txt
 
index fc9f2ab..b99c1f5 100644 (tools/linter/clang_tidy/__main__.py)
@@ -74,6 +74,7 @@ DEFAULTS = {
         "-torch/csrc/deploy/interpreter/interpreter.h",
         "-torch/csrc/deploy/interpreter/interpreter_impl.h",
         "-torch/csrc/deploy/interpreter/test_main.cpp",
+        "-torch/csrc/deploy/test_deploy_python_ext.cpp",
     ],
     "paths": ["torch/csrc/"],
     "include-dir": ["/usr/lib/llvm-11/include/openmp"] + clang_search_dirs(),
index c853885..f06b68b 100644 (torch/csrc/api/include/torch/nn/functional/pooling.h)
@@ -776,7 +776,7 @@ inline std::tuple<Tensor, Tensor> fractional_max_pool2d_with_indices(
 
   Tensor _random_samples_ = _random_samples;
   if (!_random_samples_.defined()) {
-    auto n_batch = 1 ? input.dim() == 3 : input.size(0);
+    auto n_batch = input.dim() == 3;
     _random_samples_ = torch::rand({n_batch, input.size(-1), 2}, torch::TensorOptions().dtype(input.dtype()).device(input.device()));
   }
   return torch::fractional_max_pool2d(input, kernel_size, *output_size_, _random_samples_);
index 348d84f..d2f1142 100644 (torch/csrc/deploy/example/benchmark.cpp)
@@ -295,6 +295,7 @@ struct Benchmark {
   std::function<void(int)> run_one_work_item;
 };
 
+// NOLINTNEXTLINE(bugprone-exception-escape)
 int main(int argc, char* argv[]) {
   int max_thread = atoi(argv[1]);
   cuda = std::string(argv[2]) == "cuda";
index f88a23c..a004db1 100644 (torch/csrc/deploy/test_deploy.cpp)
@@ -63,7 +63,7 @@ TEST(TorchpyTest, InitTwice) {
 TEST(TorchpyTest, DifferentInterps) {
   torch::deploy::InterpreterManager m(2);
   m.register_module_source("check_none", "check = id(None)\n");
-  int64_t id0, id1;
+  int64_t id0 = 0, id1 = 0;
   {
     auto I = m.all_instances()[0].acquire_session();
     id0 = I.global("check_none", "check").toIValue().toInt();
@@ -312,6 +312,7 @@ TEST(TorchpyTest, SharedLibraryLoad) {
       I.global("sys", "path").attr("append")({"torch/csrc/deploy"});
       I.global("test_deploy_python", "setup")({getenv("PATH")});
     } else {
+      // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
       char buf[PATH_MAX];
       strncpy(buf, test_lib_path, PATH_MAX);
       dirname(buf);
index 42700ea..59a04f5 100644 (torch/csrc/deploy/test_deploy_python_ext.cpp)
@@ -7,7 +7,7 @@
 bool run() {
   torch::deploy::InterpreterManager m(2);
   m.register_module_source("check_none", "check = id(None)\n");
-  int64_t id0, id1;
+  int64_t id0 = 0, id1 = 0;
   {
     auto I = m.all_instances()[0].acquire_session();
     id0 = I.global("check_none", "check").toIValue().toInt();