Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63380
Crosses off some more of #62011, see the test in the stacked PR #63381
Test Plan: Imported from OSS
Reviewed By: malfet, seemethere
Differential Revision:
D30455843
Pulled By: driazati
fbshipit-source-id:
d473545d05ffa0b2476968f0b1c55f3a16a2c755
cd "${GITHUB_WORKSPACE}"
python3 -m tools.linter.clang_tidy \
- --paths torch/csrc/fx \
+ --paths \
+ torch/csrc/fx \
+ torch/csrc/utils \
+ torch/csrc/generic \
+ torch/csrc/deploy \
+ torch/csrc/tensor \
--clang-tidy-exe "$(which clang-tidy)" \
--disable-progress-bar 2>&1 | tee "${GITHUB_WORKSPACE}"/clang-tidy-output.txt
"-torch/csrc/deploy/interpreter/interpreter.h",
"-torch/csrc/deploy/interpreter/interpreter_impl.h",
"-torch/csrc/deploy/interpreter/test_main.cpp",
+ "-torch/csrc/deploy/test_deploy_python_ext.cpp",
],
"paths": ["torch/csrc/"],
"include-dir": ["/usr/lib/llvm-11/include/openmp"] + clang_search_dirs(),
Tensor _random_samples_ = _random_samples;
if (!_random_samples_.defined()) {
- auto n_batch = 1 ? input.dim() == 3 : input.size(0);
+ auto n_batch = input.dim() == 3 ? 1 : input.size(0);
_random_samples_ = torch::rand({n_batch, input.size(-1), 2}, torch::TensorOptions().dtype(input.dtype()).device(input.device()));
}
return torch::fractional_max_pool2d(input, kernel_size, *output_size_, _random_samples_);
std::function<void(int)> run_one_work_item;
};
+// NOLINTNEXTLINE(bugprone-exception-escape)
int main(int argc, char* argv[]) {
int max_thread = atoi(argv[1]);
cuda = std::string(argv[2]) == "cuda";
TEST(TorchpyTest, DifferentInterps) {
torch::deploy::InterpreterManager m(2);
m.register_module_source("check_none", "check = id(None)\n");
- int64_t id0, id1;
+ int64_t id0 = 0, id1 = 0;
{
auto I = m.all_instances()[0].acquire_session();
id0 = I.global("check_none", "check").toIValue().toInt();
I.global("sys", "path").attr("append")({"torch/csrc/deploy"});
I.global("test_deploy_python", "setup")({getenv("PATH")});
} else {
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
char buf[PATH_MAX];
strncpy(buf, test_lib_path, PATH_MAX);
dirname(buf);
bool run() {
torch::deploy::InterpreterManager m(2);
m.register_module_source("check_none", "check = id(None)\n");
- int64_t id0, id1;
+ int64_t id0 = 0, id1 = 0;
{
auto I = m.all_instances()[0].acquire_session();
id0 = I.global("check_none", "check").toIValue().toInt();