From d6c53328f9c9f0b569f1d32e4df06fdf5b573c66 Mon Sep 17 00:00:00 2001
From: Peter Goldsborough
Date: Fri, 7 Dec 2018 12:22:49 -0800
Subject: [PATCH] Large scale fix of python-related files in torch/csrc/

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14515

Differential Revision: D13247966

Pulled By: goldsborough

fbshipit-source-id: 7a127c508fc576a7a92626dd6b729f660162d628
---
 .clang-tidy                                    | 15 ++--
 tools/run-clang-tidy-in-ci.sh                  | 14 ++--
 torch/csrc/DataLoader.cpp                      | 12 ++--
 torch/csrc/Device.cpp                          | 46 ++++++------
 torch/csrc/Device.h                            |  1 +
 torch/csrc/Dtype.cpp                           | 54 +++++++--------
 torch/csrc/Exceptions.h                        | 10 +--
 torch/csrc/Generator.cpp                       | 53 +++++++-------
 torch/csrc/Layout.cpp                          | 58 ++++++++--------
 torch/csrc/Module.cpp                          |  2 +-
 torch/csrc/PtrWrapper.cpp                      | 56 +++++++--------
 torch/csrc/Size.cpp                            | 56 +++++++--------
 torch/csrc/Storage.cpp                         |  1 -
 torch/csrc/THP.h                               |  1 -
 torch/csrc/TypeInfo.cpp                        | 96 +++++++++++++-------------
 torch/csrc/autograd/python_anomaly_mode.h      |  6 +-
 torch/csrc/autograd/python_cpp_function.cpp    |  4 +-
 torch/csrc/autograd/python_engine.cpp          | 56 +++++++--------
 torch/csrc/autograd/python_engine.h            |  8 +--
 torch/csrc/autograd/python_function.cpp        | 50 +++++++-------
 torch/csrc/autograd/python_function.h          | 10 +--
 torch/csrc/autograd/python_hook.h              |  4 +-
 torch/csrc/autograd/python_legacy_variable.cpp | 58 ++++++++--------
 torch/csrc/autograd/python_variable.cpp        | 52 +++++++-------
 torch/csrc/autograd/python_variable.h          |  3 +-
 torch/csrc/byte_order.cpp                      |  5 +-
 torch/csrc/byte_order.h                        |  2 +-
 torch/csrc/copy_utils.h                        |  6 +-
 torch/csrc/cuda/Module.cpp                     |  1 -
 torch/csrc/cuda/Storage.cpp                    |  1 -
 torch/csrc/cuda/Tensor.cpp                     |  1 -
 torch/csrc/distributed/c10d/init.cpp           |  2 +-
 torch/csrc/generic/Storage.cpp                 | 58 ++++++++--------
 torch/csrc/jit/batched/BatchTensor.h           |  1 -
 torch/csrc/jit/constants.cpp                   |  7 +-
 torch/csrc/jit/init.cpp                        |  5 +-
 torch/csrc/jit/interpreter.h                   |  7 +-
 torch/csrc/jit/passes/alias_analysis.cpp       |  4 +-
 torch/csrc/jit/passes/alias_analysis.h         |  2 +-
 torch/csrc/jit/passes/onnx.cpp                 |  2 +-
 torch/csrc/jit/passes/onnx/peephole.cpp        | 15 ++--
 torch/csrc/jit/passes/to_batch.cpp             |  7 +-
 torch/csrc/jit/python_arg_flatten.h            |  2 +-
 torch/csrc/jit/python_interpreter.cpp          |  8 +--
 torch/csrc/jit/python_ir.cpp                   |  7 +-
 torch/csrc/jit/register_prim_ops.cpp           |  6 +-
 torch/csrc/jit/script/compiler.h               |  2 +-
 torch/csrc/jit/script/init.cpp                 |  3 +-
 torch/csrc/jit/script/python_tree_views.cpp    |  4 +-
 torch/csrc/serialization.cpp                   |  2 +-
 torch/csrc/tensor/python_tensor.cpp            |  1 +
 torch/csrc/utils.cpp                           |  6 +-
 torch/csrc/utils/invalid_arguments.cpp         | 26 +++----
 torch/csrc/utils/python_arg_parser.cpp         |  3 +-
 torch/csrc/utils/python_arg_parser.h           |  1 +
 torch/csrc/utils/tensor_dtypes.cpp             |  2 +-
 torch/csrc/utils/tensor_layouts.cpp            |  2 +-
 57 files changed, 465 insertions(+), 462 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 44cf447..6d02359 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -3,26 +3,27 @@ Checks: '
     -*
     ,bugprone-*
-    ,-bugprone-macro-parentheses
     ,-bugprone-forward-declaration-namespace
+    ,-bugprone-macro-parentheses
     ,cppcoreguidelines-*
+    ,-cppcoreguidelines-interfaces-global-init
+    ,-cppcoreguidelines-owning-memory
     ,-cppcoreguidelines-pro-bounds-array-to-pointer-decay
-    ,-cppcoreguidelines-pro-type-static-cast-downcast
-    ,-cppcoreguidelines-pro-bounds-pointer-arithmetic
     ,-cppcoreguidelines-pro-bounds-constant-array-index
+    ,-cppcoreguidelines-pro-bounds-pointer-arithmetic
     ,-cppcoreguidelines-pro-type-cstyle-cast
     ,-cppcoreguidelines-pro-type-reinterpret-cast
+    ,-cppcoreguidelines-pro-type-static-cast-downcast
     ,-cppcoreguidelines-pro-type-union-access
     ,-cppcoreguidelines-pro-type-vararg
     ,-cppcoreguidelines-special-member-functions
-    ,-cppcoreguidelines-interfaces-global-init
-    ,-cppcoreguidelines-owning-memory
-    ,hicpp-signed-bitwise
     ,hicpp-exception-baseclass
     ,hicpp-avoid-goto
     ,modernize-*
-    ,-modernize-use-default-member-init
     ,-modernize-return-braced-init-list
     ,-modernize-use-auto
+    ,-modernize-use-default-member-init
+    ,-modernize-use-using
 '
 WarningsAsErrors: '*'
 HeaderFilterRegex: 'torch/csrc/.*'

diff --git a/tools/run-clang-tidy-in-ci.sh b/tools/run-clang-tidy-in-ci.sh
index 022b9d6..4d384da 100755
--- a/tools/run-clang-tidy-in-ci.sh
+++ b/tools/run-clang-tidy-in-ci.sh
@@ -38,11 +38,11 @@ fi
 # Run Clang-Tidy
 # The negative filters below are to exclude files that include onnx_pb.h,
 # otherwise we'd have to build ONNX protos as part of this CI job.
-time python tools/clang_tidy.py \
-  --verbose \
-  --paths torch/csrc \
-  --diff "$BASE_BRANCH" \
-  -g"-torch/csrc/jit/init.cpp" \
-  -g"-torch/csrc/jit/export.cpp" \
-  -g"-torch/csrc/jit/import.cpp" \
+time python tools/clang_tidy.py \
+  --verbose \
+  --paths torch/csrc \
+  --diff "$BASE_BRANCH" \
+  -g"-torch/csrc/distributed/Module.cpp" \
+  -g"-torch/csrc/jit/export.cpp" \
+  -g"-torch/csrc/jit/import.cpp" \
   "$@"

diff --git a/torch/csrc/DataLoader.cpp b/torch/csrc/DataLoader.cpp
index c5cdf64..28f3b51 100644
--- a/torch/csrc/DataLoader.cpp
+++ b/torch/csrc/DataLoader.cpp
@@ -16,7 +16,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include

@@ -35,7 +35,7 @@ static void HANDLER_NAME(int sig, siginfo_t *info, void *ctx) \
 { \
   auto _w = write(STDERR_FILENO, ERROR_MSG, sizeof(ERROR_MSG) / sizeof(char));\
   (void)_w; \
-  struct sigaction sa; \
+  struct sigaction sa{}; \
   sa.sa_handler = SIG_DFL; \
   sa.sa_flags = 0; \
   if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGNAL, &sa, nullptr) != 0) { \

@@ -49,7 +49,7 @@
 // http://man7.org/linux/man-pages/man2/signal.2.html
 static inline void setSignalHandler(int signal, void(*handler)(int, siginfo_t *, void *), struct sigaction *old_sa_ptr)
 {
-  struct sigaction sa;
+  struct sigaction sa{};
   sa.sa_sigaction = handler;
   sa.sa_flags = SA_RESTART|SA_SIGINFO|SA_NOCLDSTOP|SA_NODEFER;
   if (sigemptyset(&sa.sa_mask) != 0 || sigaction(signal, &sa, old_sa_ptr) != 0) {

@@ -77,7 +77,7 @@ static void handler_SIGTERM(int sig, siginfo_t *info, void *ctx)
   if (info->si_pid == getppid()) {
     _exit(EXIT_SUCCESS);
   }
-  struct sigaction sa;
+  struct sigaction sa{};
   sa.sa_handler = SIG_DFL;
   sa.sa_flags = 0;
   if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGTERM, &sa, nullptr) != 0) {

@@ -107,8 +107,8 @@ static PyObject *THPModule_errorIfAnyWorkerFails(PyObject *module) {
   siginfo_t infop;

   // Only check the pids we care about
-  for (auto it = worker_pids.begin(); it != worker_pids.end(); ++it) {
-    pid_set = &(it->second);
+  for (auto& w : worker_pids) {
+    pid_set = &(w.second);
     for (auto pid_it = pid_set->begin(); pid_it != pid_set->end(); ++pid_it) {
       worker_pid = *pid_it;
       // Use waitid rather than waitpid so that we can set NOWAIT, and that Python
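// Aside: a minimal, self-contained sketch of the zero-initialization idiom the
// DataLoader.cpp hunks above enforce (cppcoreguidelines-pro-type-member-init).
// `struct sigaction sa;` leaves any member that is never explicitly assigned
// indeterminate; `struct sigaction sa{};` value-initializes all of them first.
// POSIX only; the handler and signal below are illustrative, not from the patch.
#include <signal.h>

static void example_handler(int /*sig*/) {
  // Only async-signal-safe work belongs in a signal handler.
}

static int install_example_handler() {
  struct sigaction sa{};  // zero-initialized: no stale fields
  sa.sa_handler = example_handler;
  sa.sa_flags = SA_RESTART;
  if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGUSR1, &sa, nullptr) != 0) {
    return -1;  // same error-path shape as the patch uses
  }
  return 0;
}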
diff --git a/torch/csrc/Device.cpp b/torch/csrc/Device.cpp
index 2c7e331..08aa250 100644
--- a/torch/csrc/Device.cpp
+++ b/torch/csrc/Device.cpp
@@ -181,39 +181,39 @@ PyTypeObject THPDeviceType = {
   "torch.device", /* tp_name */
   sizeof(THPDevice), /* tp_basicsize */
   0, /* tp_itemsize */
-  0, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
+  nullptr, /* tp_dealloc */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
   (reprfunc)THPDevice_repr, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
   (hashfunc)THPDevice_hash, /* tp_hash */
-  0, /* tp_call */
+  nullptr, /* tp_call */
   (reprfunc)THPDevice_str, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
   (richcmpfunc)THPDevice_rc, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
   THPDevice_methods, /* tp_methods */
-  0, /* tp_members */
+  nullptr, /* tp_members */
   THPDevice_properties, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPDevice_pynew, /* tp_new */
 };

diff --git a/torch/csrc/Device.h b/torch/csrc/Device.h
index d14c400..3a8b698 100644
--- a/torch/csrc/Device.h
+++ b/torch/csrc/Device.h
@@ -4,6 +4,7 @@

 #include

+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct THPDevice {
   PyObject_HEAD
   at::Device device;

diff --git a/torch/csrc/Dtype.cpp b/torch/csrc/Dtype.cpp
index 2e68959..2f6f8e2 100644
--- a/torch/csrc/Dtype.cpp
+++ b/torch/csrc/Dtype.cpp
@@ -60,40 +60,40 @@ PyTypeObject THPDtypeType = {
   "torch.dtype", /* tp_name */
   sizeof(THPDtype), /* tp_basicsize */
   0, /* tp_itemsize */
-  0, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
+  nullptr, /* tp_dealloc */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
   (reprfunc)THPDtype_repr, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
   THPDtype_methods, /* tp_methods */
-  0, /* tp_members */
+  nullptr, /* tp_members */
   THPDtype_properties, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
-  0, /* tp_new */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
+  nullptr, /* tp_new */
 };

 void THPDtype_init(PyObject *module)

diff --git a/torch/csrc/Exceptions.h b/torch/csrc/Exceptions.h
index 35816ce..8ebc0af 100644
--- a/torch/csrc/Exceptions.h
+++ b/torch/csrc/Exceptions.h
@@ -57,7 +57,7 @@ struct python_error : public std::exception {
     other.traceback = nullptr;
   }

-  ~python_error() {
+  ~python_error() override {
     if (type || value || traceback) {
       AutoGIL gil;
       Py_XDECREF(type);

@@ -105,7 +105,7 @@ THP_CLASS std::string processErrorMsg(std::string str);
 // Abstract base class for exceptions which translate to specific Python types
 struct PyTorchError : public std::exception {
   virtual PyObject* python_type() = 0;
-  virtual const char* what() const noexcept override {
+  const char* what() const noexcept override {
     return msg.c_str();
   }
   std::string msg;

@@ -114,7 +114,7 @@ struct PyTorchError : public std::exception {
 // Translates to Python IndexError
 struct IndexError : public PyTorchError {
   IndexError(const char *format, ...);
-  virtual PyObject* python_type() override {
+  PyObject* python_type() override {
     return PyExc_IndexError;
   }
 };

@@ -122,7 +122,7 @@ struct IndexError : public PyTorchError {
 // Translates to Python TypeError
 struct TypeError : public PyTorchError {
   TypeError(const char *format, ...);
-  virtual PyObject* python_type() override {
+  PyObject* python_type() override {
     return PyExc_TypeError;
   }
 };

@@ -130,7 +130,7 @@ struct TypeError : public PyTorchError {
 // Translates to Python ValueError
 struct ValueError : public PyTorchError {
   ValueError(const char *format, ...);
-  virtual PyObject* python_type() override {
+  PyObject* python_type() override {
     return PyExc_ValueError;
   }
 };

diff --git a/torch/csrc/Generator.cpp b/torch/csrc/Generator.cpp
index dd774e1..ecf8598 100644
--- a/torch/csrc/Generator.cpp
+++ b/torch/csrc/Generator.cpp
@@ -3,7 +3,6 @@
 #include
 #include

-#include
 #include
 #include "THP.h"
 #include "torch/csrc/Exceptions.h"

@@ -140,38 +139,38 @@ PyTypeObject THPGeneratorType = {
   sizeof(THPGenerator), /* tp_basicsize */
   0, /* tp_itemsize */
   (destructor)THPGenerator_dealloc, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
-  0, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
+  nullptr, /* tp_repr */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
   THPGenerator_methods, /* tp_methods */
   THPGenerator_members, /* tp_members */
-  0, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_getset */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPGenerator_pynew, /* tp_new */
 };
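// Aside: why the slot tables above swap 0 for nullptr (modernize-use-nullptr).
// Every slot being edited holds a pointer (most of them function pointers), so
// the literal 0 compiled but hid the type; nullptr converts only to pointer
// types, so misuse fails to compile. A tiny sketch without Python headers
// (the names are hypothetical):
using reprfunc_t = const char* (*)(void*);

struct SlotTable {
  reprfunc_t repr;  // function-pointer slot, like tp_repr
  long flags;       // integral slot, like tp_flags -- this one stays 0
};

SlotTable make_table() {
  return SlotTable{nullptr, 0};  // nullptr reads as "no handler installed"
}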
diff --git a/torch/csrc/Layout.cpp b/torch/csrc/Layout.cpp
index 2aa6d7a..be5d132 100644
--- a/torch/csrc/Layout.cpp
+++ b/torch/csrc/Layout.cpp
@@ -32,40 +32,40 @@ PyTypeObject THPLayoutType = {
   "torch.layout", /* tp_name */
   sizeof(THPLayout), /* tp_basicsize */
   0, /* tp_itemsize */
-  0, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
+  nullptr, /* tp_dealloc */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
   (reprfunc)THPLayout_repr, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
-  0, /* tp_methods */
-  0, /* tp_members */
-  0, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
+  nullptr, /* tp_methods */
+  nullptr, /* tp_members */
+  nullptr, /* tp_getset */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
-  0, /* tp_new */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
+  nullptr, /* tp_new */
 };

 void THPLayout_init(PyObject *module)

diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp
index dbf17ca..1175de9 100644
--- a/torch/csrc/Module.cpp
+++ b/torch/csrc/Module.cpp
@@ -5,7 +5,6 @@
 #include
 #endif

-#include
 #include
 #include
 #include

@@ -303,6 +302,7 @@ void DLPack_Capsule_Destructor(PyObject* data) {
   DLManagedTensor * dlMTensor = (DLManagedTensor *)PyCapsule_GetPointer(data, "dltensor");
   if (dlMTensor) {
     // the dlMTensor has not been consumed, call deleter ourselves
+    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
     dlMTensor->deleter(const_cast(dlMTensor));
   } else {
     // the dlMTensor has been consumed

diff --git a/torch/csrc/PtrWrapper.cpp b/torch/csrc/PtrWrapper.cpp
index 9429a2d..a1763c7 100644
--- a/torch/csrc/PtrWrapper.cpp
+++ b/torch/csrc/PtrWrapper.cpp
@@ -57,38 +57,38 @@ PyTypeObject THPWrapperType = {
   sizeof(THPWrapper), /* tp_basicsize */
   0, /* tp_itemsize */
   (destructor)THPWrapper_dealloc, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
-  0, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
+  nullptr, /* tp_repr */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
-  0, /* tp_methods */
-  0, /* tp_members */
-  0, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
+  nullptr, /* tp_methods */
+  nullptr, /* tp_members */
+  nullptr, /* tp_getset */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPWrapper_pynew, /* tp_new */
 };

diff --git a/torch/csrc/Size.cpp b/torch/csrc/Size.cpp
index 476b262..ea08170 100644
--- a/torch/csrc/Size.cpp
+++ b/torch/csrc/Size.cpp
@@ -124,17 +124,17 @@ static PySequenceMethods THPSize_as_sequence = {
 #if PY_MAJOR_VERSION == 2
   wrap_tuple_fn,
 #else
-  0, /* sq_slice */
+  nullptr, /* sq_slice */
 #endif
-  0, /* sq_ass_item */
-  0, /* sq_ass_slice */
+  nullptr, /* sq_ass_item */
+  nullptr, /* sq_ass_slice */
   PyTuple_Type.tp_as_sequence->sq_contains
 };

 static PyMappingMethods THPSize_as_mapping = {
   PyTuple_Type.tp_as_mapping->mp_length,
   wrap_tuple_fn,
-  0
+  nullptr
 };

 static PyObject *THPSize_numel(THPSize *self)

@@ -159,39 +159,39 @@ PyTypeObject THPSizeType = {
   "torch.Size", /* tp_name */
   sizeof(THPSize), /* tp_basicsize */
   0, /* tp_itemsize */
-  0, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
+  nullptr, /* tp_dealloc */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
   (reprfunc)THPSize_repr, /* tp_repr */
-  0, /* tp_as_number */
+  nullptr, /* tp_as_number */
   &THPSize_as_sequence, /* tp_as_sequence */
   &THPSize_as_mapping, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
   THPSize_methods, /* tp_methods */
-  0, /* tp_members */
-  0, /* tp_getset */
+  nullptr, /* tp_members */
+  nullptr, /* tp_getset */
   &PyTuple_Type, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPSize_pynew, /* tp_new */
 };

diff --git a/torch/csrc/Storage.cpp b/torch/csrc/Storage.cpp
index 4aae5f1..9c2e62d 100644
--- a/torch/csrc/Storage.cpp
+++ b/torch/csrc/Storage.cpp
@@ -8,7 +8,6 @@

 #define THP_HOST_HALF

-#include
 #include
 // See Note [TH abstraction violation]
 // - Used to get at the allocator associated with a storage

diff --git a/torch/csrc/THP.h b/torch/csrc/THP.h
index fe52534..5cbc09c 100644
--- a/torch/csrc/THP.h
+++ b/torch/csrc/THP.h
@@ -2,7 +2,6 @@
 #define THP_H

 #include "torch/csrc/python_headers.h"
-#include
 #include
 #include

diff --git a/torch/csrc/TypeInfo.cpp b/torch/csrc/TypeInfo.cpp
index 839f298..57c184a 100644
--- a/torch/csrc/TypeInfo.cpp
+++ b/torch/csrc/TypeInfo.cpp
@@ -161,39 +161,39 @@ PyTypeObject THPFInfoType = {
   PyVarObject_HEAD_INIT(nullptr, 0) "torch.finfo", /* tp_name */
   sizeof(THPFInfo), /* tp_basicsize */
   0, /* tp_itemsize */
-  0, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
+  nullptr, /* tp_dealloc */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
   (reprfunc)THPFInfo_str, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
   (reprfunc)THPFInfo_str, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
   (richcmpfunc)THPDTypeInfo_compare, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
   THPFInfo_methods, /* tp_methods */
-  0, /* tp_members */
+  nullptr, /* tp_members */
   THPFInfo_properties, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPFInfo_pynew, /* tp_new */
 };

@@ -210,39 +210,39 @@ PyTypeObject THPIInfoType = {
   PyVarObject_HEAD_INIT(nullptr, 0) "torch.iinfo", /* tp_name */
   sizeof(THPIInfo), /* tp_basicsize */
   0, /* tp_itemsize */
-  0, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
+  nullptr, /* tp_dealloc */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
   (reprfunc)THPIInfo_str, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
   (reprfunc)THPIInfo_str, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
   (richcmpfunc)THPDTypeInfo_compare, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
   THPIInfo_methods, /* tp_methods */
-  0, /* tp_members */
+  nullptr, /* tp_members */
   THPIInfo_properties, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPIInfo_pynew, /* tp_new */
 };

diff --git a/torch/csrc/autograd/python_anomaly_mode.h b/torch/csrc/autograd/python_anomaly_mode.h
index f53bc48..ca6b960 100644
--- a/torch/csrc/autograd/python_anomaly_mode.h
+++ b/torch/csrc/autograd/python_anomaly_mode.h
@@ -13,12 +13,12 @@ struct PyAnomalyMetadata : public AnomalyMetadata {
     AutoGIL gil;
     dict_ = PyDict_New();
   }
-  ~PyAnomalyMetadata() {
+  ~PyAnomalyMetadata() override {
     AutoGIL gil;
     Py_DECREF(dict_);
   }
-  virtual void store_stack() override;
-  virtual void print_stack() override;
+  void store_stack() override;
+  void print_stack() override;

   PyObject* dict() {
     return dict_;

diff --git a/torch/csrc/autograd/python_cpp_function.cpp b/torch/csrc/autograd/python_cpp_function.cpp
index 7ce59e6..3fbf68e 100644
--- a/torch/csrc/autograd/python_cpp_function.cpp
+++ b/torch/csrc/autograd/python_cpp_function.cpp
@@ -2,7 +2,7 @@
 #include "torch/csrc/python_headers.h"

 #include
-#include
+#include
 #include
 #include

@@ -190,7 +190,7 @@ PyTypeObject* _initFunctionPyTypeObject(PyTypeObject& type, const char* name,
 static std::unordered_map cpp_function_types;

 struct DefaultFunctionType {
-  DefaultFunctionType() {
+  DefaultFunctionType() : type() {
     _initFunctionPyTypeObject(type, "CppFunction", nullptr, nullptr);
     Py_INCREF(&type);
   }

diff --git a/torch/csrc/autograd/python_engine.cpp b/torch/csrc/autograd/python_engine.cpp
index 767842a..59065e2 100644
--- a/torch/csrc/autograd/python_engine.cpp
+++ b/torch/csrc/autograd/python_engine.cpp
@@ -229,39 +229,39 @@ PyTypeObject THPEngineType = {
   "torch._C._EngineBase", /* tp_name */
   sizeof(THPEngine), /* tp_basicsize */
   0, /* tp_itemsize */
-  0, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
-  0, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_dealloc */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
+  nullptr, /* tp_repr */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
   THPEngine_methods, /* tp_methods */
-  0, /* tp_members */
-  0, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_members */
+  nullptr, /* tp_getset */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPEngine_new /* tp_new */
 };
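// Aside: the Exceptions.h and python_anomaly_mode.h edits above are
// modernize-use-override fixes: `virtual ... override` is redundant, and
// destructors of polymorphic types should carry `override` too. With it, a
// signature drift in the base class becomes a compile error rather than a
// silent new virtual. A minimal sketch (the types here are hypothetical):
#include <exception>

struct Metadata : std::exception {
  const char* what() const noexcept override { return "metadata"; }
  virtual void store_stack() = 0;
  ~Metadata() override = default;  // std::exception's destructor is virtual
};

struct PyMetadata : Metadata {
  // Writing e.g. `void store_stack(int)` here would fail to compile,
  // which is exactly the protection `override` buys.
  void store_stack() override {}
};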
diff --git a/torch/csrc/autograd/python_engine.h b/torch/csrc/autograd/python_engine.h
index 0db36ab..a1c70cf 100644
--- a/torch/csrc/autograd/python_engine.h
+++ b/torch/csrc/autograd/python_engine.h
@@ -11,15 +11,15 @@ bool THPEngine_initModule(PyObject *module);
 namespace torch { namespace autograd { namespace python {

 struct PythonEngine : public Engine {
-  virtual void thread_init(int device) override;
-  virtual void thread_on_exception(FunctionTask& task, std::exception& e) override;
-  virtual variable_list execute(
+  void thread_init(int device) override;
+  void thread_on_exception(FunctionTask& task, std::exception& e) override;
+  variable_list execute(
       const edge_list& roots,
       const variable_list& inputs,
       bool keep_graph,
       bool create_graph,
      const edge_list& outputs = {}) override;
-  virtual std::unique_ptr make_anomaly_metadata() override;
+  std::unique_ptr make_anomaly_metadata() override;
 };

 }}} // namespace torch::autograd::python

diff --git a/torch/csrc/autograd/python_function.cpp b/torch/csrc/autograd/python_function.cpp
index 8296153..b5715f4 100644
--- a/torch/csrc/autograd/python_function.cpp
+++ b/torch/csrc/autograd/python_function.cpp
@@ -705,7 +705,7 @@ PyObject *THPFunction_do_forward(THPFunction *self, PyObject *_inputs)
 PyObject *THPFunction_apply(PyObject *cls, PyObject *inputs)
 {
   HANDLE_TH_ERRORS
-  torch::autograd::profiler::RecordFunction record(((PyTypeObject*)cls)->tp_name,
+  torch::autograd::profiler::RecordFunction record(((PyTypeObject*)cls)->tp_name,
     Function::peek_at_next_sequence_nr());

   THPObjectPtr backward_cls(PyObject_GetAttrString(cls, "_backward_cls"));

@@ -1030,38 +1030,38 @@ PyTypeObject THPFunctionType = {
   sizeof(THPFunction), /* tp_basicsize */
   0, /* tp_itemsize */
   (destructor)THPFunction_dealloc, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
-  0, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
+  nullptr, /* tp_repr */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
   nullptr, /* tp_doc */
   (traverseproc)THPFunction_traverse, /* tp_traverse */
   (inquiry)THPFunction_clear, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
   THPFunction_methods, /* tp_methods */
-  0, /* tp_members */
+  nullptr, /* tp_members */
   THPFunction_properties, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPFunction_new /* tp_new */
 };

diff --git a/torch/csrc/autograd/python_function.h b/torch/csrc/autograd/python_function.h
index fb0c48b..8a86b18 100644
--- a/torch/csrc/autograd/python_function.h
+++ b/torch/csrc/autograd/python_function.h
@@ -34,13 +34,13 @@ struct VariableInfo {
 struct PyFunction : public Function {
   PyFunction(PyObject* obj) : obj(obj) {}

-  virtual variable_list apply(variable_list&& inputs) override;
+  variable_list apply(variable_list&& inputs) override;
   variable_list legacy_apply(const variable_list& inputs);

-  virtual void release_variables() override;
-  virtual std::string name() const override;
-  virtual std::shared_ptr get_shared_ptr() override;
-  virtual bool is_traceable() override;
+  void release_variables() override;
+  std::string name() const override;
+  std::shared_ptr get_shared_ptr() override;
+  bool is_traceable() override;

   // THPFunction this Function is wrapping.
   PyObject* obj;

diff --git a/torch/csrc/autograd/python_hook.h b/torch/csrc/autograd/python_hook.h
index 4cc00d8..b7021f5 100644
--- a/torch/csrc/autograd/python_hook.h
+++ b/torch/csrc/autograd/python_hook.h
@@ -8,7 +8,7 @@ namespace torch { namespace autograd {

 struct PyFunctionPreHook : public FunctionPreHook {
   PyFunctionPreHook(PyObject* dict, int value_idx);
-  ~PyFunctionPreHook();
+  ~PyFunctionPreHook() override;
   variable_list operator()(const variable_list& values) override;
   PyObject* dict;
   int value_idx;

@@ -16,7 +16,7 @@ struct PyFunctionPreHook : public FunctionPreHook {
 struct PyFunctionPostHook : public FunctionPostHook {
   PyFunctionPostHook(PyObject* dict);
-  ~PyFunctionPostHook();
+  ~PyFunctionPostHook() override;
   variable_list operator()(const variable_list& outputs, const variable_list& inputs) override;
   PyObject* dict;
 };

diff --git a/torch/csrc/autograd/python_legacy_variable.cpp b/torch/csrc/autograd/python_legacy_variable.cpp
index 4c6ac18..a1557a5 100644
--- a/torch/csrc/autograd/python_legacy_variable.cpp
+++ b/torch/csrc/autograd/python_legacy_variable.cpp
@@ -83,39 +83,39 @@ PyTypeObject THPLegacyVariableType = {
   "torch._C._LegacyVariableBase", /* tp_name */
   0, /* tp_basicsize */
   0, /* tp_itemsize */
-  0, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
-  0, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
-  0, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_dealloc */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
+  nullptr, /* tp_repr */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
+  nullptr, /* tp_as_mapping */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
-  0, /* tp_methods */
-  0, /* tp_members */
-  0, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
+  nullptr, /* tp_methods */
+  nullptr, /* tp_members */
+  nullptr, /* tp_getset */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPVariable_pynew /* tp_new */
 };

diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp
index 70d80d2..b5c2e6c 100644
--- a/torch/csrc/autograd/python_variable.cpp
+++ b/torch/csrc/autograd/python_variable.cpp
@@ -139,7 +139,7 @@ static PyObject* THPVariable_make_subclass(PyObject* _ignored, PyObject* args, PyObject* kwargs) {
   static PythonArgParser parser({
     "_make_subclass(PyObject* cls, Tensor data, bool require_grad=False)",
   });
-  ParsedArgs<3> parsed_args;
+  ParsedArgs<3> parsed_args{};
   auto r = parser.parse(args, kwargs, parsed_args);
   PyObject* cls = r.pyobject(0);
   if (!PyType_Check(cls)) {

@@ -239,7 +239,7 @@ int THPVariable_set_grad(THPVariable *self, PyObject *py_grad)
   auto& grad = ((THPVariable*)py_grad)->cdata;
   bool gradIsSparse = false;
   auto backend = var.is_cuda() ? Backend::SparseCUDA : Backend::SparseCPU;
-  auto typeOpt = at::globalContext().getNonVariableTypeOpt(backend, var.type().scalarType());
+  auto typeOpt = at::globalContext().getNonVariableTypeOpt(backend, var.type().scalarType());
   if (typeOpt) {
     auto& sparseType = at::globalContext().getNonVariableType(backend, var.type().scalarType());
     gradIsSparse = grad.type() == sparseType;

@@ -435,38 +435,38 @@ PyTypeObject THPVariableType = {
   sizeof(THPVariable), /* tp_basicsize */
   0, /* tp_itemsize */
   (destructor)THPVariable_dealloc, /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
-  0, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
+  nullptr, /* tp_repr */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
   &THPVariable_as_mapping, /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
   nullptr, /* tp_doc */
   (traverseproc)THPVariable_traverse, /* tp_traverse */
   (inquiry)THPVariable_clear, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
-  0, /* tp_methods */
-  0, /* tp_members */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
+  nullptr, /* tp_methods */
+  nullptr, /* tp_members */
   THPVariable_properties, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPVariable_pynew /* tp_new */
 };

diff --git a/torch/csrc/autograd/python_variable.h b/torch/csrc/autograd/python_variable.h
index 5a21264..6e5d403 100644
--- a/torch/csrc/autograd/python_variable.h
+++ b/torch/csrc/autograd/python_variable.h
@@ -8,13 +8,14 @@
 #include "torch/csrc/THP_export.h"

 // Python object that backs torch.autograd.Variable
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct THPVariable {
   PyObject_HEAD
   // Payload
   torch::autograd::Variable cdata;
   // Hooks to be run on backwards pass (corresponds to Python attr
   // '_backwards_hooks', set by 'register_hook')
-  PyObject* backward_hooks;
+  PyObject* backward_hooks = nullptr;
 };

 THP_API PyObject *THPVariableClass;
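// Aside: python_variable.h above moves `backward_hooks` to a default member
// initializer (python_arg_flatten.h later does the same for `grad_enabled`).
// Initializing at the declaration means no constructor -- current or future --
// can forget the field. A small sketch of the idiom (hypothetical type, it
// only mirrors the pattern):
#include <string>

struct DescriptorSketch {
  std::string structure;           // default-constructed: empty
  bool grad_enabled = false;       // default visible at the declaration
  void* backward_hooks = nullptr;  // pointers start null, never indeterminate
};

DescriptorSketch make_descriptor() {
  return DescriptorSketch{};       // every member has a defined value
}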
diff --git a/torch/csrc/byte_order.cpp b/torch/csrc/byte_order.cpp
index 7818ca2..51a8c49 100644
--- a/torch/csrc/byte_order.cpp
+++ b/torch/csrc/byte_order.cpp
@@ -1,6 +1,6 @@
 #include "byte_order.h"

-#include
+#include

 #if defined(_MSC_VER)
 #include

@@ -125,6 +125,7 @@ void THP_decodeInt64Buffer(int64_t* dst, const uint8_t* src, THPByteOrder order,
 void THP_decodeHalfBuffer(THHalf* dst, const uint8_t* src, THPByteOrder order, size_t len)
 {
   for (size_t i = 0; i < len; i++) {
+    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
     union { uint16_t x; THHalf f; };
     x = (order == THP_BIG_ENDIAN ? decodeUInt16BE(src) : decodeUInt16LE(src));
     dst[i] = f;

@@ -135,6 +136,7 @@ void THP_decodeHalfBuffer(THHalf* dst, const uint8_t* src, THPByteOrder order, s
 void THP_decodeFloatBuffer(float* dst, const uint8_t* src, THPByteOrder order, size_t len)
 {
   for (size_t i = 0; i < len; i++) {
+    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
     union { uint32_t x; float f; };
     x = (order == THP_BIG_ENDIAN ? decodeUInt32BE(src) : decodeUInt32LE(src));
     dst[i] = f;

@@ -145,6 +147,7 @@ void THP_decodeDoubleBuffer(double* dst, const uint8_t* src, THPByteOrder order, s
 void THP_decodeDoubleBuffer(double* dst, const uint8_t* src, THPByteOrder order, size_t len)
 {
   for (size_t i = 0; i < len; i++) {
+    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
     union { uint64_t x; double d; };
     x = (order == THP_BIG_ENDIAN ? decodeUInt64BE(src) : decodeUInt64LE(src));
     dst[i] = d;

diff --git a/torch/csrc/byte_order.h b/torch/csrc/byte_order.h
index 9699556..0d0b80b 100644
--- a/torch/csrc/byte_order.h
+++ b/torch/csrc/byte_order.h
@@ -2,7 +2,7 @@
 #define THP_BYTE_ORDER_H

 #include
-#include
+#include
 #include

 enum THPByteOrder {

diff --git a/torch/csrc/copy_utils.h b/torch/csrc/copy_utils.h
index 9fb8bb3..8a2a6b1 100644
--- a/torch/csrc/copy_utils.h
+++ b/torch/csrc/copy_utils.h
@@ -15,9 +15,9 @@ typedef std::vector THPCopyList;

 inline bool tryTHPCopy(const THPCopyList& v, PyObject* dst, PyObject* src, bool non_blocking, bool broadcast)
 {
-  for (auto it = v.begin(); it != v.end(); ++it) {
-    if (it->non_blocking == non_blocking && PyType_IsSubtype(Py_TYPE(src), it->srcType)) {
-      (it->copy)(dst, src, broadcast);
+  for (auto& i : v) {
+    if (i.non_blocking == non_blocking && PyType_IsSubtype(Py_TYPE(src), i.srcType)) {
+      (i.copy)(dst, src, broadcast);
       return true;
     }
   }

diff --git a/torch/csrc/cuda/Module.cpp b/torch/csrc/cuda/Module.cpp
index 0cc5ebe..5d414e8 100644
--- a/torch/csrc/cuda/Module.cpp
+++ b/torch/csrc/cuda/Module.cpp
@@ -1,6 +1,5 @@
 #include "torch/csrc/python_headers.h"

-#include
 #include
 #include
 #include

diff --git a/torch/csrc/cuda/Storage.cpp b/torch/csrc/cuda/Storage.cpp
index da6f15c..6933719 100644
--- a/torch/csrc/cuda/Storage.cpp
+++ b/torch/csrc/cuda/Storage.cpp
@@ -3,7 +3,6 @@
 #include "torch/csrc/python_headers.h"

 #include
-#include
 // See Note [TH abstraction violation]
 // - Used to get at allocator from storage
 #include

diff --git a/torch/csrc/cuda/Tensor.cpp b/torch/csrc/cuda/Tensor.cpp
index 0706e43..04164be 100644
--- a/torch/csrc/cuda/Tensor.cpp
+++ b/torch/csrc/cuda/Tensor.cpp
@@ -4,7 +4,6 @@

 #include
 #include
-#include
 #include
 #include
 #include

diff --git a/torch/csrc/distributed/c10d/init.cpp b/torch/csrc/distributed/c10d/init.cpp
index 2b42e1e..9099c56 100644
--- a/torch/csrc/distributed/c10d/init.cpp
+++ b/torch/csrc/distributed/c10d/init.cpp
@@ -383,7 +383,7 @@ They are used in specifying strategies for reduction collectives, e.g.,
         // Use the hostname to resolve the network address to
         // use. Note: if the hostname does not resolve to an address (e.g.
         // because of misconfigured /etc/hosts file), this will not work.
-        std::array hostname;
+        std::array hostname{};
         auto rv = gethostname(hostname.data(), hostname.size());
         if (rv != 0) {
           throw std::system_error(errno, std::system_category());

diff --git a/torch/csrc/generic/Storage.cpp b/torch/csrc/generic/Storage.cpp
index 0249dde..55ffa89 100644
--- a/torch/csrc/generic/Storage.cpp
+++ b/torch/csrc/generic/Storage.cpp
@@ -163,11 +163,11 @@ static PyObject * THPStorage_(get)(THPStorage *self, PyObject *index)
     Py_ssize_t start, stop, slicelength, step;
     int64_t len = THWStorage_(size)(LIBRARY_STATE self->cdata);
     if (!THPUtils_parseSlice(index, len, &start, &stop, &step, &slicelength))
-      return NULL;
+      return nullptr;
     if (step != 1) {
       THPUtils_setError("Trying to slice with a step of %" PRId64 ", but only a step of "
           "1 is supported", (int64_t)step);
-      return NULL;
+      return nullptr;
     }

     scalar_t *data = THWStorage_(data)(LIBRARY_STATE self->cdata);

@@ -243,38 +243,38 @@ PyTypeObject THPStorageType = {
   sizeof(THPStorage), /* tp_basicsize */
   0, /* tp_itemsize */
   (destructor)THPStorage_(dealloc), /* tp_dealloc */
-  0, /* tp_print */
-  0, /* tp_getattr */
-  0, /* tp_setattr */
-  0, /* tp_reserved */
-  0, /* tp_repr */
-  0, /* tp_as_number */
-  0, /* tp_as_sequence */
+  nullptr, /* tp_print */
+  nullptr, /* tp_getattr */
+  nullptr, /* tp_setattr */
+  nullptr, /* tp_reserved */
+  nullptr, /* tp_repr */
+  nullptr, /* tp_as_number */
+  nullptr, /* tp_as_sequence */
   &THPStorage_(mappingmethods), /* tp_as_mapping */
-  0, /* tp_hash */
-  0, /* tp_call */
-  0, /* tp_str */
-  0, /* tp_getattro */
-  0, /* tp_setattro */
-  0, /* tp_as_buffer */
+  nullptr, /* tp_hash */
+  nullptr, /* tp_call */
+  nullptr, /* tp_str */
+  nullptr, /* tp_getattro */
+  nullptr, /* tp_setattro */
+  nullptr, /* tp_as_buffer */
   Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
   nullptr, /* tp_doc */
-  0, /* tp_traverse */
-  0, /* tp_clear */
-  0, /* tp_richcompare */
+  nullptr, /* tp_traverse */
+  nullptr, /* tp_clear */
+  nullptr, /* tp_richcompare */
   0, /* tp_weaklistoffset */
-  0, /* tp_iter */
-  0, /* tp_iternext */
-  0, /* will be assigned in init */ /* tp_methods */
-  0, /* will be assigned in init */ /* tp_members */
-  0, /* tp_getset */
-  0, /* tp_base */
-  0, /* tp_dict */
-  0, /* tp_descr_get */
-  0, /* tp_descr_set */
+  nullptr, /* tp_iter */
+  nullptr, /* tp_iternext */
+  nullptr, /* will be assigned in init */ /* tp_methods */
+  nullptr, /* will be assigned in init */ /* tp_members */
+  nullptr, /* tp_getset */
+  nullptr, /* tp_base */
+  nullptr, /* tp_dict */
+  nullptr, /* tp_descr_get */
+  nullptr, /* tp_descr_set */
   0, /* tp_dictoffset */
-  0, /* tp_init */
-  0, /* tp_alloc */
+  nullptr, /* tp_init */
+  nullptr, /* tp_alloc */
   THPStorage_(pynew), /* tp_new */
 };

diff --git a/torch/csrc/jit/batched/BatchTensor.h b/torch/csrc/jit/batched/BatchTensor.h
index dd624c3..cb7f0b0 100644
--- a/torch/csrc/jit/batched/BatchTensor.h
+++ b/torch/csrc/jit/batched/BatchTensor.h
@@ -12,7 +12,6 @@ public:
   // expand a tensor to a batchtensor given batch_size
   BatchTensor(at::Tensor data, int64_t batch_size);
   BatchTensor(const std::vector datalist, at::Tensor dims);
-  ~BatchTensor(){};
   const char * toString() const {
     return "BatchTensor";
   }
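// Aside: the byte_order.cpp hunks above keep the anonymous-union punning and
// merely annotate it for clang-tidy. For comparison, the strictly well-defined
// alternative is std::memcpy; this sketch (not from the patch) decodes a
// little-endian float the same way the union trick does on PyTorch's targets:
#include <cstdint>
#include <cstring>

float decode_float_le(const uint8_t* src) {
  uint32_t bits = static_cast<uint32_t>(src[0]) |
                  (static_cast<uint32_t>(src[1]) << 8) |
                  (static_cast<uint32_t>(src[2]) << 16) |
                  (static_cast<uint32_t>(src[3]) << 24);
  float out;
  std::memcpy(&out, &bits, sizeof(out));  // defined bit-cast, no union needed
  return out;
}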
diff --git a/torch/csrc/jit/constants.cpp b/torch/csrc/jit/constants.cpp
index 99d4a0a..5332043 100644
--- a/torch/csrc/jit/constants.cpp
+++ b/torch/csrc/jit/constants.cpp
@@ -14,9 +14,10 @@ Value* insertConstant(
     c10::optional scope) {
   Node * n = g.create(prim::Constant);
   if(val.isTensor()) {
-    at::Tensor ref = std::move(val).toTensor();
+    at::Tensor ref = val.toTensor();
     if(!ref.defined()) {
-      return insertConstant(g, val, loc, scope);
+      n->destroy();
+      return g.insertNode(g.createUndefined())->output();
     }
     if (ref.is_variable()) {
       ref = autograd::Variable(ref).data();

@@ -69,7 +70,7 @@ Value* insertConstant(
 RegisterOperators reg({
     // Implementation of constant node, computes and IValue
     Operator(
-        FunctionSchema(prim::Constant, {}, {}, /*vararg=*/false, /*varret=*/true),
+        FunctionSchema(prim::Constant, {}, {}, /*is_vararg=*/false, /*is_varret=*/true),
         [](const Node* node) -> Operation {
           TypePtr type = node->output()->type();
           if(type->isSubtypeOf(DynamicType::get())) {

diff --git a/torch/csrc/jit/init.cpp b/torch/csrc/jit/init.cpp
index 308f70a..4d7c075 100644
--- a/torch/csrc/jit/init.cpp
+++ b/torch/csrc/jit/init.cpp
@@ -90,7 +90,7 @@ void initJITBindings(PyObject *module) {

   py::register_exception(m, "JITException");

-  py::class_(m, "IODescriptor");
+  py::class_(m, "IODescriptor"); // NOLINT(bugprone-unused-raii)

   m.def("_jit_init", loadPythonClasses)
    .def("_jit_pass_onnx", ToONNX)

@@ -172,12 +172,14 @@ void initJITBindings(PyObject *module) {
         checkAliasAnnotation(g, std::move(stack), unqualified_op_name);
       });

+  // NOLINTNEXTLINE(bugprone-unused-raii)
   py::class_(m, "CompleteArgumentSpec")
       .def("__repr__", [](CompleteArgumentSpec& self) {
         std::ostringstream s;
         s << self;
         return s.str();
       });
+  // NOLINTNEXTLINE(bugprone-unused-raii)
   py::class_(m, "ArgumentSpec");
   py::class_(m, "Code")
       .def("grad_executors", [](Code& c) {

@@ -337,6 +339,7 @@ void initJITBindings(PyObject *module) {
         });
       });

+  // NOLINTNEXTLINE(bugprone-unused-raii)
   py::class_(m, "Future");

   m.def("fork", [](script::Module &sm, py::args args) {

diff --git a/torch/csrc/jit/interpreter.h b/torch/csrc/jit/interpreter.h
index 1c04374..ac39de3 100644
--- a/torch/csrc/jit/interpreter.h
+++ b/torch/csrc/jit/interpreter.h
@@ -61,11 +61,12 @@ private:

 // Created by wait()
 struct Suspend : public std::exception {
-  virtual const char* what() const noexcept override {
+  const char* what() const noexcept override {
     return "Suspend";
   }

-  explicit Suspend(c10::intrusive_ptr future_) : future(future_) {}
+  explicit Suspend(c10::intrusive_ptr future_)
+      : future(std::move(future_)) {}

   c10::intrusive_ptr future;
 };

@@ -74,7 +75,7 @@ struct InterpreterContinuation {
   InterpreterContinuation(InterpreterState state_, Stack stack_)
       : state(std::move(state_)), stack(std::move(stack_)) {}

-  void operator()(void) {
+  void operator()() {
     state.runAsync(stack);
   }

diff --git a/torch/csrc/jit/passes/alias_analysis.cpp b/torch/csrc/jit/passes/alias_analysis.cpp
index a47fdcc..d6a8f0d 100644
--- a/torch/csrc/jit/passes/alias_analysis.cpp
+++ b/torch/csrc/jit/passes/alias_analysis.cpp
@@ -20,7 +20,7 @@ bool shouldAnnotate(const Value* v) {
 }
 } // namespace

-AliasDb::AliasDb(std::shared_ptr graph) : graph_(graph) {
+AliasDb::AliasDb(std::shared_ptr graph) : graph_(std::move(graph)) {
   analyze(graph_);

   // Build helper indices

@@ -36,7 +36,7 @@ AliasDb::AliasDb(std::shared_ptr graph) : graph_(graph) {
     }
   }
   // - Set of all nodes with a wildcard
-  buildWildcardIndex(graph->block());
+  buildWildcardIndex(graph_->block());
 }

 void AliasDb::buildWildcardIndex(const Block* b) {

diff --git a/torch/csrc/jit/passes/alias_analysis.h b/torch/csrc/jit/passes/alias_analysis.h
index 47a60a1..5c543a7 100644
--- a/torch/csrc/jit/passes/alias_analysis.h
+++ b/torch/csrc/jit/passes/alias_analysis.h
@@ -87,7 +87,7 @@ class AliasDb {
 };

 inline TORCH_API AliasDb AliasAnalysis(std::shared_ptr graph) {
-  return AliasDb(graph);
+  return AliasDb(std::move(graph));
 }

 } // namespace jit
 } // namespace torch

diff --git a/torch/csrc/jit/passes/onnx.cpp b/torch/csrc/jit/passes/onnx.cpp
index 87fc759..133adfa 100644
--- a/torch/csrc/jit/passes/onnx.cpp
+++ b/torch/csrc/jit/passes/onnx.cpp
@@ -19,7 +19,7 @@ std::shared_ptr ToONNX(std::shared_ptr& graph, ::torch::onnx::Oper
 }

 void BlockToONNX(Block* old_block, Block* new_block, ::torch::onnx::OperatorExportTypes operator_export_type, std::unordered_map env) {
-  torch::autograd::SymbolicContext ctx;
+  torch::autograd::SymbolicContext ctx{};
   ctx.block = new_block;

   py::object onnx = py::module::import("torch.onnx");

diff --git a/torch/csrc/jit/passes/onnx/peephole.cpp b/torch/csrc/jit/passes/onnx/peephole.cpp
index d13e77a..3f98abf 100644
--- a/torch/csrc/jit/passes/onnx/peephole.cpp
+++ b/torch/csrc/jit/passes/onnx/peephole.cpp
@@ -36,9 +36,9 @@ std::vector composeTransposes(const std::vector & t1,
   JIT_ASSERT(t1.size() == t2.size());
   std::vector ret;
   ret.reserve(t1.size());
-  for (size_t i = 0; i < t2.size(); i++) {
-    JIT_ASSERT(t2[i] < int64_t(t1.size()));
-    ret.push_back(t1[t2[i]]);
+  for (const auto& i : t2) {
+    JIT_ASSERT(i < int64_t(t1.size()));
+    ret.push_back(t1[i]);
   }
   return ret;
 }

@@ -515,18 +515,15 @@ static void eraseListConstruct(Block* block) {
         concat_node->insertBefore(lc_node);

         // make concat node output as new input, then ListConstruct should become dead
-        replacements.push_back(std::make_tuple(
-            i,
-            std::vector({concat_node->output()})
-        ));
+        replacements.emplace_back(i, std::vector({concat_node->output()}));

       } else {
         // Tensor lists are used mostly for inputs to cat/stack. They are already handled
         // in those symbolics, and should become dead afterwards.
-        replacements.push_back(std::make_tuple(
+        replacements.emplace_back(
             i,
             std::vector(
-                lc_node->inputs().begin(), lc_node->inputs().end())));
+                lc_node->inputs().begin(), lc_node->inputs().end()));
       }
     }

diff --git a/torch/csrc/jit/passes/to_batch.cpp b/torch/csrc/jit/passes/to_batch.cpp
index 9219b5e..9218ae2 100644
--- a/torch/csrc/jit/passes/to_batch.cpp
+++ b/torch/csrc/jit/passes/to_batch.cpp
@@ -35,16 +35,15 @@ void ToBatch::visitAten(Node* n, Block* block, Block* res_block){
   }

   // transform scalar to tensor before pass to batch operator script
-  for(size_t i = 0; i < new_inputs.size(); i++){
-    auto input = new_inputs[i];
+  for (auto& input : new_inputs) {
     if(input->type() == IntType::get() || input->type() == FloatType::get()){
       auto to_tensor_node = res_graph->createNumToTensor(input);
       res_graph->insertNode(to_tensor_node);
-      new_inputs[i] = to_tensor_node->output();
+      input = to_tensor_node->output();
     } else if(input->type() == BoolType::get()) {
       auto to_tensor_node = res_graph->createBoolToTensor(input);
       res_graph->insertNode(to_tensor_node);
-      new_inputs[i] = to_tensor_node->output();
+      input = to_tensor_node->output();
     }
   }
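// Aside: the alias_analysis and interpreter.h edits above use the
// sink-parameter idiom: take the smart pointer by value and std::move it into
// the member, paying one refcount bump instead of two. Note the companion fix
// in AliasDb's constructor -- after the move, the body must read the member
// (graph_), not the moved-from parameter. A minimal sketch with hypothetical
// names:
#include <memory>
#include <utility>

struct Graph { int blocks = 0; };

class AliasIndex {
 public:
  explicit AliasIndex(std::shared_ptr<Graph> graph)
      : graph_(std::move(graph)) {
    (void)graph_->blocks;  // use graph_ from here on; `graph` is moved-from
  }

 private:
  std::shared_ptr<Graph> graph_;
};

AliasIndex analyze(std::shared_ptr<Graph> g) {
  return AliasIndex(std::move(g));  // keep moving along the call chain
}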
diff --git a/torch/csrc/jit/python_arg_flatten.h b/torch/csrc/jit/python_arg_flatten.h
index 74a5212..1d129a8 100644
--- a/torch/csrc/jit/python_arg_flatten.h
+++ b/torch/csrc/jit/python_arg_flatten.h
@@ -58,7 +58,7 @@ struct IODescriptor {
   // different than the number of 'v's in structure.
   std::string structure;
   std::vector metadata;
-  bool grad_enabled;
+  bool grad_enabled = false;
 };

 static inline std::ostream& operator<<(std::ostream& out, const IODescriptor::VariableMetadata& meta) {

diff --git a/torch/csrc/jit/python_interpreter.cpp b/torch/csrc/jit/python_interpreter.cpp
index 889d201..f958338 100644
--- a/torch/csrc/jit/python_interpreter.cpp
+++ b/torch/csrc/jit/python_interpreter.cpp
@@ -31,8 +31,8 @@ namespace {
 Operation createPythonOperation(const Node* op_) {
   AutoGIL gil;
   const PythonOp* op = static_cast(op_);
-  const py::function func =
-      py::reinterpret_borrow(py::handle(const_cast(op)->pyobj.get()));
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+  const py::function func = py::reinterpret_borrow(py::handle(const_cast(op)->pyobj.get()));

   size_t num_inputs = 0;
   for(auto arg_type : op->cconv) {

@@ -50,8 +50,8 @@ Operation createPythonOperation(const Node* op_) {
     size_t next_tensor = 0;
     for (auto arg_type : op->cconv) {
       if (arg_type == 'c') {
-        py_inputs[i] =
-            py::reinterpret_borrow(const_cast(op)->scalar_args[next_scalar++].get());
+        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+        py_inputs[i] = py::reinterpret_borrow(const_cast(op)->scalar_args[next_scalar++].get());
       } else if (arg_type == 'd') {
         py_inputs[i] = toPyObject(std::move(peek(stack, next_tensor, num_inputs)));
         next_tensor++;

diff --git a/torch/csrc/jit/python_ir.cpp b/torch/csrc/jit/python_ir.cpp
index 758738a..34b4603 100644
--- a/torch/csrc/jit/python_ir.cpp
+++ b/torch/csrc/jit/python_ir.cpp
@@ -80,7 +80,7 @@ struct ConcretePythonOp : public PythonOp {
       return getPythonName(pyobj.get());
     }
   }
-  virtual void cloneFrom(Node * other_) override {
+  void cloneFrom(Node * other_) override {
     Node::cloneFrom(other_);
     auto other = other_->cast();
     this->cconv = other->cconv;

@@ -391,9 +391,8 @@ void initPythonIRBindings(PyObject * module_) {
       return n.t(Symbol::attr(name));
     })
     .def("zs_",[](Node & n, const char * name, TensorsAttr::ValueType v) {
-      // NOLINTNEXTLINE(modernize-loop-convert)
-      for (size_t i = 0; i < v.size(); ++ i) {
-        v[i] = autograd::Variable(v[i].view({})).data();
+      for (auto& i : v) {
+        i = autograd::Variable(i.view({})).data();
       }
       return n.ts_(Symbol::attr(name), std::move(v));
     })

diff --git a/torch/csrc/jit/register_prim_ops.cpp b/torch/csrc/jit/register_prim_ops.cpp
index 301ef80..c4bc070 100644
--- a/torch/csrc/jit/register_prim_ops.cpp
+++ b/torch/csrc/jit/register_prim_ops.cpp
@@ -709,7 +709,7 @@ RegisterOperators reg({
       return [=](Stack& stack) { \
         int64_t a, b; \
        pop(stack, a, b); \
-        push(stack, op); \
+        push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \
         return 0; \
       }; \
     }),

@@ -1009,8 +1009,8 @@ Operator( \
         at::Tensor t; \
         c_type other; \
         pop(stack, t, other); \
-        std::move(t) = other; \
-        push(stack, std::move(t)); \
+        std::move(t) = other; /* NOLINT(bugprone-use-after-move) */ \
+        push(stack, std::move(t)); /* NOLINT(bugprone-use-after-move) */ \
         return 0; \
       }; \
     }),

diff --git a/torch/csrc/jit/script/compiler.h b/torch/csrc/jit/script/compiler.h
index 3d592c9..c856f0e 100644
--- a/torch/csrc/jit/script/compiler.h
+++ b/torch/csrc/jit/script/compiler.h
@@ -144,7 +144,7 @@ struct TORCH_API BuiltinModule : public SugaredValue {
   BuiltinModule(std::string name, c10::optional version = at::nullopt)
     : name(std::move(name))
-    , version(version) {}
+    , version(std::move(version)) {}

   std::string kind() const override {
     return "builtin module";

diff --git a/torch/csrc/jit/script/init.cpp b/torch/csrc/jit/script/init.cpp
index 830e30c..9db94d0 100644
--- a/torch/csrc/jit/script/init.cpp
+++ b/torch/csrc/jit/script/init.cpp
@@ -486,8 +486,7 @@ FunctionSchema getSchemaWithNameAndDefaults(
       } else {
         value = toIValue(it->second, arg.type());
       }
-      new_args.emplace_back(
-          Argument(arg.name(), arg.type(), arg.N(), value, arg.kwarg_only()));
+      new_args.emplace_back(arg.name(), arg.type(), arg.N(), value, arg.kwarg_only());
     } catch (py::cast_error& e) {
       throw ErrorReport(range)
           << "Expected a default value of type " << arg.type()->str()

diff --git a/torch/csrc/jit/script/python_tree_views.cpp b/torch/csrc/jit/script/python_tree_views.cpp
index 5382161..c9fb797 100644
--- a/torch/csrc/jit/script/python_tree_views.cpp
+++ b/torch/csrc/jit/script/python_tree_views.cpp
@@ -101,8 +101,8 @@ void initTreeViewBindings(PyObject *module) {
     return Expr(Compound::create(TK_NONE, range, {}));
   });

-  py::class_(m, "Stmt");
-  py::class_(m, "Expr");
+  py::class_(m, "Stmt"); // NOLINT(bugprone-unused-raii)
+  py::class_(m, "Expr"); // NOLINT(bugprone-unused-raii)
   py::class_(m, "Def")
       .def(py::init([](const Ident& name,
                        Decl decl,

diff --git a/torch/csrc/serialization.cpp b/torch/csrc/serialization.cpp
index de98d27..b8f4888 100644
--- a/torch/csrc/serialization.cpp
+++ b/torch/csrc/serialization.cpp
@@ -45,7 +45,7 @@ static inline bool isUnsupportedOperation() {
   THPObjectPtr io(PyImport_ImportModule("io"));
   if (!io) throw python_error();
   THPObjectPtr exception(PyObject_GetAttrString(io, "UnsupportedOperation"));
-  if (!exception) python_error();
+  if (!exception) throw python_error();
   return PyErr_ExceptionMatches(exception.get());
 }

diff --git a/torch/csrc/tensor/python_tensor.cpp b/torch/csrc/tensor/python_tensor.cpp
index 00936c3..918ee32 100644
--- a/torch/csrc/tensor/python_tensor.cpp
+++ b/torch/csrc/tensor/python_tensor.cpp
@@ -371,6 +371,7 @@ void set_default_tensor_type(const at::Type& type) {

   // get the storage first, so if it doesn't exist we don't change the default tensor type
   THPObjectPtr storage = get_storage_obj(type);
+  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
   default_tensor_type = const_cast(&type);
   at::set_default_dtype(default_tensor_type->typeMeta());

diff --git a/torch/csrc/utils.cpp b/torch/csrc/utils.cpp
index 36b1ee7..6b722e0 100644
--- a/torch/csrc/utils.cpp
+++ b/torch/csrc/utils.cpp
@@ -1,5 +1,5 @@
 #include "torch/csrc/python_headers.h"
-#include
+#include
 #include
 #include
 #include

@@ -140,7 +140,7 @@ void THPUtils_addPyMethodDefs(std::vector& vector, PyMethodDef* met
     // remove nullptr terminator
     vector.pop_back();
   }
-  while (1) {
+  while (true) {
     vector.push_back(*methods);
     if (!methods->ml_name) {
       break;
     }

@@ -183,7 +183,7 @@ void THPUtils_invalidArguments(PyObject *given_args, PyObject *given_kwargs,
   va_list option_list;
   va_start(option_list, num_options);
   for (size_t i = 0; i < num_options; i++)
-    option_strings.push_back(va_arg(option_list, const char*));
+    option_strings.emplace_back(va_arg(option_list, const char*));
   va_end(option_list);

   PyErr_SetString(PyExc_TypeError, torch::format_invalid_args(

diff --git a/torch/csrc/utils/invalid_arguments.cpp b/torch/csrc/utils/invalid_arguments.cpp
index 0160bdd..ceed1cb 100644
--- a/torch/csrc/utils/invalid_arguments.cpp
+++ b/torch/csrc/utils/invalid_arguments.cpp
@@ -2,6 +2,8 @@

 #include "python_strings.h"

+#include
+
 #include
 #include
 #include

@@ -22,7 +24,7 @@ struct Type {
 struct SimpleType: public Type {
   SimpleType(std::string& name): name(name) {};

-  bool is_matching(PyObject *object) {
+  bool is_matching(PyObject *object) override {
     return py_typename(object) == name;
   }
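// Aside: utils.cpp and script/init.cpp above replace push_back(T(...)) with
// emplace_back(...) (modernize-use-emplace): the arguments are forwarded to
// T's constructor inside the vector, skipping the temporary. A short sketch
// with hypothetical types:
#include <string>
#include <utility>
#include <vector>

struct Signature {
  explicit Signature(std::string fmt) : fmt_(std::move(fmt)) {}
  std::string fmt_;
};

void collect(std::vector<Signature>& out, const char* fmt) {
  // push_back(Signature(fmt)) would build and then move a temporary;
  // emplace_back constructs the element directly in place.
  out.emplace_back(fmt);
}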
@@ -33,7 +35,7 @@ struct MultiType: public Type {
   MultiType(std::initializer_list<std::string> accepted_types):
     types(accepted_types) {};
 
-  bool is_matching(PyObject *object) {
+  bool is_matching(PyObject *object) override {
     auto it = std::find(types.begin(), types.end(), py_typename(object));
     return it != types.end();
   }
@@ -44,7 +46,7 @@ struct MultiType: public Type {
 struct NullableType: public Type {
   NullableType(std::unique_ptr<Type> type): type(std::move(type)) {};
 
-  bool is_matching(PyObject *object) {
+  bool is_matching(PyObject *object) override {
     return object == Py_None || type->is_matching(object);
   }
 
@@ -55,7 +57,7 @@ struct TupleType: public Type {
   TupleType(std::vector<std::unique_ptr<Type>> types):
     types(std::move(types)) {};
 
-  bool is_matching(PyObject *object) {
+  bool is_matching(PyObject *object) override {
     if (!PyTuple_Check(object)) return false;
     auto num_elements = PyTuple_GET_SIZE(object);
     if (num_elements != (long)types.size()) return false;
@@ -73,7 +75,7 @@ struct SequenceType: public Type {
   SequenceType(std::unique_ptr<Type> type):
     type(std::move(type)) {};
 
-  bool is_matching(PyObject *object) {
+  bool is_matching(PyObject *object) override {
     if (!PySequence_Check(object)) return false;
     auto num_elements = PySequence_Length(object);
     for (int i = 0; i < num_elements; i++) {
@@ -88,7 +90,7 @@ struct Argument {
   Argument(std::string name, std::unique_ptr<Type> type):
-    name(name), type(std::move(type)) {};
+    name(std::move(name)), type(std::move(type)) {};
 
   std::string name;
   std::unique_ptr<Type> type;
 };
 
@@ -124,25 +126,25 @@ std::vector<std::string> _splitString(const std::string &s, const std::string& d
 std::unique_ptr<Type> _buildType(std::string type_name, bool is_nullable) {
   std::unique_ptr<Type> result;
   if (type_name == "float") {
-    result.reset(new MultiType({"float", "int", "long"}));
+    result = torch::make_unique<MultiType>(MultiType{"float", "int", "long"});
   } else if (type_name == "int") {
-    result.reset(new MultiType({"int", "long"}));
+    result = torch::make_unique<MultiType>(MultiType{"int", "long"});
   } else if (type_name.find("tuple[") == 0) {
     auto type_list = type_name.substr(6);
     type_list.pop_back();
     std::vector<std::unique_ptr<Type>> types;
     for (auto& type: _splitString(type_list, ","))
       types.emplace_back(_buildType(type, false));
-    result.reset(new TupleType(std::move(types)));
+    result = torch::make_unique<TupleType>(std::move(types));
   } else if (type_name.find("sequence[") == 0) {
     auto subtype = type_name.substr(9);
     subtype.pop_back();
-    result.reset(new SequenceType(_buildType(subtype, false)));
+    result = torch::make_unique<SequenceType>(_buildType(subtype, false));
   } else {
-    result.reset(new SimpleType(type_name));
+    result = torch::make_unique<SimpleType>(type_name);
   }
   if (is_nullable)
-    result.reset(new NullableType(std::move(result)));
+    result = torch::make_unique<NullableType>(std::move(result));
   return result;
 }
 
diff --git a/torch/csrc/utils/python_arg_parser.cpp b/torch/csrc/utils/python_arg_parser.cpp
index e73555d..d280c85 100644
--- a/torch/csrc/utils/python_arg_parser.cpp
+++ b/torch/csrc/utils/python_arg_parser.cpp
@@ -47,6 +47,7 @@ static bool should_allow_numbers_as_tensors(const std::string& name) {
   return allowed.find(name) != allowed.end();
 }
 
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 FunctionParameter::FunctionParameter(const std::string& fmt, bool keyword_only)
   : optional(false)
   , allow_none(false)
@@ -519,7 +520,7 @@ PythonArgParser::PythonArgParser(std::vector<std::string> fmts, bool traceable)
   , traceable(traceable)
 {
   for (auto& fmt : fmts) {
-    signatures_.push_back(FunctionSignature(fmt));
+    signatures_.emplace_back(fmt);
   }
   for (auto& signature : signatures_) {
     if (signature.max_args > max_args) {
diff --git a/torch/csrc/utils/python_arg_parser.h b/torch/csrc/utils/python_arg_parser.h
index cb93bf1..a273f3b 100644
--- a/torch/csrc/utils/python_arg_parser.h
+++ b/torch/csrc/utils/python_arg_parser.h
@@ -80,6 +80,7 @@ struct PythonArgs;
 
 // Contains bound Python arguments in declaration order
 template <int N>
 struct ParsedArgs {
+  ParsedArgs() : args() { }
   PyObject* args[N];
 };
diff --git a/torch/csrc/utils/tensor_dtypes.cpp b/torch/csrc/utils/tensor_dtypes.cpp
index f38b986..61cec31 100644
--- a/torch/csrc/utils/tensor_dtypes.cpp
+++ b/torch/csrc/utils/tensor_dtypes.cpp
@@ -43,7 +43,7 @@ static std::pair<std::string, std::string> getDtypeNames(at::ScalarType scalarTy
 
 void initializeDtypes() {
   auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
-  if (!torch_module) python_error();
+  if (!torch_module) throw python_error();
 
 #define DEFINE_SCALAR_TYPE(_1,n,_2) at::ScalarType::n,
 
diff --git a/torch/csrc/utils/tensor_layouts.cpp b/torch/csrc/utils/tensor_layouts.cpp
index 980e093..5bb80ab 100644
--- a/torch/csrc/utils/tensor_layouts.cpp
+++ b/torch/csrc/utils/tensor_layouts.cpp
@@ -13,7 +13,7 @@ namespace torch { namespace utils {
 
 void initializeLayouts() {
   auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
-  if (!torch_module) python_error();
+  if (!torch_module) throw python_error();
 
   PyObject *strided_layout = THPLayout_New(at::Layout::Strided, "torch.strided");
   Py_INCREF(strided_layout);
-- 
2.7.4
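
Note (outside the patch proper): the `if (!x) python_error();` fixes in serialization.cpp, tensor_dtypes.cpp, and tensor_layouts.cpp correct a real bug, not just style. Without `throw`, the statement only constructs and destroys a temporary exception object, and execution falls through with a null pointer still in hand. A minimal standalone sketch of the before/after behavior, assuming a simplified stand-in for `python_error` and a hypothetical `import_module` helper (the patch's real equivalents are the `python_error` exception type and `PyImport_ImportModule`):

    #include <stdexcept>

    // Simplified stand-in for torch's python_error exception type.
    struct python_error : std::runtime_error {
      python_error() : std::runtime_error("python error") {}
    };

    // Hypothetical helper standing in for PyImport_ImportModule:
    // returns nullptr on failure, like the real C API call.
    int* import_module(bool ok) {
      static int module = 0;
      return ok ? &module : nullptr;
    }

    int buggy(bool ok) {
      int* m = import_module(ok);
      if (!m) python_error(); // BUG: constructs a temporary, then falls through
      return *m;              // dereferences nullptr when ok is false
    }

    int fixed(bool ok) {
      int* m = import_module(ok);
      if (!m) throw python_error(); // raises before the bad dereference
      return *m;
    }

Relatedly, the `result = torch::make_unique<T>(...)` rewrites in invalid_arguments.cpp replace `result.reset(new T(...))` so that allocation and ownership transfer happen in a single expression, the same shape as C++14 `std::make_unique`; this is the pattern the cppcoreguidelines-owning-memory family of checks pushes toward.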