Checks: '
-*
,bugprone-*
- ,-bugprone-macro-parentheses
,-bugprone-forward-declaration-namespace
+ ,-bugprone-macro-parentheses
,cppcoreguidelines-*
+ ,-cppcoreguidelines-interfaces-global-init
+ ,-cppcoreguidelines-owning-memory
,-cppcoreguidelines-pro-bounds-array-to-pointer-decay
- ,-cppcoreguidelines-pro-type-static-cast-downcast
- ,-cppcoreguidelines-pro-bounds-pointer-arithmetic
,-cppcoreguidelines-pro-bounds-constant-array-index
+ ,-cppcoreguidelines-pro-bounds-pointer-arithmetic
,-cppcoreguidelines-pro-type-cstyle-cast
,-cppcoreguidelines-pro-type-reinterpret-cast
+ ,-cppcoreguidelines-pro-type-static-cast-downcast
+ ,-cppcoreguidelines-pro-type-union-access
,-cppcoreguidelines-pro-type-vararg
,-cppcoreguidelines-special-member-functions
- ,-cppcoreguidelines-interfaces-global-init
- ,-cppcoreguidelines-owning-memory
- ,hicpp-signed-bitwise
,hicpp-exception-baseclass
,hicpp-avoid-goto
,modernize-*
- ,-modernize-use-default-member-init
,-modernize-return-braced-init-list
,-modernize-use-auto
+ ,-modernize-use-default-member-init
+ ,-modernize-use-using
'
WarningsAsErrors: '*'
HeaderFilterRegex: 'torch/csrc/.*'
# Run Clang-Tidy
# The negative filters below are to exclude files that include onnx_pb.h,
# otherwise we'd have to build ONNX protos as part of this CI job.
-time python tools/clang_tidy.py \
- --verbose \
- --paths torch/csrc \
- --diff "$BASE_BRANCH" \
- -g"-torch/csrc/jit/init.cpp" \
- -g"-torch/csrc/jit/export.cpp" \
- -g"-torch/csrc/jit/import.cpp" \
+time python tools/clang_tidy.py \
+ --verbose \
+ --paths torch/csrc \
+ --diff "$BASE_BRANCH" \
+ -g"-torch/csrc/distributed/Module.cpp" \
+ -g"-torch/csrc/jit/export.cpp" \
+ -g"-torch/csrc/jit/import.cpp" \
"$@"
#include <atomic>
#include <map>
#include <set>
-#include <signal.h>
+#include <csignal>
#include <sstream>
#include <sys/wait.h>
{ \
auto _w = write(STDERR_FILENO, ERROR_MSG, sizeof(ERROR_MSG) / sizeof(char));\
(void)_w; \
- struct sigaction sa; \
+ struct sigaction sa{}; \
sa.sa_handler = SIG_DFL; \
sa.sa_flags = 0; \
if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGNAL, &sa, nullptr) != 0) { \
// http://man7.org/linux/man-pages/man2/signal.2.html
static inline void setSignalHandler(int signal, void(*handler)(int, siginfo_t *, void *), struct sigaction *old_sa_ptr)
{
- struct sigaction sa;
+ struct sigaction sa{};
sa.sa_sigaction = handler;
sa.sa_flags = SA_RESTART|SA_SIGINFO|SA_NOCLDSTOP|SA_NODEFER;
if (sigemptyset(&sa.sa_mask) != 0 || sigaction(signal, &sa, old_sa_ptr) != 0) {
if (info->si_pid == getppid()) {
_exit(EXIT_SUCCESS);
}
- struct sigaction sa;
+ struct sigaction sa{};
sa.sa_handler = SIG_DFL;
sa.sa_flags = 0;
if (sigemptyset(&sa.sa_mask) != 0 || sigaction(SIGTERM, &sa, nullptr) != 0) {
siginfo_t infop;
// Only check the pids we care about
- for (auto it = worker_pids.begin(); it != worker_pids.end(); ++it) {
- pid_set = &(it->second);
+ for (auto& w : worker_pids) {
+ pid_set = &(w.second);
for (auto pid_it = pid_set->begin(); pid_it != pid_set->end(); ++pid_it) {
worker_pid = *pid_it;
// Use waitid rather than waitpid so that we can set NOWAIT, and that Python
"torch.device", /* tp_name */
sizeof(THPDevice), /* tp_basicsize */
0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
+ nullptr, /* tp_dealloc */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
(reprfunc)THPDevice_repr, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
(hashfunc)THPDevice_hash, /* tp_hash */
- 0, /* tp_call */
+ nullptr, /* tp_call */
(reprfunc)THPDevice_str, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
(richcmpfunc)THPDevice_rc, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
THPDevice_methods, /* tp_methods */
- 0, /* tp_members */
+ nullptr, /* tp_members */
THPDevice_properties, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPDevice_pynew, /* tp_new */
};
#include <ATen/Device.h>
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THPDevice {
PyObject_HEAD
at::Device device;
"torch.dtype", /* tp_name */
sizeof(THPDtype), /* tp_basicsize */
0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
+ nullptr, /* tp_dealloc */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
(reprfunc)THPDtype_repr, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
THPDtype_methods, /* tp_methods */
- 0, /* tp_members */
+ nullptr, /* tp_members */
THPDtype_properties, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
+ nullptr, /* tp_new */
};
void THPDtype_init(PyObject *module)
other.traceback = nullptr;
}
- ~python_error() {
+ ~python_error() override {
if (type || value || traceback) {
AutoGIL gil;
Py_XDECREF(type);
// Abstract base class for exceptions which translate to specific Python types
struct PyTorchError : public std::exception {
virtual PyObject* python_type() = 0;
- virtual const char* what() const noexcept override {
+ const char* what() const noexcept override {
return msg.c_str();
}
std::string msg;
// Translates to Python IndexError
struct IndexError : public PyTorchError {
IndexError(const char *format, ...);
- virtual PyObject* python_type() override {
+ PyObject* python_type() override {
return PyExc_IndexError;
}
};
// Translates to Python TypeError
struct TypeError : public PyTorchError {
TypeError(const char *format, ...);
- virtual PyObject* python_type() override {
+ PyObject* python_type() override {
return PyExc_TypeError;
}
};
// Translates to Python ValueError
struct ValueError : public PyTorchError {
ValueError(const char *format, ...);
- virtual PyObject* python_type() override {
+ PyObject* python_type() override {
return PyExc_ValueError;
}
};
#include <structmember.h>
#include <ATen/ATen.h>
-#include <stdbool.h>
#include <TH/TH.h>
#include "THP.h"
#include "torch/csrc/Exceptions.h"
sizeof(THPGenerator), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)THPGenerator_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
+ nullptr, /* tp_repr */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
THPGenerator_methods, /* tp_methods */
THPGenerator_members, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_getset */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPGenerator_pynew, /* tp_new */
};
"torch.layout", /* tp_name */
sizeof(THPLayout), /* tp_basicsize */
0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
+ nullptr, /* tp_dealloc */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
(reprfunc)THPLayout_repr, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
+ nullptr, /* tp_methods */
+ nullptr, /* tp_members */
+ nullptr, /* tp_getset */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
+ nullptr, /* tp_new */
};
void THPLayout_init(PyObject *module)
#include <sys/socket.h>
#endif
-#include <stdbool.h>
#include <unordered_map>
#include <cstdlib>
#include <libshm.h>
DLManagedTensor * dlMTensor = (DLManagedTensor *)PyCapsule_GetPointer(data, "dltensor");
if (dlMTensor) {
// the dlMTensor has not been consumed, call deleter ourselves
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
dlMTensor->deleter(const_cast<DLManagedTensor*>(dlMTensor));
} else {
// the dlMTensor has been consumed
sizeof(THPWrapper), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)THPWrapper_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
+ nullptr, /* tp_repr */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
+ nullptr, /* tp_methods */
+ nullptr, /* tp_members */
+ nullptr, /* tp_getset */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPWrapper_pynew, /* tp_new */
};
#if PY_MAJOR_VERSION == 2
wrap_tuple_fn<decltype(&sq_slice), &sq_slice>,
#else
- 0, /* sq_slice */
+ nullptr, /* sq_slice */
#endif
- 0, /* sq_ass_item */
- 0, /* sq_ass_slice */
+ nullptr, /* sq_ass_item */
+ nullptr, /* sq_ass_slice */
PyTuple_Type.tp_as_sequence->sq_contains
};
static PyMappingMethods THPSize_as_mapping = {
PyTuple_Type.tp_as_mapping->mp_length,
wrap_tuple_fn<decltype(&mp_subscript), &mp_subscript>,
- 0
+ nullptr
};
static PyObject *THPSize_numel(THPSize *self)
"torch.Size", /* tp_name */
sizeof(THPSize), /* tp_basicsize */
0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
+ nullptr, /* tp_dealloc */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
(reprfunc)THPSize_repr, /* tp_repr */
- 0, /* tp_as_number */
+ nullptr, /* tp_as_number */
&THPSize_as_sequence, /* tp_as_sequence */
&THPSize_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
THPSize_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
+ nullptr, /* tp_members */
+ nullptr, /* tp_getset */
&PyTuple_Type, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPSize_pynew, /* tp_new */
};
#define THP_HOST_HALF
-#include <stdbool.h>
#include <TH/TH.h>
// See Note [TH abstraction violation]
// - Used to get at the allocator associated with a storage
#define THP_H
#include "torch/csrc/python_headers.h"
-#include <stdbool.h>
#include <TH/TH.h>
#include <TH/THTensor.hpp>
PyVarObject_HEAD_INIT(nullptr, 0) "torch.finfo", /* tp_name */
sizeof(THPFInfo), /* tp_basicsize */
0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
+ nullptr, /* tp_dealloc */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
(reprfunc)THPFInfo_str, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
(reprfunc)THPFInfo_str, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
(richcmpfunc)THPDTypeInfo_compare, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
THPFInfo_methods, /* tp_methods */
- 0, /* tp_members */
+ nullptr, /* tp_members */
THPFInfo_properties, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPFInfo_pynew, /* tp_new */
};
PyVarObject_HEAD_INIT(nullptr, 0) "torch.iinfo", /* tp_name */
sizeof(THPIInfo), /* tp_basicsize */
0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
+ nullptr, /* tp_dealloc */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
(reprfunc)THPIInfo_str, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
(reprfunc)THPIInfo_str, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
(richcmpfunc)THPDTypeInfo_compare, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
THPIInfo_methods, /* tp_methods */
- 0, /* tp_members */
+ nullptr, /* tp_members */
THPIInfo_properties, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPIInfo_pynew, /* tp_new */
};
AutoGIL gil;
dict_ = PyDict_New();
}
- ~PyAnomalyMetadata() {
+ ~PyAnomalyMetadata() override {
AutoGIL gil;
Py_DECREF(dict_);
}
- virtual void store_stack() override;
- virtual void print_stack() override;
+ void store_stack() override;
+ void print_stack() override;
PyObject* dict() {
return dict_;
#include "torch/csrc/python_headers.h"
#include <memory>
-#include <stdio.h>
+#include <cstdio>
#include <typeindex>
#include <unordered_map>
static std::unordered_map<std::type_index, THPObjectPtr> cpp_function_types;
struct DefaultFunctionType {
- DefaultFunctionType() {
+ DefaultFunctionType() : type() {
_initFunctionPyTypeObject(type, "CppFunction", nullptr, nullptr);
Py_INCREF(&type);
}
"torch._C._EngineBase", /* tp_name */
sizeof(THPEngine), /* tp_basicsize */
0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_dealloc */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
+ nullptr, /* tp_repr */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
THPEngine_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_members */
+ nullptr, /* tp_getset */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPEngine_new /* tp_new */
};
namespace torch { namespace autograd { namespace python {
struct PythonEngine : public Engine {
- virtual void thread_init(int device) override;
- virtual void thread_on_exception(FunctionTask& task, std::exception& e) override;
- virtual variable_list execute(
+ void thread_init(int device) override;
+ void thread_on_exception(FunctionTask& task, std::exception& e) override;
+ variable_list execute(
const edge_list& roots,
const variable_list& inputs,
bool keep_graph,
bool create_graph,
const edge_list& outputs = {}) override;
- virtual std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() override;
+ std::unique_ptr<AnomalyMetadata> make_anomaly_metadata() override;
};
}}} // namespace torch::autograd::python
PyObject *THPFunction_apply(PyObject *cls, PyObject *inputs)
{
HANDLE_TH_ERRORS
- torch::autograd::profiler::RecordFunction record(((PyTypeObject*)cls)->tp_name,
+ torch::autograd::profiler::RecordFunction record(((PyTypeObject*)cls)->tp_name,
Function::peek_at_next_sequence_nr());
THPObjectPtr backward_cls(PyObject_GetAttrString(cls, "_backward_cls"));
sizeof(THPFunction), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)THPFunction_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
+ nullptr, /* tp_repr */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
nullptr, /* tp_doc */
(traverseproc)THPFunction_traverse, /* tp_traverse */
(inquiry)THPFunction_clear, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
THPFunction_methods, /* tp_methods */
- 0, /* tp_members */
+ nullptr, /* tp_members */
THPFunction_properties, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPFunction_new /* tp_new */
};
struct PyFunction : public Function {
PyFunction(PyObject* obj) : obj(obj) {}
- virtual variable_list apply(variable_list&& inputs) override;
+ variable_list apply(variable_list&& inputs) override;
variable_list legacy_apply(const variable_list& inputs);
- virtual void release_variables() override;
- virtual std::string name() const override;
- virtual std::shared_ptr<Function> get_shared_ptr() override;
- virtual bool is_traceable() override;
+ void release_variables() override;
+ std::string name() const override;
+ std::shared_ptr<Function> get_shared_ptr() override;
+ bool is_traceable() override;
// THPFunction this Function is wrapping.
PyObject* obj;
struct PyFunctionPreHook : public FunctionPreHook {
PyFunctionPreHook(PyObject* dict, int value_idx);
- ~PyFunctionPreHook();
+ ~PyFunctionPreHook() override;
variable_list operator()(const variable_list& values) override;
PyObject* dict;
int value_idx;
struct PyFunctionPostHook : public FunctionPostHook {
PyFunctionPostHook(PyObject* dict);
- ~PyFunctionPostHook();
+ ~PyFunctionPostHook() override;
variable_list operator()(const variable_list& outputs, const variable_list& inputs) override;
PyObject* dict;
};
"torch._C._LegacyVariableBase", /* tp_name */
0, /* tp_basicsize */
0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_dealloc */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
+ nullptr, /* tp_repr */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
+ nullptr, /* tp_methods */
+ nullptr, /* tp_members */
+ nullptr, /* tp_getset */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPVariable_pynew /* tp_new */
};
static PythonArgParser parser({
"_make_subclass(PyObject* cls, Tensor data, bool require_grad=False)",
});
- ParsedArgs<3> parsed_args;
+ ParsedArgs<3> parsed_args{};
auto r = parser.parse(args, kwargs, parsed_args);
PyObject* cls = r.pyobject(0);
if (!PyType_Check(cls)) {
auto& grad = ((THPVariable*)py_grad)->cdata;
bool gradIsSparse = false;
auto backend = var.is_cuda() ? Backend::SparseCUDA : Backend::SparseCPU;
- auto typeOpt = at::globalContext().getNonVariableTypeOpt(backend, var.type().scalarType());
+ auto typeOpt = at::globalContext().getNonVariableTypeOpt(backend, var.type().scalarType());
if (typeOpt) {
auto& sparseType = at::globalContext().getNonVariableType(backend, var.type().scalarType());
gradIsSparse = grad.type() == sparseType;
sizeof(THPVariable), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)THPVariable_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
+ nullptr, /* tp_repr */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
&THPVariable_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /* tp_flags */
nullptr, /* tp_doc */
(traverseproc)THPVariable_traverse, /* tp_traverse */
(inquiry)THPVariable_clear, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
+ nullptr, /* tp_methods */
+ nullptr, /* tp_members */
THPVariable_properties, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPVariable_pynew /* tp_new */
};
#include "torch/csrc/THP_export.h"
// Python object that backs torch.autograd.Variable
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
struct THPVariable {
PyObject_HEAD
// Payload
torch::autograd::Variable cdata;
// Hooks to be run on backwards pass (corresponds to Python attr
// '_backwards_hooks', set by 'register_hook')
- PyObject* backward_hooks;
+ PyObject* backward_hooks = nullptr;
};
THP_API PyObject *THPVariableClass;
#include "byte_order.h"
-#include <string.h>
+#include <cstring>
#if defined(_MSC_VER)
#include <stdlib.h>
void THP_decodeHalfBuffer(THHalf* dst, const uint8_t* src, THPByteOrder order, size_t len)
{
for (size_t i = 0; i < len; i++) {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
union { uint16_t x; THHalf f; };
x = (order == THP_BIG_ENDIAN ? decodeUInt16BE(src) : decodeUInt16LE(src));
dst[i] = f;
void THP_decodeFloatBuffer(float* dst, const uint8_t* src, THPByteOrder order, size_t len)
{
for (size_t i = 0; i < len; i++) {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
union { uint32_t x; float f; };
x = (order == THP_BIG_ENDIAN ? decodeUInt32BE(src) : decodeUInt32LE(src));
dst[i] = f;
void THP_decodeDoubleBuffer(double* dst, const uint8_t* src, THPByteOrder order, size_t len)
{
for (size_t i = 0; i < len; i++) {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
union { uint64_t x; double d; };
x = (order == THP_BIG_ENDIAN ? decodeUInt64BE(src) : decodeUInt64LE(src));
dst[i] = d;
#define THP_BYTE_ORDER_H
#include <cstdint>
-#include <stddef.h>
+#include <cstddef>
#include <THHalf.h>
enum THPByteOrder {
inline bool tryTHPCopy(const THPCopyList& v, PyObject* dst, PyObject* src, bool non_blocking, bool broadcast)
{
- for (auto it = v.begin(); it != v.end(); ++it) {
- if (it->non_blocking == non_blocking && PyType_IsSubtype(Py_TYPE(src), it->srcType)) {
- (it->copy)(dst, src, broadcast);
+ for (auto& i : v) {
+ if (i.non_blocking == non_blocking && PyType_IsSubtype(Py_TYPE(src), i.srcType)) {
+ (i.copy)(dst, src, broadcast);
return true;
}
}
#include "torch/csrc/python_headers.h"
-#include <stdbool.h>
#include <unordered_map>
#include <thread>
#include <chrono>
#include "torch/csrc/python_headers.h"
#include <structmember.h>
-#include <stdbool.h>
// See Note [TH abstraction violation]
// - Used to get at allocator from storage
#include <TH/THTensor.hpp>
#include <structmember.h>
#include <TH/THMath.h>
-#include <stdbool.h>
#include <vector>
#include <stack>
#include <tuple>
// Use the hostname to resolve the network address to
// use. Note: if the hostname does not resolve to an address (e.g.
// because of misconfigured /etc/hosts file), this will not work.
- std::array<char, HOST_NAME_MAX> hostname;
+ std::array<char, HOST_NAME_MAX> hostname{};
auto rv = gethostname(hostname.data(), hostname.size());
if (rv != 0) {
throw std::system_error(errno, std::system_category());
Py_ssize_t start, stop, slicelength, step;
int64_t len = THWStorage_(size)(LIBRARY_STATE self->cdata);
if (!THPUtils_parseSlice(index, len, &start, &stop, &step, &slicelength))
- return NULL;
+ return nullptr;
if (step != 1) {
THPUtils_setError("Trying to slice with a step of %" PRId64 ", but only a step of "
"1 is supported", (int64_t)step);
- return NULL;
+ return nullptr;
}
scalar_t *data = THWStorage_(data)(LIBRARY_STATE self->cdata);
sizeof(THPStorage), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)THPStorage_(dealloc), /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_reserved */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
+ nullptr, /* tp_print */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
+ nullptr, /* tp_repr */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
&THPStorage_(mappingmethods), /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
nullptr, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* will be assigned in init */ /* tp_methods */
- 0, /* will be assigned in init */ /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
+ nullptr, /* will be assigned in init */ /* tp_methods */
+ nullptr, /* will be assigned in init */ /* tp_members */
+ nullptr, /* tp_getset */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
THPStorage_(pynew), /* tp_new */
};
// expand a tensor to a batchtensor given batch_size
BatchTensor(at::Tensor data, int64_t batch_size);
BatchTensor(const std::vector<at::Tensor> datalist, at::Tensor dims);
- ~BatchTensor(){};
const char * toString() const {
return "BatchTensor";
}
c10::optional<ScopePtr> scope) {
Node * n = g.create(prim::Constant);
if(val.isTensor()) {
- at::Tensor ref = std::move(val).toTensor();
+ at::Tensor ref = val.toTensor();
if(!ref.defined()) {
- return insertConstant(g, val, loc, scope);
+ n->destroy();
+ return g.insertNode(g.createUndefined())->output();
}
if (ref.is_variable()) {
ref = autograd::Variable(ref).data();
RegisterOperators reg({
// Implementation of constant node, computes and IValue
Operator(
- FunctionSchema(prim::Constant, {}, {}, /*vararg=*/false, /*varret=*/true),
+ FunctionSchema(prim::Constant, {}, {}, /*is_vararg=*/false, /*is_varret=*/true),
[](const Node* node) -> Operation {
TypePtr type = node->output()->type();
if(type->isSubtypeOf(DynamicType::get())) {
py::register_exception<JITException>(m, "JITException");
- py::class_<python::IODescriptor>(m, "IODescriptor");
+ py::class_<python::IODescriptor>(m, "IODescriptor"); // NOLINT(bugprone-unused-raii)
m.def("_jit_init", loadPythonClasses)
.def("_jit_pass_onnx", ToONNX)
checkAliasAnnotation(g, std::move(stack), unqualified_op_name);
});
+ // NOLINTNEXTLINE(bugprone-unused-raii)
py::class_<CompleteArgumentSpec>(m, "CompleteArgumentSpec")
.def("__repr__", [](CompleteArgumentSpec& self) {
std::ostringstream s;
s << self;
return s.str();
});
+ // NOLINTNEXTLINE(bugprone-unused-raii)
py::class_<ArgumentSpec>(m, "ArgumentSpec");
py::class_<Code>(m, "Code")
.def("grad_executors", [](Code& c) {
});
});
+ // NOLINTNEXTLINE(bugprone-unused-raii)
py::class_<detail::Future>(m, "Future");
m.def("fork", [](script::Module &sm, py::args args) {
// Created by wait()
struct Suspend : public std::exception {
- virtual const char* what() const noexcept override {
+ const char* what() const noexcept override {
return "Suspend";
}
- explicit Suspend(c10::intrusive_ptr<Future> future_) : future(future_) {}
+ explicit Suspend(c10::intrusive_ptr<Future> future_)
+ : future(std::move(future_)) {}
c10::intrusive_ptr<Future> future;
};
InterpreterContinuation(InterpreterState state_, Stack stack_)
: state(std::move(state_)), stack(std::move(stack_)) {}
- void operator()(void) {
+ void operator()() {
state.runAsync(stack);
}
}
} // namespace
-AliasDb::AliasDb(std::shared_ptr<Graph> graph) : graph_(graph) {
+AliasDb::AliasDb(std::shared_ptr<Graph> graph) : graph_(std::move(graph)) {
analyze(graph_);
// Build helper indices
}
}
// - Set of all nodes with a wildcard
- buildWildcardIndex(graph->block());
+ buildWildcardIndex(graph_->block());
}
void AliasDb::buildWildcardIndex(const Block* b) {
};
// Convenience factory: builds an AliasDb over `graph`. The shared_ptr is
// taken by value and moved into the AliasDb constructor so no extra
// refcount increment occurs (clang-tidy: performance-unnecessary-value-param).
inline TORCH_API AliasDb AliasAnalysis(std::shared_ptr<Graph> graph) {
  return AliasDb(std::move(graph));
}
} // namespace jit
} // namespace torch
}
void BlockToONNX(Block* old_block, Block* new_block, ::torch::onnx::OperatorExportTypes operator_export_type, std::unordered_map<Value*, Value*> env) {
- torch::autograd::SymbolicContext ctx;
+ torch::autograd::SymbolicContext ctx{};
ctx.block = new_block;
py::object onnx = py::module::import("torch.onnx");
JIT_ASSERT(t1.size() == t2.size());
std::vector<int64_t> ret;
ret.reserve(t1.size());
- for (size_t i = 0; i < t2.size(); i++) {
- JIT_ASSERT(t2[i] < int64_t(t1.size()));
- ret.push_back(t1[t2[i]]);
+ for (const auto& i : t2) {
+ JIT_ASSERT(i < int64_t(t1.size()));
+ ret.push_back(t1[i]);
}
return ret;
}
concat_node->insertBefore(lc_node);
// make concat node output as new input, then ListConstruct should become dead
- replacements.push_back(std::make_tuple(
- i,
- std::vector<Value*>({concat_node->output()})
- ));
+ replacements.emplace_back(i, std::vector<Value*>({concat_node->output()}));
} else {
// Tensor lists are used mostly for inputs to cat/stack. They are already handled
// in those symbolics, and should become dead afterwards.
- replacements.push_back(std::make_tuple(
+ replacements.emplace_back(
i,
std::vector<Value*>(
- lc_node->inputs().begin(), lc_node->inputs().end())));
+ lc_node->inputs().begin(), lc_node->inputs().end()));
}
}
}
// transform scalar to tensor before pass to batch operator script
- for(size_t i = 0; i < new_inputs.size(); i++){
- auto input = new_inputs[i];
+ for (auto& input : new_inputs) {
if(input->type() == IntType::get() || input->type() == FloatType::get()){
auto to_tensor_node = res_graph->createNumToTensor(input);
res_graph->insertNode(to_tensor_node);
- new_inputs[i] = to_tensor_node->output();
+ input = to_tensor_node->output();
} else if(input->type() == BoolType::get()) {
auto to_tensor_node = res_graph->createBoolToTensor(input);
res_graph->insertNode(to_tensor_node);
- new_inputs[i] = to_tensor_node->output();
+ input = to_tensor_node->output();
}
}
// different than the number of 'v's in structure.
std::string structure;
std::vector<VariableMetadata> metadata;
- bool grad_enabled;
+ bool grad_enabled = false;
};
static inline std::ostream& operator<<(std::ostream& out, const IODescriptor::VariableMetadata& meta) {
Operation createPythonOperation(const Node* op_) {
AutoGIL gil;
const PythonOp* op = static_cast<const PythonOp*>(op_);
- const py::function func =
- py::reinterpret_borrow<const py::function>(py::handle(const_cast<PythonOp*>(op)->pyobj.get()));
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ const py::function func = py::reinterpret_borrow<const py::function>(py::handle(const_cast<PythonOp*>(op)->pyobj.get()));
size_t num_inputs = 0;
for(auto arg_type : op->cconv) {
size_t next_tensor = 0;
for (auto arg_type : op->cconv) {
if (arg_type == 'c') {
- py_inputs[i] =
- py::reinterpret_borrow<const py::object>(const_cast<PythonOp*>(op)->scalar_args[next_scalar++].get());
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ py_inputs[i] = py::reinterpret_borrow<const py::object>(const_cast<PythonOp*>(op)->scalar_args[next_scalar++].get());
} else if (arg_type == 'd') {
py_inputs[i] = toPyObject(std::move(peek(stack, next_tensor, num_inputs)));
next_tensor++;
return getPythonName(pyobj.get());
}
}
- virtual void cloneFrom(Node * other_) override {
+ void cloneFrom(Node * other_) override {
Node::cloneFrom(other_);
auto other = other_->cast<PythonOp>();
this->cconv = other->cconv;
return n.t(Symbol::attr(name));
})
.def("zs_",[](Node & n, const char * name, TensorsAttr::ValueType v) {
- // NOLINTNEXTLINE(modernize-loop-convert)
- for (size_t i = 0; i < v.size(); ++ i) {
- v[i] = autograd::Variable(v[i].view({})).data();
+ for (auto& i : v) {
+ i = autograd::Variable(i.view({})).data();
}
return n.ts_(Symbol::attr(name), std::move(v));
})
return [=](Stack& stack) { \
int64_t a, b; \
pop(stack, a, b); \
- push(stack, op); \
+ push(stack, op); /* NOLINT(hicpp-signed-bitwise) */ \
return 0; \
}; \
}),
at::Tensor t; \
c_type other; \
pop(stack, t, other); \
- std::move(t) = other; \
- push(stack, std::move(t)); \
+ std::move(t) = other; /* NOLINT(bugprone-use-after-move) */ \
+ push(stack, std::move(t)); /* NOLINT(bugprone-use-after-move) */ \
return 0; \
}; \
}),
BuiltinModule(std::string name,
c10::optional<int64_t> version = at::nullopt)
: name(std::move(name))
- , version(version) {}
+ , version(std::move(version)) {}
std::string kind() const override {
return "builtin module";
} else {
value = toIValue(it->second, arg.type());
}
- new_args.emplace_back(
- Argument(arg.name(), arg.type(), arg.N(), value, arg.kwarg_only()));
+ new_args.emplace_back(arg.name(), arg.type(), arg.N(), value, arg.kwarg_only());
} catch (py::cast_error& e) {
throw ErrorReport(range)
<< "Expected a default value of type " << arg.type()->str()
return Expr(Compound::create(TK_NONE, range, {}));
});
- py::class_<Stmt, TreeView>(m, "Stmt");
- py::class_<Expr, TreeView>(m, "Expr");
+ py::class_<Stmt, TreeView>(m, "Stmt"); // NOLINT(bugprone-unused-raii)
+ py::class_<Expr, TreeView>(m, "Expr"); // NOLINT(bugprone-unused-raii)
py::class_<Def, TreeView>(m, "Def")
.def(py::init([](const Ident& name,
Decl decl,
THPObjectPtr io(PyImport_ImportModule("io"));
if (!io) throw python_error();
THPObjectPtr exception(PyObject_GetAttrString(io, "UnsupportedOperation"));
- if (!exception) python_error();
+ if (!exception) throw python_error();
return PyErr_ExceptionMatches(exception.get());
}
// get the storage first, so if it doesn't exist we don't change the default tensor type
THPObjectPtr storage = get_storage_obj(type);
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
default_tensor_type = const_cast<Type*>(&type);
at::set_default_dtype(default_tensor_type->typeMeta());
#include "torch/csrc/python_headers.h"
-#include <stdarg.h>
+#include <cstdarg>
#include <string>
#include <vector>
#include <sstream>
// remove nullptr terminator
vector.pop_back();
}
- while (1) {
+ while (true) {
vector.push_back(*methods);
if (!methods->ml_name) {
break;
va_list option_list;
va_start(option_list, num_options);
for (size_t i = 0; i < num_options; i++)
- option_strings.push_back(va_arg(option_list, const char*));
+ option_strings.emplace_back(va_arg(option_list, const char*));
va_end(option_list);
PyErr_SetString(PyExc_TypeError, torch::format_invalid_args(
#include "python_strings.h"
+#include <torch/csrc/utils/memory.h>
+
#include <algorithm>
#include <unordered_map>
#include <memory>
struct SimpleType: public Type {
SimpleType(std::string& name): name(name) {};
- bool is_matching(PyObject *object) {
+ bool is_matching(PyObject *object) override {
return py_typename(object) == name;
}
MultiType(std::initializer_list<std::string> accepted_types):
types(accepted_types) {};
- bool is_matching(PyObject *object) {
+ bool is_matching(PyObject *object) override {
auto it = std::find(types.begin(), types.end(), py_typename(object));
return it != types.end();
}
struct NullableType: public Type {
NullableType(std::unique_ptr<Type> type): type(std::move(type)) {};
- bool is_matching(PyObject *object) {
+ bool is_matching(PyObject *object) override {
return object == Py_None || type->is_matching(object);
}
TupleType(std::vector<std::unique_ptr<Type>> types):
types(std::move(types)) {};
- bool is_matching(PyObject *object) {
+ bool is_matching(PyObject *object) override {
if (!PyTuple_Check(object)) return false;
auto num_elements = PyTuple_GET_SIZE(object);
if (num_elements != (long)types.size()) return false;
SequenceType(std::unique_ptr<Type> type):
type(std::move(type)) {};
- bool is_matching(PyObject *object) {
+ bool is_matching(PyObject *object) override {
if (!PySequence_Check(object)) return false;
auto num_elements = PySequence_Length(object);
for (int i = 0; i < num_elements; i++) {
struct Argument {
Argument(std::string name, std::unique_ptr<Type> type):
- name(name), type(std::move(type)) {};
+ name(std::move(name)), type(std::move(type)) {};
std::string name;
std::unique_ptr<Type> type;
// Recursively parses a type name from the argument-checking DSL into a
// Type matcher. Recognized forms:
//   "float"          -> MultiType accepting "float"/"int"/"long" objects
//   "int"            -> MultiType accepting "int"/"long" objects
//   "tuple[a,b,...]" -> TupleType of the comma-separated sub-types
//   "sequence[a]"    -> SequenceType of a single sub-type
//   anything else    -> SimpleType matching the Python type name exactly
// If is_nullable, the result is wrapped in a NullableType that also
// accepts Py_None. Uses torch::make_unique instead of raw `new`
// (cppcoreguidelines-owning-memory / modernize-make-unique).
std::unique_ptr<Type> _buildType(std::string type_name, bool is_nullable) {
  std::unique_ptr<Type> result;
  if (type_name == "float") {
    result = torch::make_unique<MultiType>(MultiType{"float", "int", "long"});
  } else if (type_name == "int") {
    result = torch::make_unique<MultiType>(MultiType{"int", "long"});
  } else if (type_name.find("tuple[") == 0) {
    // Strip the "tuple[" prefix and the trailing "]", then parse each element.
    auto type_list = type_name.substr(6);
    type_list.pop_back();
    std::vector<std::unique_ptr<Type>> types;
    for (auto& type: _splitString(type_list, ","))
      types.emplace_back(_buildType(type, false));
    result = torch::make_unique<TupleType>(std::move(types));
  } else if (type_name.find("sequence[") == 0) {
    // Strip the "sequence[" prefix and the trailing "]".
    auto subtype = type_name.substr(9);
    subtype.pop_back();
    result = torch::make_unique<SequenceType>(_buildType(subtype, false));
  } else {
    result = torch::make_unique<SimpleType>(type_name);
  }
  if (is_nullable)
    result = torch::make_unique<NullableType>(std::move(result));
  return result;
}
return allowed.find(name) != allowed.end();
}
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
FunctionParameter::FunctionParameter(const std::string& fmt, bool keyword_only)
: optional(false)
, allow_none(false)
, traceable(traceable)
{
for (auto& fmt : fmts) {
- signatures_.push_back(FunctionSignature(fmt));
+ signatures_.emplace_back(fmt);
}
for (auto& signature : signatures_) {
if (signature.max_args > max_args) {
// Contains bound Python arguments in declaration order
template<int N>
struct ParsedArgs {
+ ParsedArgs() : args() { }
PyObject* args[N];
};
void initializeDtypes() {
auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
- if (!torch_module) python_error();
+ if (!torch_module) throw python_error();
#define DEFINE_SCALAR_TYPE(_1,n,_2) at::ScalarType::n,
void initializeLayouts() {
auto torch_module = THPObjectPtr(PyImport_ImportModule("torch"));
- if (!torch_module) python_error();
+ if (!torch_module) throw python_error();
PyObject *strided_layout = THPLayout_New(at::Layout::Strided, "torch.strided");
Py_INCREF(strided_layout);