From c705d9eb1edd68ddb6e39160a1ac92d011ebcec4 Mon Sep 17 00:00:00 2001 From: Roy Li Date: Thu, 4 Apr 2019 02:21:09 -0700 Subject: [PATCH] Introduce DeprecatedTypeProperties class (#17991) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/17991 changes: -Breaks bc: Tensor::type() now returns DeprecatedTypeProperties& rather than Type&. -Added DeprecatedTypeProperties, it serves as a temporary replacement for Type as the return value of Tensor::type(). This contributes to making Type just for dispatch purposes so that we can make it dtype agnostic. -Tensor::dispatch_type() now returns Type& like Tensor::type() used to do. -Changed callsites of Tensor::type() appropriately. Reviewed By: ezyang Differential Revision: D14443117 fbshipit-source-id: 239ccb7a09626279a71d1a37f8f82e7f57bf7d9e --- aten/src/ATen/DLConvertor.cpp | 6 +- aten/src/ATen/Dispatch.h | 4 +- aten/src/ATen/SparseTensorImpl.cpp | 3 +- aten/src/ATen/SparseTensorUtils.h | 7 +- aten/src/ATen/core/DeprecatedTypeProperties.h | 67 ++ .../ATen/core/DeprecatedTypePropertiesRegistry.cpp | 12 + .../ATen/core/DeprecatedTypePropertiesRegistry.h | 46 ++ aten/src/ATen/core/Formatting.cpp | 7 +- aten/src/ATen/core/Formatting.h | 1 + aten/src/ATen/core/LegacyTypeDispatch.cpp | 6 +- aten/src/ATen/core/Tensor.cpp | 4 +- aten/src/ATen/core/Tensor.h | 7 +- aten/src/ATen/core/TensorMethods.h | 836 ++++++++++----------- aten/src/ATen/function_wrapper.py | 2 +- aten/src/ATen/native/BatchLinearAlgebra.cpp | 6 +- aten/src/ATen/native/Indexing.cpp | 11 +- aten/src/ATen/native/LegacyBridge.cpp | 6 +- aten/src/ATen/native/LinearAlgebra.cpp | 7 +- aten/src/ATen/native/LossCTC.cpp | 2 +- aten/src/ATen/native/Memory.cpp | 2 +- aten/src/ATen/native/NNPACK.cpp | 8 +- aten/src/ATen/native/ReduceOps.cpp | 2 +- aten/src/ATen/native/TensorConversions.cpp | 4 +- aten/src/ATen/native/TensorFactories.cpp | 12 +- aten/src/ATen/native/TensorIterator.h | 2 +- aten/src/ATen/native/TensorIteratorReduce.cpp | 2 +- aten/src/ATen/native/TypeProperties.cpp | 4 +- aten/src/ATen/native/cuda/Distributions.cu | 1 - aten/src/ATen/native/cuda/LossCTC.cu | 14 +- aten/src/ATen/native/cuda/MiscUtils.h | 4 +- aten/src/ATen/templates/Tensor.h | 7 +- aten/src/ATen/templates/TensorMethods.h | 18 +- aten/src/ATen/test/cuda_tensor_interop_test.cpp | 4 +- aten/src/ATen/test/scalar_test.cpp | 4 +- aten/src/ATen/test/tensor_interop_test.cpp | 4 +- aten/src/ATen/test/undefined_tensor_test.cpp | 6 +- test/test_nn.py | 2 +- tools/autograd/derivatives.yaml | 6 +- tools/autograd/templates/Functions.cpp | 14 +- tools/autograd/templates/Functions.h | 2 +- .../autograd/templates/python_torch_functions.cpp | 4 +- .../autograd/templates/python_variable_methods.cpp | 14 +- torch/csrc/DynamicTypes.cpp | 4 - torch/csrc/DynamicTypes.h | 1 - torch/csrc/Generator.cpp | 2 +- torch/csrc/autograd/VariableTypeManual.cpp | 10 +- torch/csrc/autograd/engine.cpp | 2 +- torch/csrc/autograd/function.h | 2 +- torch/csrc/autograd/input_metadata.h | 2 +- torch/csrc/autograd/python_function.cpp | 2 +- torch/csrc/autograd/python_hook.cpp | 2 +- torch/csrc/autograd/python_variable_indexing.cpp | 4 +- torch/csrc/autograd/variable.cpp | 4 +- torch/csrc/cuda/comm.cpp | 2 +- torch/csrc/cuda/nccl.cpp | 4 +- torch/csrc/jit/node_hashing.cpp | 2 +- torch/csrc/tensor/python_tensor.cpp | 4 +- torch/csrc/utils/tensor_apply.cpp | 6 +- torch/csrc/utils/tensor_flatten.cpp | 9 +- torch/csrc/utils/tensor_flatten.h | 2 +- torch/csrc/utils/tensor_new.cpp | 12 +- torch/csrc/utils/tensor_numpy.cpp | 2 +- 
torch/lib/c10d/ProcessGroupGloo.cpp | 2 +- torch/lib/c10d/Utils.hpp | 8 +- 64 files changed, 699 insertions(+), 569 deletions(-) create mode 100644 aten/src/ATen/core/DeprecatedTypeProperties.h create mode 100644 aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp create mode 100644 aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h diff --git a/aten/src/ATen/DLConvertor.cpp b/aten/src/ATen/DLConvertor.cpp index a40e872..cf9daa5 100644 --- a/aten/src/ATen/DLConvertor.cpp +++ b/aten/src/ATen/DLConvertor.cpp @@ -56,10 +56,10 @@ static DLDataType getDLDataType(const Tensor& t) { return dtype; } -static DLContext getDLContext(const Type& type, const int64_t& device_id) { +static DLContext getDLContext(const Tensor& tensor, const int64_t& device_id) { DLContext ctx; ctx.device_id = device_id; - if (type.is_cuda()) { + if (tensor.is_cuda()) { ctx.device_type = DLDeviceType::kDLGPU; } else { ctx.device_type = DLDeviceType::kDLCPU; @@ -161,7 +161,7 @@ DLManagedTensor* toDLPack(const Tensor& src) { if (src.is_cuda()) { device_id = src.get_device(); } - atDLMTensor->tensor.dl_tensor.ctx = getDLContext(src.type(), device_id); + atDLMTensor->tensor.dl_tensor.ctx = getDLContext(src, device_id); atDLMTensor->tensor.dl_tensor.ndim = src.dim(); atDLMTensor->tensor.dl_tensor.dtype = getDLDataType(src); atDLMTensor->tensor.dl_tensor.shape = diff --git a/aten/src/ATen/Dispatch.h b/aten/src/ATen/Dispatch.h index b22a60d..d6130d6 100644 --- a/aten/src/ATen/Dispatch.h +++ b/aten/src/ATen/Dispatch.h @@ -41,9 +41,9 @@ inline at::ScalarType scalar_type(at::ScalarType s) { return s; } -C10_DEPRECATED_MESSAGE("passing at::Type to an AT_DISPATCH macro is deprecated, " \ +C10_DEPRECATED_MESSAGE("passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, " \ "pass an at::ScalarType instead") -inline at::ScalarType scalar_type(const at::Type &t) { +inline at::ScalarType scalar_type(const at::DeprecatedTypeProperties &t) { return t.scalarType(); } diff --git a/aten/src/ATen/SparseTensorImpl.cpp b/aten/src/ATen/SparseTensorImpl.cpp index 2cdca79..48ca1b3 100644 --- a/aten/src/ATen/SparseTensorImpl.cpp +++ b/aten/src/ATen/SparseTensorImpl.cpp @@ -88,7 +88,8 @@ void SparseTensorImpl::set_indices_and_values_unsafe(const Tensor& indices, cons AT_CHECK(!indices.is_sparse(), "expected indices to be a dense tensor, but got indices of layout ", indices.layout()); AT_CHECK(!values.is_sparse(), "expected values to be a dense tensor, but got values of layout ", values.layout()); - AT_CHECK(values.type().toSparse() == legacyTensorType(*this), "values type must match sparse tensor type"); + AT_CHECK(values.device().type() == device().type(), "device type of values (", values.device().type(), ") must match device type of device().type()", device().type(), ")"); + AT_CHECK(values.scalar_type() == typeMetaToScalarType(dtype()), "dtype of values (", values.scalar_type(), ") must match dtype of sparse tensor (", typeMetaToScalarType(dtype()), ")"); AT_CHECK(indices.scalar_type() == kLong, "indices must be an int64 tensor"); AT_CHECK(indices.type().backend() == values.type().backend(), "backend of indices (", indices.type().backend(), ") must match backend of values (", values.type().backend(), ")"); AT_CHECK(!indices.is_cuda() || indices.get_device() == values.get_device(), "device of indices (", indices.get_device(), ") must match device of values (", values.get_device(), ")"); diff --git a/aten/src/ATen/SparseTensorUtils.h b/aten/src/ATen/SparseTensorUtils.h index a7fa4ab..8113367 100644 --- 
a/aten/src/ATen/SparseTensorUtils.h +++ b/aten/src/ATen/SparseTensorUtils.h @@ -31,7 +31,10 @@ inline void alias_into_sparse(const SparseTensor& self, const LongTensor& indice // Take indices and values and makes a (data) copy of them to put into the sparse // indices/values. This used to be called THSTensor_(_set) inline void copy_into_sparse(const SparseTensor& self, const LongTensor& indices, const Tensor& values, bool non_blocking) { - alias_into_sparse(self, self._indices().type().copy(indices, non_blocking), self._values().type().copy(values, non_blocking)); + alias_into_sparse( + self, + self._indices().dispatch_type().copy(indices, non_blocking), + self._values().dispatch_type().copy(values, non_blocking)); } // TODO: put this into the public API @@ -82,7 +85,7 @@ inline LongTensor flatten_indices(const Tensor& indices, IntArrayRef full_size, indices_mult_cpu_vec[i] = mult; mult *= full_size[i]; } - auto indices_mult_cpu = indices.type().cpu() + auto indices_mult_cpu = indices.dispatch_type().cpu() .tensorFromBlob(indices_mult_cpu_vec.data(), /*size=*/{sparse_dim, 1}); // NB: must be blocking because this blob may be freed after this closure, // and non_blocking copy will see garbage. diff --git a/aten/src/ATen/core/DeprecatedTypeProperties.h b/aten/src/ATen/core/DeprecatedTypeProperties.h new file mode 100644 index 0000000..88f53f6 --- /dev/null +++ b/aten/src/ATen/core/DeprecatedTypeProperties.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include +#include + + + +namespace at { + +// This class specifies a Backend and a ScalarType. Currently, it primarily +// serves as a replacement return value for Tensor::type(). Previously, +// Tensor::type() returned Type&, but we are changing Type to not be +// dtype-specific. +class DeprecatedTypeProperties { + public: + DeprecatedTypeProperties(Backend backend, ScalarType scalar_type) + : backend_(backend), scalar_type_(scalar_type) {} + + Backend backend() const { + return backend_; + } + + bool is_sparse() const { + return layout_from_backend(backend()) == kSparse; + } + + DeviceType device_type() const { + return backendToDeviceType(backend_); + } + + bool is_cuda() const { + return backendToDeviceType(backend_) == kCUDA; + } + + ScalarType scalarType() const { + return scalar_type_; + } + + caffe2::TypeMeta typeMeta() const { + return scalarTypeToTypeMeta(scalar_type_); + } + + bool is_defined() const { + return backend_ != Backend::Undefined && scalar_type_ != ScalarType::Undefined; + } + + bool operator==(const DeprecatedTypeProperties& other) const { + return backend_ == other.backend() && scalar_type_ == other.scalarType(); + } + + bool operator!=(const DeprecatedTypeProperties& other) const { + return !(*this == other); + } + + std::string toString() const { + std::stringstream ss; + ss << at::toString(backend()) << at::toString(scalarType()) << "Type"; + return ss.str(); + } + + private: + Backend backend_; + ScalarType scalar_type_; +}; + +} // namespace at diff --git a/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp b/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp new file mode 100644 index 0000000..154f04d --- /dev/null +++ b/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.cpp @@ -0,0 +1,12 @@ +#include + +namespace at { + +// TODO: This could be bad juju if someone calls globalContext() in the +// destructor of an object with static lifetime. 
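A minimal usage sketch of the new surface (not part of the patch itself): it assumes only the DeprecatedTypeProperties accessors defined above and the Tensor::type()/dispatch_type() split described in the summary; the tensor `t` and the `example()` wrapper are hypothetical.

#include <ATen/ATen.h>

void example() {
  // Hypothetical tensor; any dense CPU float tensor behaves the same way.
  at::Tensor t = at::ones({2, 2}, at::kFloat);
  // Tensor::type() now returns DeprecatedTypeProperties& instead of Type&.
  at::DeprecatedTypeProperties& props = t.type();
  at::Backend backend = props.backend();               // Backend::CPU
  at::ScalarType dtype = props.scalarType();            // ScalarType::Float
  bool same = (t.type() == at::ones({2, 2}).type());    // compares backend + scalar type
  // Code that really needs the dispatch object keeps the old behaviour via:
  at::Type& dispatch = t.dispatch_type();
  (void)backend; (void)dtype; (void)same; (void)dispatch;
}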
+DeprecatedTypePropertiesRegistry & globalDeprecatedTypePropertiesRegistry() { + static DeprecatedTypePropertiesRegistry singleton; + return singleton; +} + +} diff --git a/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h b/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h new file mode 100644 index 0000000..0ab57bf --- /dev/null +++ b/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h @@ -0,0 +1,46 @@ +#pragma once + +// In order to preserve bc, we make DeprecatedTypeProperties instances unique +// just like they are for Type. + +#include +#include +#include + +namespace at { + +struct CAFFE2_API DeprecatedTypePropertiesDeleter { + void operator()(DeprecatedTypeProperties * ptr) { + delete ptr; + } +}; + +class CAFFE2_API DeprecatedTypePropertiesRegistry { + public: + using DeprecatedTypePropertiesUniquePtr = + std::unique_ptr<DeprecatedTypeProperties, DeprecatedTypePropertiesDeleter>; + + DeprecatedTypePropertiesRegistry() { + for (int b = 0; b < static_cast<int>(Backend::NumOptions); ++b) { + for (int s = 0; s < static_cast<int>(ScalarType::NumOptions); ++s) { + registry[b][s] = DeprecatedTypePropertiesUniquePtr{ + new DeprecatedTypeProperties(static_cast<Backend>(b), static_cast<ScalarType>(s)), + DeprecatedTypePropertiesDeleter() + }; + } + } + } + + DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) { + return *registry[static_cast<int>(p)][static_cast<int>(s)]; + } + +private: + DeprecatedTypePropertiesUniquePtr registry + [static_cast<int>(Backend::NumOptions)] + [static_cast<int>(ScalarType::NumOptions)]; +}; + +CAFFE2_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry(); + +} // namespace at diff --git a/aten/src/ATen/core/Formatting.cpp b/aten/src/ATen/core/Formatting.cpp index ab0bb33..0abedca 100644 --- a/aten/src/ATen/core/Formatting.cpp +++ b/aten/src/ATen/core/Formatting.cpp @@ -37,6 +37,10 @@ std::ostream& operator<<(std::ostream & out, const Type& t) { return out << t.toString(); } +std::ostream& operator<<(std::ostream & out, const DeprecatedTypeProperties& t) { + return out << t.toString(); +} + static std::tuple __printFormat(std::ostream& stream, const Tensor& self) { auto size = self.numel(); if(size == 0) { @@ -238,8 +242,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi stream << "size:\n" << tensor_.sizes() << "\n"; stream << "]"; } else { - Type& cpudouble = tensor_.type().toBackend(Backend::CPU).toScalarType(kDouble); - Tensor tensor = tensor_.toType(cpudouble).contiguous(); + Tensor tensor = tensor_.to(kCPU, kDouble).contiguous(); if(tensor.ndimension() == 0) { stream << defaultfloat << tensor.data()[0] << std::endl; stream << "[ " << tensor_.toString() << "{} ]"; diff --git a/aten/src/ATen/core/Formatting.h b/aten/src/ATen/core/Formatting.h index c03a00e..1055ad1 100644 --- a/aten/src/ATen/core/Formatting.h +++ b/aten/src/ATen/core/Formatting.h @@ -13,6 +13,7 @@ CAFFE2_API std::ostream& operator<<(std::ostream& out, Backend b); namespace at { CAFFE2_API std::ostream& operator<<(std::ostream& out, const Type& t); +CAFFE2_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t); CAFFE2_API std::ostream& print( std::ostream& stream, const Tensor& tensor, diff --git a/aten/src/ATen/core/LegacyTypeDispatch.cpp b/aten/src/ATen/core/LegacyTypeDispatch.cpp index d9936ea..521e587 100644 --- a/aten/src/ATen/core/LegacyTypeDispatch.cpp +++ b/aten/src/ATen/core/LegacyTypeDispatch.cpp @@ -7,13 +7,13 @@ namespace at { /// Previously, in VariableType_*.cpp (generated by gen_variable_type.py), when /// a function is using the 'use_derived' strategy, we call
its implementation /// on the base non-Variable type (`baseType`), passing unwrapped tensors to the -/// call so that any `.type()` calls in the implementation can treat the passed +/// call so that any `.dispatch_type()` calls in the implementation can treat the passed /// tensors as non-Variables and won't dispatch back to functions in VariableType. /// /// However, after the Variable/Tensor merge, there is no concept of unwrapping /// a tensor anymore, and directly passing variables to the base type calls will -/// cause the `.type()` dispatch in the implementation to treat the tensor as a -/// variable, and any function dispatch based on `.type()` will dispatch back to +/// cause the `.dispatch_type()` dispatch in the implementation to treat the tensor as a +/// variable, and any function dispatch based on `.dispatch_type()` will dispatch back to /// VariableType, which is not what we want. /// /// The solution to the above problem is to add `at::NonVariableTypeMode`, which diff --git a/aten/src/ATen/core/Tensor.cpp b/aten/src/ATen/core/Tensor.cpp index d575423..aa611e8 100644 --- a/aten/src/ATen/core/Tensor.cpp +++ b/aten/src/ATen/core/Tensor.cpp @@ -35,14 +35,14 @@ void Tensor::enforce_invariants() { void Tensor::print() const { if (defined()) { - std::cerr << "[" << type().toString() << " " << sizes() << "]" << std::endl; + std::cerr << "[" << dispatch_type().toString() << " " << sizes() << "]" << std::endl; } else { std::cerr << "[UndefinedTensor]" << std::endl; } } const char * Tensor::toString() const { - return type().toString(); + return dispatch_type().toString(); } } // namespace at diff --git a/aten/src/ATen/core/Tensor.h b/aten/src/ATen/core/Tensor.h index 8dbfb81..a171eab 100644 --- a/aten/src/ATen/core/Tensor.h +++ b/aten/src/ATen/core/Tensor.h @@ -13,6 +13,7 @@ #include #include #include +#include namespace c10{ struct TensorOptions; @@ -196,7 +197,11 @@ class CAFFE2_API Tensor { return impl_->itemsize(); } - Type & type() const { + DeprecatedTypeProperties & type() const { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + tensorTypeIdToBackend(type_id()), scalar_type()); + } + Type & dispatch_type() const { return legacyTensorType(*impl_); } TensorTypeId type_id() const { diff --git a/aten/src/ATen/core/TensorMethods.h b/aten/src/ATen/core/TensorMethods.h index 9e50931..efe387d 100644 --- a/aten/src/ATen/core/TensorMethods.h +++ b/aten/src/ATen/core/TensorMethods.h @@ -10,33 +10,33 @@ namespace at { inline Tensor Tensor::toType(const Type & t, bool non_blocking) const { - if(type() == t) + if(dispatch_type() == t) return *this; return t.copy(*this, non_blocking); } inline Tensor Tensor::cpu() const { - return toType(type().cpu()); + return toType(dispatch_type().cpu()); } inline Tensor Tensor::cuda() const { - return toType(type().cuda()); + return toType(dispatch_type().cuda()); } inline Tensor Tensor::hip() const { - return toType(type().hip()); + return toType(dispatch_type().hip()); } inline Tensor & Tensor::copy_(const Tensor & src, bool non_blocking) { - return type().copy_(*this, src, non_blocking); + return dispatch_type().copy_(*this, src, non_blocking); } inline Tensor Tensor::toType(ScalarType t) const { - return toType(type().toScalarType(t)); + return toType(dispatch_type().toScalarType(t)); } inline Tensor Tensor::toBackend(Backend b) const { - return toType(type().toBackend(b)); + return toType(dispatch_type().toBackend(b)); } inline TensorOptions Tensor::options() const { @@ -50,1240 +50,1240 @@ inline void 
Tensor::backward( c10::optional gradient, bool keep_graph, bool create_graph) { - type().backward(*this, std::move(gradient), keep_graph, create_graph); + dispatch_type().backward(*this, std::move(gradient), keep_graph, create_graph); } inline void Tensor::set_data(Tensor new_data) { - type().set_data(*this, new_data); + dispatch_type().set_data(*this, new_data); } // all static inline to allow for inlining of the non-dynamic part of dispatch inline Tensor Tensor::abs() const { - return type().abs(*this); + return dispatch_type().abs(*this); } inline Tensor & Tensor::abs_() { - return type().abs_(*this); + return dispatch_type().abs_(*this); } inline Tensor Tensor::acos() const { - return type().acos(*this); + return dispatch_type().acos(*this); } inline Tensor & Tensor::acos_() { - return type().acos_(*this); + return dispatch_type().acos_(*this); } inline Tensor Tensor::add(const Tensor & other, Scalar alpha) const { - return type().add(*this, other, alpha); + return dispatch_type().add(*this, other, alpha); } inline Tensor & Tensor::add_(const Tensor & other, Scalar alpha) { - return type().add_(*this, other, alpha); + return dispatch_type().add_(*this, other, alpha); } inline Tensor Tensor::add(Scalar other, Scalar alpha) const { - return type().add(*this, other, alpha); + return dispatch_type().add(*this, other, alpha); } inline Tensor & Tensor::add_(Scalar other, Scalar alpha) { - return type().add_(*this, other, alpha); + return dispatch_type().add_(*this, other, alpha); } inline Tensor Tensor::addmv(const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { - return type().addmv(*this, mat, vec, beta, alpha); + return dispatch_type().addmv(*this, mat, vec, beta, alpha); } inline Tensor & Tensor::addmv_(const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) { - return type().addmv_(*this, mat, vec, beta, alpha); + return dispatch_type().addmv_(*this, mat, vec, beta, alpha); } inline Tensor Tensor::addr(const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { - return type().addr(*this, vec1, vec2, beta, alpha); + return dispatch_type().addr(*this, vec1, vec2, beta, alpha); } inline Tensor & Tensor::addr_(const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) { - return type().addr_(*this, vec1, vec2, beta, alpha); + return dispatch_type().addr_(*this, vec1, vec2, beta, alpha); } inline Tensor Tensor::all(int64_t dim, bool keepdim) const { - return type().all(*this, dim, keepdim); + return dispatch_type().all(*this, dim, keepdim); } inline bool Tensor::allclose(const Tensor & other, double rtol, double atol, bool equal_nan) const { - return type().allclose(*this, other, rtol, atol, equal_nan); + return dispatch_type().allclose(*this, other, rtol, atol, equal_nan); } inline Tensor Tensor::any(int64_t dim, bool keepdim) const { - return type().any(*this, dim, keepdim); + return dispatch_type().any(*this, dim, keepdim); } inline Tensor Tensor::argmax(c10::optional dim, bool keepdim) const { - return type().argmax(*this, dim, keepdim); + return dispatch_type().argmax(*this, dim, keepdim); } inline Tensor Tensor::argmin(c10::optional dim, bool keepdim) const { - return type().argmin(*this, dim, keepdim); + return dispatch_type().argmin(*this, dim, keepdim); } inline Tensor Tensor::as_strided(IntArrayRef size, IntArrayRef stride, c10::optional storage_offset) const { - return type().as_strided(*this, size, stride, storage_offset); + return dispatch_type().as_strided(*this, size, stride, storage_offset); } inline Tensor & 
Tensor::as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional storage_offset) { - return type().as_strided_(*this, size, stride, storage_offset); + return dispatch_type().as_strided_(*this, size, stride, storage_offset); } inline Tensor Tensor::asin() const { - return type().asin(*this); + return dispatch_type().asin(*this); } inline Tensor & Tensor::asin_() { - return type().asin_(*this); + return dispatch_type().asin_(*this); } inline Tensor Tensor::atan() const { - return type().atan(*this); + return dispatch_type().atan(*this); } inline Tensor & Tensor::atan_() { - return type().atan_(*this); + return dispatch_type().atan_(*this); } inline Tensor Tensor::baddbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { - return type().baddbmm(*this, batch1, batch2, beta, alpha); + return dispatch_type().baddbmm(*this, batch1, batch2, beta, alpha); } inline Tensor & Tensor::baddbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { - return type().baddbmm_(*this, batch1, batch2, beta, alpha); + return dispatch_type().baddbmm_(*this, batch1, batch2, beta, alpha); } inline Tensor Tensor::bernoulli(Generator * generator) const { - return type().bernoulli(*this, generator); + return dispatch_type().bernoulli(*this, generator); } inline Tensor & Tensor::bernoulli_(const Tensor & p, Generator * generator) { - return type().bernoulli_(*this, p, generator); + return dispatch_type().bernoulli_(*this, p, generator); } inline Tensor & Tensor::bernoulli_(double p, Generator * generator) { - return type().bernoulli_(*this, p, generator); + return dispatch_type().bernoulli_(*this, p, generator); } inline Tensor Tensor::bernoulli(double p, Generator * generator) const { - return type().bernoulli(*this, p, generator); + return dispatch_type().bernoulli(*this, p, generator); } inline Tensor Tensor::bincount(const Tensor & weights, int64_t minlength) const { - return type().bincount(*this, weights, minlength); + return dispatch_type().bincount(*this, weights, minlength); } inline Tensor Tensor::bmm(const Tensor & mat2) const { - return type().bmm(*this, mat2); + return dispatch_type().bmm(*this, mat2); } inline Tensor Tensor::ceil() const { - return type().ceil(*this); + return dispatch_type().ceil(*this); } inline Tensor & Tensor::ceil_() { - return type().ceil_(*this); + return dispatch_type().ceil_(*this); } inline std::vector Tensor::chunk(int64_t chunks, int64_t dim) const { - return type().chunk(*this, chunks, dim); + return dispatch_type().chunk(*this, chunks, dim); } inline Tensor Tensor::clamp(c10::optional min, c10::optional max) const { - return type().clamp(*this, min, max); + return dispatch_type().clamp(*this, min, max); } inline Tensor & Tensor::clamp_(c10::optional min, c10::optional max) { - return type().clamp_(*this, min, max); + return dispatch_type().clamp_(*this, min, max); } inline Tensor Tensor::clamp_max(Scalar max) const { - return type().clamp_max(*this, max); + return dispatch_type().clamp_max(*this, max); } inline Tensor & Tensor::clamp_max_(Scalar max) { - return type().clamp_max_(*this, max); + return dispatch_type().clamp_max_(*this, max); } inline Tensor Tensor::clamp_min(Scalar min) const { - return type().clamp_min(*this, min); + return dispatch_type().clamp_min(*this, min); } inline Tensor & Tensor::clamp_min_(Scalar min) { - return type().clamp_min_(*this, min); + return dispatch_type().clamp_min_(*this, min); } inline Tensor Tensor::contiguous() const { - return type().contiguous(*this); + return 
dispatch_type().contiguous(*this); } inline Tensor Tensor::cos() const { - return type().cos(*this); + return dispatch_type().cos(*this); } inline Tensor & Tensor::cos_() { - return type().cos_(*this); + return dispatch_type().cos_(*this); } inline Tensor Tensor::cosh() const { - return type().cosh(*this); + return dispatch_type().cosh(*this); } inline Tensor & Tensor::cosh_() { - return type().cosh_(*this); + return dispatch_type().cosh_(*this); } inline Tensor Tensor::cumsum(int64_t dim, ScalarType dtype) const { - return type().cumsum(*this, dim, dtype); + return dispatch_type().cumsum(*this, dim, dtype); } inline Tensor Tensor::cumsum(int64_t dim) const { - return type().cumsum(*this, dim); + return dispatch_type().cumsum(*this, dim); } inline Tensor Tensor::cumprod(int64_t dim, ScalarType dtype) const { - return type().cumprod(*this, dim, dtype); + return dispatch_type().cumprod(*this, dim, dtype); } inline Tensor Tensor::cumprod(int64_t dim) const { - return type().cumprod(*this, dim); + return dispatch_type().cumprod(*this, dim); } inline Tensor Tensor::det() const { - return type().det(*this); + return dispatch_type().det(*this); } inline Tensor Tensor::diag_embed(int64_t offset, int64_t dim1, int64_t dim2) const { - return type().diag_embed(*this, offset, dim1, dim2); + return dispatch_type().diag_embed(*this, offset, dim1, dim2); } inline Tensor Tensor::diagflat(int64_t offset) const { - return type().diagflat(*this, offset); + return dispatch_type().diagflat(*this, offset); } inline Tensor Tensor::diagonal(int64_t offset, int64_t dim1, int64_t dim2) const { - return type().diagonal(*this, offset, dim1, dim2); + return dispatch_type().diagonal(*this, offset, dim1, dim2); } inline Tensor Tensor::div(const Tensor & other) const { - return type().div(*this, other); + return dispatch_type().div(*this, other); } inline Tensor & Tensor::div_(const Tensor & other) { - return type().div_(*this, other); + return dispatch_type().div_(*this, other); } inline Tensor Tensor::div(Scalar other) const { - return type().div(*this, other); + return dispatch_type().div(*this, other); } inline Tensor & Tensor::div_(Scalar other) { - return type().div_(*this, other); + return dispatch_type().div_(*this, other); } inline Tensor Tensor::dot(const Tensor & tensor) const { - return type().dot(*this, tensor); + return dispatch_type().dot(*this, tensor); } inline Tensor & Tensor::resize_(IntArrayRef size) { - return type().resize_(*this, size); + return dispatch_type().resize_(*this, size); } inline Tensor Tensor::erf() const { - return type().erf(*this); + return dispatch_type().erf(*this); } inline Tensor & Tensor::erf_() { - return type().erf_(*this); + return dispatch_type().erf_(*this); } inline Tensor Tensor::erfc() const { - return type().erfc(*this); + return dispatch_type().erfc(*this); } inline Tensor & Tensor::erfc_() { - return type().erfc_(*this); + return dispatch_type().erfc_(*this); } inline Tensor Tensor::exp() const { - return type().exp(*this); + return dispatch_type().exp(*this); } inline Tensor & Tensor::exp_() { - return type().exp_(*this); + return dispatch_type().exp_(*this); } inline Tensor Tensor::expm1() const { - return type().expm1(*this); + return dispatch_type().expm1(*this); } inline Tensor & Tensor::expm1_() { - return type().expm1_(*this); + return dispatch_type().expm1_(*this); } inline Tensor Tensor::expand(IntArrayRef size, bool implicit) const { - return type().expand(*this, size, implicit); + return dispatch_type().expand(*this, size, implicit); } inline Tensor 
Tensor::expand_as(const Tensor & other) const { - return type().expand_as(*this, other); + return dispatch_type().expand_as(*this, other); } inline Tensor Tensor::flatten(int64_t start_dim, int64_t end_dim) const { - return type().flatten(*this, start_dim, end_dim); + return dispatch_type().flatten(*this, start_dim, end_dim); } inline Tensor & Tensor::fill_(Scalar value) { - return type().fill_(*this, value); + return dispatch_type().fill_(*this, value); } inline Tensor & Tensor::fill_(const Tensor & value) { - return type().fill_(*this, value); + return dispatch_type().fill_(*this, value); } inline Tensor Tensor::floor() const { - return type().floor(*this); + return dispatch_type().floor(*this); } inline Tensor & Tensor::floor_() { - return type().floor_(*this); + return dispatch_type().floor_(*this); } inline Tensor Tensor::ger(const Tensor & vec2) const { - return type().ger(*this, vec2); + return dispatch_type().ger(*this, vec2); } inline Tensor Tensor::fft(int64_t signal_ndim, bool normalized) const { - return type().fft(*this, signal_ndim, normalized); + return dispatch_type().fft(*this, signal_ndim, normalized); } inline Tensor Tensor::ifft(int64_t signal_ndim, bool normalized) const { - return type().ifft(*this, signal_ndim, normalized); + return dispatch_type().ifft(*this, signal_ndim, normalized); } inline Tensor Tensor::rfft(int64_t signal_ndim, bool normalized, bool onesided) const { - return type().rfft(*this, signal_ndim, normalized, onesided); + return dispatch_type().rfft(*this, signal_ndim, normalized, onesided); } inline Tensor Tensor::irfft(int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) const { - return type().irfft(*this, signal_ndim, normalized, onesided, signal_sizes); + return dispatch_type().irfft(*this, signal_ndim, normalized, onesided, signal_sizes); } inline Tensor Tensor::index(TensorList indices) const { - return type().index(*this, indices); + return dispatch_type().index(*this, indices); } inline Tensor & Tensor::index_copy_(int64_t dim, const Tensor & index, const Tensor & source) { - return type().index_copy_(*this, dim, index, source); + return dispatch_type().index_copy_(*this, dim, index, source); } inline Tensor Tensor::index_copy(int64_t dim, const Tensor & index, const Tensor & source) const { - return type().index_copy(*this, dim, index, source); + return dispatch_type().index_copy(*this, dim, index, source); } inline Tensor & Tensor::index_put_(TensorList indices, const Tensor & values, bool accumulate) { - return type().index_put_(*this, indices, values, accumulate); + return dispatch_type().index_put_(*this, indices, values, accumulate); } inline Tensor Tensor::index_put(TensorList indices, const Tensor & values, bool accumulate) const { - return type().index_put(*this, indices, values, accumulate); + return dispatch_type().index_put(*this, indices, values, accumulate); } inline Tensor Tensor::inverse() const { - return type().inverse(*this); + return dispatch_type().inverse(*this); } inline Tensor Tensor::isclose(const Tensor & other, double rtol, double atol, bool equal_nan) const { - return type().isclose(*this, other, rtol, atol, equal_nan); + return dispatch_type().isclose(*this, other, rtol, atol, equal_nan); } inline bool Tensor::is_distributed() const { - return type().is_distributed(*this); + return dispatch_type().is_distributed(*this); } inline bool Tensor::is_floating_point() const { - return type().is_floating_point(*this); + return dispatch_type().is_floating_point(*this); } inline bool 
Tensor::is_complex() const { - return type().is_complex(*this); + return dispatch_type().is_complex(*this); } inline bool Tensor::is_nonzero() const { - return type().is_nonzero(*this); + return dispatch_type().is_nonzero(*this); } inline bool Tensor::is_same_size(const Tensor & other) const { - return type().is_same_size(*this, other); + return dispatch_type().is_same_size(*this, other); } inline bool Tensor::is_signed() const { - return type().is_signed(*this); + return dispatch_type().is_signed(*this); } inline std::tuple Tensor::kthvalue(int64_t k, int64_t dim, bool keepdim) const { - return type().kthvalue(*this, k, dim, keepdim); + return dispatch_type().kthvalue(*this, k, dim, keepdim); } inline Tensor Tensor::log() const { - return type().log(*this); + return dispatch_type().log(*this); } inline Tensor & Tensor::log_() { - return type().log_(*this); + return dispatch_type().log_(*this); } inline Tensor Tensor::log10() const { - return type().log10(*this); + return dispatch_type().log10(*this); } inline Tensor & Tensor::log10_() { - return type().log10_(*this); + return dispatch_type().log10_(*this); } inline Tensor Tensor::log1p() const { - return type().log1p(*this); + return dispatch_type().log1p(*this); } inline Tensor & Tensor::log1p_() { - return type().log1p_(*this); + return dispatch_type().log1p_(*this); } inline Tensor Tensor::log2() const { - return type().log2(*this); + return dispatch_type().log2(*this); } inline Tensor & Tensor::log2_() { - return type().log2_(*this); + return dispatch_type().log2_(*this); } inline Tensor Tensor::logdet() const { - return type().logdet(*this); + return dispatch_type().logdet(*this); } inline Tensor Tensor::log_softmax(int64_t dim, ScalarType dtype) const { - return type().log_softmax(*this, dim, dtype); + return dispatch_type().log_softmax(*this, dim, dtype); } inline Tensor Tensor::log_softmax(int64_t dim) const { - return type().log_softmax(*this, dim); + return dispatch_type().log_softmax(*this, dim); } inline Tensor Tensor::logsumexp(IntArrayRef dim, bool keepdim) const { - return type().logsumexp(*this, dim, keepdim); + return dispatch_type().logsumexp(*this, dim, keepdim); } inline Tensor Tensor::matmul(const Tensor & other) const { - return type().matmul(*this, other); + return dispatch_type().matmul(*this, other); } inline Tensor Tensor::matrix_power(int64_t n) const { - return type().matrix_power(*this, n); + return dispatch_type().matrix_power(*this, n); } inline std::tuple Tensor::max(int64_t dim, bool keepdim) const { - return type().max(*this, dim, keepdim); + return dispatch_type().max(*this, dim, keepdim); } inline Tensor Tensor::max_values(IntArrayRef dim, bool keepdim) const { - return type().max_values(*this, dim, keepdim); + return dispatch_type().max_values(*this, dim, keepdim); } inline Tensor Tensor::mean(ScalarType dtype) const { - return type().mean(*this, dtype); + return dispatch_type().mean(*this, dtype); } inline Tensor Tensor::mean() const { - return type().mean(*this); + return dispatch_type().mean(*this); } inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim, ScalarType dtype) const { - return type().mean(*this, dim, keepdim, dtype); + return dispatch_type().mean(*this, dim, keepdim, dtype); } inline Tensor Tensor::mean(IntArrayRef dim, bool keepdim) const { - return type().mean(*this, dim, keepdim); + return dispatch_type().mean(*this, dim, keepdim); } inline Tensor Tensor::mean(IntArrayRef dim, ScalarType dtype) const { - return type().mean(*this, dim, dtype); + return dispatch_type().mean(*this, 
dim, dtype); } inline std::tuple Tensor::median(int64_t dim, bool keepdim) const { - return type().median(*this, dim, keepdim); + return dispatch_type().median(*this, dim, keepdim); } inline std::tuple Tensor::min(int64_t dim, bool keepdim) const { - return type().min(*this, dim, keepdim); + return dispatch_type().min(*this, dim, keepdim); } inline Tensor Tensor::min_values(IntArrayRef dim, bool keepdim) const { - return type().min_values(*this, dim, keepdim); + return dispatch_type().min_values(*this, dim, keepdim); } inline Tensor Tensor::mm(const Tensor & mat2) const { - return type().mm(*this, mat2); + return dispatch_type().mm(*this, mat2); } inline std::tuple Tensor::mode(int64_t dim, bool keepdim) const { - return type().mode(*this, dim, keepdim); + return dispatch_type().mode(*this, dim, keepdim); } inline Tensor Tensor::mul(const Tensor & other) const { - return type().mul(*this, other); + return dispatch_type().mul(*this, other); } inline Tensor & Tensor::mul_(const Tensor & other) { - return type().mul_(*this, other); + return dispatch_type().mul_(*this, other); } inline Tensor Tensor::mul(Scalar other) const { - return type().mul(*this, other); + return dispatch_type().mul(*this, other); } inline Tensor & Tensor::mul_(Scalar other) { - return type().mul_(*this, other); + return dispatch_type().mul_(*this, other); } inline Tensor Tensor::mv(const Tensor & vec) const { - return type().mv(*this, vec); + return dispatch_type().mv(*this, vec); } inline Tensor Tensor::mvlgamma(int64_t p) const { - return type().mvlgamma(*this, p); + return dispatch_type().mvlgamma(*this, p); } inline Tensor & Tensor::mvlgamma_(int64_t p) { - return type().mvlgamma_(*this, p); + return dispatch_type().mvlgamma_(*this, p); } inline Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const { - return type().narrow_copy(*this, dim, start, length); + return dispatch_type().narrow_copy(*this, dim, start, length); } inline Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const { - return type().narrow(*this, dim, start, length); + return dispatch_type().narrow(*this, dim, start, length); } inline Tensor Tensor::permute(IntArrayRef dims) const { - return type().permute(*this, dims); + return dispatch_type().permute(*this, dims); } inline Tensor Tensor::pin_memory() const { - return type().pin_memory(*this); + return dispatch_type().pin_memory(*this); } inline Tensor Tensor::pinverse(double rcond) const { - return type().pinverse(*this, rcond); + return dispatch_type().pinverse(*this, rcond); } inline Tensor Tensor::repeat(IntArrayRef repeats) const { - return type().repeat(*this, repeats); + return dispatch_type().repeat(*this, repeats); } inline Tensor Tensor::reshape(IntArrayRef shape) const { - return type().reshape(*this, shape); + return dispatch_type().reshape(*this, shape); } inline Tensor Tensor::reshape_as(const Tensor & other) const { - return type().reshape_as(*this, other); + return dispatch_type().reshape_as(*this, other); } inline Tensor Tensor::round() const { - return type().round(*this); + return dispatch_type().round(*this); } inline Tensor & Tensor::round_() { - return type().round_(*this); + return dispatch_type().round_(*this); } inline Tensor Tensor::relu() const { - return type().relu(*this); + return dispatch_type().relu(*this); } inline Tensor & Tensor::relu_() { - return type().relu_(*this); + return dispatch_type().relu_(*this); } inline Tensor Tensor::prelu(const Tensor & weight) const { - return type().prelu(*this, weight); + return 
dispatch_type().prelu(*this, weight); } inline std::tuple Tensor::prelu_backward(const Tensor & grad_output, const Tensor & weight) const { - return type().prelu_backward(grad_output, *this, weight); + return dispatch_type().prelu_backward(grad_output, *this, weight); } inline Tensor Tensor::hardshrink(Scalar lambd) const { - return type().hardshrink(*this, lambd); + return dispatch_type().hardshrink(*this, lambd); } inline Tensor Tensor::hardshrink_backward(const Tensor & grad_out, Scalar lambd) const { - return type().hardshrink_backward(grad_out, *this, lambd); + return dispatch_type().hardshrink_backward(grad_out, *this, lambd); } inline Tensor Tensor::rsqrt() const { - return type().rsqrt(*this); + return dispatch_type().rsqrt(*this); } inline Tensor & Tensor::rsqrt_() { - return type().rsqrt_(*this); + return dispatch_type().rsqrt_(*this); } inline Tensor Tensor::select(int64_t dim, int64_t index) const { - return type().select(*this, dim, index); + return dispatch_type().select(*this, dim, index); } inline Tensor Tensor::sigmoid() const { - return type().sigmoid(*this); + return dispatch_type().sigmoid(*this); } inline Tensor & Tensor::sigmoid_() { - return type().sigmoid_(*this); + return dispatch_type().sigmoid_(*this); } inline Tensor Tensor::sin() const { - return type().sin(*this); + return dispatch_type().sin(*this); } inline Tensor & Tensor::sin_() { - return type().sin_(*this); + return dispatch_type().sin_(*this); } inline Tensor Tensor::sinh() const { - return type().sinh(*this); + return dispatch_type().sinh(*this); } inline Tensor & Tensor::sinh_() { - return type().sinh_(*this); + return dispatch_type().sinh_(*this); } inline Tensor Tensor::detach() const { - return type().detach(*this); + return dispatch_type().detach(*this); } inline Tensor & Tensor::detach_() { - return type().detach_(*this); + return dispatch_type().detach_(*this); } inline int64_t Tensor::size(int64_t dim) const { - return type().size(*this, dim); + return dispatch_type().size(*this, dim); } inline Tensor Tensor::slice(int64_t dim, int64_t start, int64_t end, int64_t step) const { - return type().slice(*this, dim, start, end, step); + return dispatch_type().slice(*this, dim, start, end, step); } inline std::tuple Tensor::slogdet() const { - return type().slogdet(*this); + return dispatch_type().slogdet(*this); } inline Tensor Tensor::smm(const Tensor & mat2) const { - return type().smm(*this, mat2); + return dispatch_type().smm(*this, mat2); } inline Tensor Tensor::softmax(int64_t dim, ScalarType dtype) const { - return type().softmax(*this, dim, dtype); + return dispatch_type().softmax(*this, dim, dtype); } inline Tensor Tensor::softmax(int64_t dim) const { - return type().softmax(*this, dim); + return dispatch_type().softmax(*this, dim); } inline std::vector Tensor::split(int64_t split_size, int64_t dim) const { - return type().split(*this, split_size, dim); + return dispatch_type().split(*this, split_size, dim); } inline std::vector Tensor::split_with_sizes(IntArrayRef split_sizes, int64_t dim) const { - return type().split_with_sizes(*this, split_sizes, dim); + return dispatch_type().split_with_sizes(*this, split_sizes, dim); } inline Tensor Tensor::squeeze() const { - return type().squeeze(*this); + return dispatch_type().squeeze(*this); } inline Tensor Tensor::squeeze(int64_t dim) const { - return type().squeeze(*this, dim); + return dispatch_type().squeeze(*this, dim); } inline Tensor & Tensor::squeeze_() { - return type().squeeze_(*this); + return dispatch_type().squeeze_(*this); } inline 
Tensor & Tensor::squeeze_(int64_t dim) { - return type().squeeze_(*this, dim); + return dispatch_type().squeeze_(*this, dim); } inline Tensor Tensor::sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { - return type().sspaddmm(*this, mat1, mat2, beta, alpha); + return dispatch_type().sspaddmm(*this, mat1, mat2, beta, alpha); } inline Tensor Tensor::stft(int64_t n_fft, c10::optional hop_length, c10::optional win_length, const Tensor & window, bool normalized, bool onesided) const { - return type().stft(*this, n_fft, hop_length, win_length, window, normalized, onesided); + return dispatch_type().stft(*this, n_fft, hop_length, win_length, window, normalized, onesided); } inline int64_t Tensor::stride(int64_t dim) const { - return type().stride(*this, dim); + return dispatch_type().stride(*this, dim); } inline Tensor Tensor::sum(ScalarType dtype) const { - return type().sum(*this, dtype); + return dispatch_type().sum(*this, dtype); } inline Tensor Tensor::sum() const { - return type().sum(*this); + return dispatch_type().sum(*this); } inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim, ScalarType dtype) const { - return type().sum(*this, dim, keepdim, dtype); + return dispatch_type().sum(*this, dim, keepdim, dtype); } inline Tensor Tensor::sum(IntArrayRef dim, bool keepdim) const { - return type().sum(*this, dim, keepdim); + return dispatch_type().sum(*this, dim, keepdim); } inline Tensor Tensor::sum(IntArrayRef dim, ScalarType dtype) const { - return type().sum(*this, dim, dtype); + return dispatch_type().sum(*this, dim, dtype); } inline Tensor Tensor::sum_to_size(IntArrayRef size) const { - return type().sum_to_size(*this, size); + return dispatch_type().sum_to_size(*this, size); } inline Tensor Tensor::sqrt() const { - return type().sqrt(*this); + return dispatch_type().sqrt(*this); } inline Tensor & Tensor::sqrt_() { - return type().sqrt_(*this); + return dispatch_type().sqrt_(*this); } inline Tensor Tensor::std(bool unbiased) const { - return type().std(*this, unbiased); + return dispatch_type().std(*this, unbiased); } inline Tensor Tensor::std(IntArrayRef dim, bool unbiased, bool keepdim) const { - return type().std(*this, dim, unbiased, keepdim); + return dispatch_type().std(*this, dim, unbiased, keepdim); } inline Tensor Tensor::prod(ScalarType dtype) const { - return type().prod(*this, dtype); + return dispatch_type().prod(*this, dtype); } inline Tensor Tensor::prod() const { - return type().prod(*this); + return dispatch_type().prod(*this); } inline Tensor Tensor::prod(int64_t dim, bool keepdim, ScalarType dtype) const { - return type().prod(*this, dim, keepdim, dtype); + return dispatch_type().prod(*this, dim, keepdim, dtype); } inline Tensor Tensor::prod(int64_t dim, bool keepdim) const { - return type().prod(*this, dim, keepdim); + return dispatch_type().prod(*this, dim, keepdim); } inline Tensor Tensor::prod(int64_t dim, ScalarType dtype) const { - return type().prod(*this, dim, dtype); + return dispatch_type().prod(*this, dim, dtype); } inline Tensor Tensor::t() const { - return type().t(*this); + return dispatch_type().t(*this); } inline Tensor & Tensor::t_() { - return type().t_(*this); + return dispatch_type().t_(*this); } inline Tensor Tensor::tan() const { - return type().tan(*this); + return dispatch_type().tan(*this); } inline Tensor & Tensor::tan_() { - return type().tan_(*this); + return dispatch_type().tan_(*this); } inline Tensor Tensor::tanh() const { - return type().tanh(*this); + return dispatch_type().tanh(*this); } inline 
Tensor & Tensor::tanh_() { - return type().tanh_(*this); + return dispatch_type().tanh_(*this); } inline Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const { - return type().transpose(*this, dim0, dim1); + return dispatch_type().transpose(*this, dim0, dim1); } inline Tensor & Tensor::transpose_(int64_t dim0, int64_t dim1) { - return type().transpose_(*this, dim0, dim1); + return dispatch_type().transpose_(*this, dim0, dim1); } inline Tensor Tensor::flip(IntArrayRef dims) const { - return type().flip(*this, dims); + return dispatch_type().flip(*this, dims); } inline Tensor Tensor::roll(IntArrayRef shifts, IntArrayRef dims) const { - return type().roll(*this, shifts, dims); + return dispatch_type().roll(*this, shifts, dims); } inline Tensor Tensor::rot90(int64_t k, IntArrayRef dims) const { - return type().rot90(*this, k, dims); + return dispatch_type().rot90(*this, k, dims); } inline Tensor Tensor::trunc() const { - return type().trunc(*this); + return dispatch_type().trunc(*this); } inline Tensor & Tensor::trunc_() { - return type().trunc_(*this); + return dispatch_type().trunc_(*this); } inline Tensor Tensor::type_as(const Tensor & other) const { - return type().type_as(*this, other); + return dispatch_type().type_as(*this, other); } inline Tensor Tensor::unsqueeze(int64_t dim) const { - return type().unsqueeze(*this, dim); + return dispatch_type().unsqueeze(*this, dim); } inline Tensor & Tensor::unsqueeze_(int64_t dim) { - return type().unsqueeze_(*this, dim); + return dispatch_type().unsqueeze_(*this, dim); } inline Tensor Tensor::var(bool unbiased) const { - return type().var(*this, unbiased); + return dispatch_type().var(*this, unbiased); } inline Tensor Tensor::var(IntArrayRef dim, bool unbiased, bool keepdim) const { - return type().var(*this, dim, unbiased, keepdim); + return dispatch_type().var(*this, dim, unbiased, keepdim); } inline Tensor Tensor::view_as(const Tensor & other) const { - return type().view_as(*this, other); + return dispatch_type().view_as(*this, other); } inline Tensor Tensor::where(const Tensor & condition, const Tensor & other) const { - return type().where(condition, *this, other); + return dispatch_type().where(condition, *this, other); } inline Tensor Tensor::norm(c10::optional p, ScalarType dtype) const { - return type().norm(*this, p, dtype); + return dispatch_type().norm(*this, p, dtype); } inline Tensor Tensor::norm(Scalar p) const { - return type().norm(*this, p); + return dispatch_type().norm(*this, p); } inline Tensor Tensor::norm(c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype) const { - return type().norm(*this, p, dim, keepdim, dtype); + return dispatch_type().norm(*this, p, dim, keepdim, dtype); } inline Tensor Tensor::norm(c10::optional p, IntArrayRef dim, bool keepdim) const { - return type().norm(*this, p, dim, keepdim); + return dispatch_type().norm(*this, p, dim, keepdim); } inline Tensor Tensor::clone() const { - return type().clone(*this); + return dispatch_type().clone(*this); } inline Tensor & Tensor::resize_as_(const Tensor & the_template) { - return type().resize_as_(*this, the_template); + return dispatch_type().resize_as_(*this, the_template); } inline Tensor Tensor::pow(Scalar exponent) const { - return type().pow(*this, exponent); + return dispatch_type().pow(*this, exponent); } inline Tensor & Tensor::zero_() { - return type().zero_(*this); + return dispatch_type().zero_(*this); } inline Tensor Tensor::sub(const Tensor & other, Scalar alpha) const { - return type().sub(*this, other, alpha); + return 
dispatch_type().sub(*this, other, alpha); } inline Tensor & Tensor::sub_(const Tensor & other, Scalar alpha) { - return type().sub_(*this, other, alpha); + return dispatch_type().sub_(*this, other, alpha); } inline Tensor Tensor::sub(Scalar other, Scalar alpha) const { - return type().sub(*this, other, alpha); + return dispatch_type().sub(*this, other, alpha); } inline Tensor & Tensor::sub_(Scalar other, Scalar alpha) { - return type().sub_(*this, other, alpha); + return dispatch_type().sub_(*this, other, alpha); } inline Tensor Tensor::addmm(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { - return type().addmm(*this, mat1, mat2, beta, alpha); + return dispatch_type().addmm(*this, mat1, mat2, beta, alpha); } inline Tensor & Tensor::addmm_(const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { - return type().addmm_(*this, mat1, mat2, beta, alpha); + return dispatch_type().addmm_(*this, mat1, mat2, beta, alpha); } inline Tensor & Tensor::sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { - return type().sparse_resize_(*this, size, sparse_dim, dense_dim); + return dispatch_type().sparse_resize_(*this, size, sparse_dim, dense_dim); } inline Tensor & Tensor::sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { - return type().sparse_resize_and_clear_(*this, size, sparse_dim, dense_dim); + return dispatch_type().sparse_resize_and_clear_(*this, size, sparse_dim, dense_dim); } inline Tensor Tensor::sparse_mask(SparseTensorRef mask) const { - return type().sparse_mask(*this, mask); + return dispatch_type().sparse_mask(*this, mask); } inline Tensor Tensor::to_dense() const { - return type().to_dense(*this); + return dispatch_type().to_dense(*this); } inline int64_t Tensor::sparse_dim() const { - return type().sparse_dim(*this); + return dispatch_type().sparse_dim(*this); } inline int64_t Tensor::_dimI() const { - return type()._dimI(*this); + return dispatch_type()._dimI(*this); } inline int64_t Tensor::dense_dim() const { - return type().dense_dim(*this); + return dispatch_type().dense_dim(*this); } inline int64_t Tensor::_dimV() const { - return type()._dimV(*this); + return dispatch_type()._dimV(*this); } inline int64_t Tensor::_nnz() const { - return type()._nnz(*this); + return dispatch_type()._nnz(*this); } inline Tensor Tensor::coalesce() const { - return type().coalesce(*this); + return dispatch_type().coalesce(*this); } inline bool Tensor::is_coalesced() const { - return type().is_coalesced(*this); + return dispatch_type().is_coalesced(*this); } inline Tensor Tensor::_indices() const { - return type()._indices(*this); + return dispatch_type()._indices(*this); } inline Tensor Tensor::_values() const { - return type()._values(*this); + return dispatch_type()._values(*this); } inline Tensor & Tensor::_coalesced_(bool coalesced) { - return type()._coalesced_(*this, coalesced); + return dispatch_type()._coalesced_(*this, coalesced); } inline Tensor Tensor::indices() const { - return type().indices(*this); + return dispatch_type().indices(*this); } inline Tensor Tensor::values() const { - return type().values(*this); + return dispatch_type().values(*this); } inline int64_t Tensor::numel() const { - return type().numel(*this); + return dispatch_type().numel(*this); } inline std::vector Tensor::unbind(int64_t dim) const { - return type().unbind(*this, dim); + return dispatch_type().unbind(*this, dim); } inline Tensor Tensor::to_sparse(int64_t sparse_dim) const { - return type().to_sparse(*this, 
sparse_dim); + return dispatch_type().to_sparse(*this, sparse_dim); } inline Tensor Tensor::to_sparse() const { - return type().to_sparse(*this); + return dispatch_type().to_sparse(*this); } inline Tensor Tensor::quantize_linear(double scale, int64_t zero_point) const { - return type().quantize_linear(*this, scale, zero_point); + return dispatch_type().quantize_linear(*this, scale, zero_point); } inline Tensor Tensor::dequantize() const { - return type().dequantize(*this); + return dispatch_type().dequantize(*this); } inline Scalar Tensor::q_scale() const { - return type().q_scale(*this); + return dispatch_type().q_scale(*this); } inline Scalar Tensor::q_zero_point() const { - return type().q_zero_point(*this); + return dispatch_type().q_zero_point(*this); } inline Tensor Tensor::to(const TensorOptions & options, bool non_blocking, bool copy) const { - return type().to(*this, options, non_blocking, copy); + return dispatch_type().to(*this, options, non_blocking, copy); } inline Tensor Tensor::to(Device device, ScalarType dtype, bool non_blocking, bool copy) const { - return type().to(*this, device, dtype, non_blocking, copy); + return dispatch_type().to(*this, device, dtype, non_blocking, copy); } inline Tensor Tensor::to(ScalarType dtype, bool non_blocking, bool copy) const { - return type().to(*this, dtype, non_blocking, copy); + return dispatch_type().to(*this, dtype, non_blocking, copy); } inline Tensor Tensor::to(const Tensor & other, bool non_blocking, bool copy) const { - return type().to(*this, other, non_blocking, copy); + return dispatch_type().to(*this, other, non_blocking, copy); } inline Scalar Tensor::item() const { - return type().item(*this); + return dispatch_type().item(*this); } inline void* Tensor::data_ptr() const { - return type().data_ptr(*this); + return dispatch_type().data_ptr(*this); } inline Tensor & Tensor::set_(Storage source) { - return type().set_(*this, source); + return dispatch_type().set_(*this, source); } inline Tensor & Tensor::set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride) { - return type().set_(*this, source, storage_offset, size, stride); + return dispatch_type().set_(*this, source, storage_offset, size, stride); } inline Tensor & Tensor::set_(const Tensor & source) { - return type().set_(*this, source); + return dispatch_type().set_(*this, source); } inline Tensor & Tensor::set_() { - return type().set_(*this); + return dispatch_type().set_(*this); } inline bool Tensor::is_set_to(const Tensor & tensor) const { - return type().is_set_to(*this, tensor); + return dispatch_type().is_set_to(*this, tensor); } inline Tensor & Tensor::masked_fill_(const Tensor & mask, Scalar value) { - return type().masked_fill_(*this, mask, value); + return dispatch_type().masked_fill_(*this, mask, value); } inline Tensor Tensor::masked_fill(const Tensor & mask, Scalar value) const { - return type().masked_fill(*this, mask, value); + return dispatch_type().masked_fill(*this, mask, value); } inline Tensor & Tensor::masked_fill_(const Tensor & mask, const Tensor & value) { - return type().masked_fill_(*this, mask, value); + return dispatch_type().masked_fill_(*this, mask, value); } inline Tensor Tensor::masked_fill(const Tensor & mask, const Tensor & value) const { - return type().masked_fill(*this, mask, value); + return dispatch_type().masked_fill(*this, mask, value); } inline Tensor & Tensor::masked_scatter_(const Tensor & mask, const Tensor & source) { - return type().masked_scatter_(*this, mask, source); + return 
dispatch_type().masked_scatter_(*this, mask, source); } inline Tensor Tensor::masked_scatter(const Tensor & mask, const Tensor & source) const { - return type().masked_scatter(*this, mask, source); + return dispatch_type().masked_scatter(*this, mask, source); } inline Tensor Tensor::view(IntArrayRef size) const { - return type().view(*this, size); + return dispatch_type().view(*this, size); } inline Tensor & Tensor::put_(const Tensor & index, const Tensor & source, bool accumulate) { - return type().put_(*this, index, source, accumulate); + return dispatch_type().put_(*this, index, source, accumulate); } inline Tensor & Tensor::index_add_(int64_t dim, const Tensor & index, const Tensor & source) { - return type().index_add_(*this, dim, index, source); + return dispatch_type().index_add_(*this, dim, index, source); } inline Tensor Tensor::index_add(int64_t dim, const Tensor & index, const Tensor & source) const { - return type().index_add(*this, dim, index, source); + return dispatch_type().index_add(*this, dim, index, source); } inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, Scalar value) { - return type().index_fill_(*this, dim, index, value); + return dispatch_type().index_fill_(*this, dim, index, value); } inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, Scalar value) const { - return type().index_fill(*this, dim, index, value); + return dispatch_type().index_fill(*this, dim, index, value); } inline Tensor & Tensor::index_fill_(int64_t dim, const Tensor & index, const Tensor & value) { - return type().index_fill_(*this, dim, index, value); + return dispatch_type().index_fill_(*this, dim, index, value); } inline Tensor Tensor::index_fill(int64_t dim, const Tensor & index, const Tensor & value) const { - return type().index_fill(*this, dim, index, value); + return dispatch_type().index_fill(*this, dim, index, value); } inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, const Tensor & src) { - return type().scatter_(*this, dim, index, src); + return dispatch_type().scatter_(*this, dim, index, src); } inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, const Tensor & src) const { - return type().scatter(*this, dim, index, src); + return dispatch_type().scatter(*this, dim, index, src); } inline Tensor & Tensor::scatter_(int64_t dim, const Tensor & index, Scalar value) { - return type().scatter_(*this, dim, index, value); + return dispatch_type().scatter_(*this, dim, index, value); } inline Tensor Tensor::scatter(int64_t dim, const Tensor & index, Scalar value) const { - return type().scatter(*this, dim, index, value); + return dispatch_type().scatter(*this, dim, index, value); } inline Tensor & Tensor::scatter_add_(int64_t dim, const Tensor & index, const Tensor & src) { - return type().scatter_add_(*this, dim, index, src); + return dispatch_type().scatter_add_(*this, dim, index, src); } inline Tensor Tensor::scatter_add(int64_t dim, const Tensor & index, const Tensor & src) const { - return type().scatter_add(*this, dim, index, src); + return dispatch_type().scatter_add(*this, dim, index, src); } inline Tensor & Tensor::lt_(Scalar other) { - return type().lt_(*this, other); + return dispatch_type().lt_(*this, other); } inline Tensor & Tensor::lt_(const Tensor & other) { - return type().lt_(*this, other); + return dispatch_type().lt_(*this, other); } inline Tensor & Tensor::gt_(Scalar other) { - return type().gt_(*this, other); + return dispatch_type().gt_(*this, other); } inline Tensor & Tensor::gt_(const Tensor & 
other) { - return type().gt_(*this, other); + return dispatch_type().gt_(*this, other); } inline Tensor & Tensor::le_(Scalar other) { - return type().le_(*this, other); + return dispatch_type().le_(*this, other); } inline Tensor & Tensor::le_(const Tensor & other) { - return type().le_(*this, other); + return dispatch_type().le_(*this, other); } inline Tensor & Tensor::ge_(Scalar other) { - return type().ge_(*this, other); + return dispatch_type().ge_(*this, other); } inline Tensor & Tensor::ge_(const Tensor & other) { - return type().ge_(*this, other); + return dispatch_type().ge_(*this, other); } inline Tensor & Tensor::eq_(Scalar other) { - return type().eq_(*this, other); + return dispatch_type().eq_(*this, other); } inline Tensor & Tensor::eq_(const Tensor & other) { - return type().eq_(*this, other); + return dispatch_type().eq_(*this, other); } inline Tensor & Tensor::ne_(Scalar other) { - return type().ne_(*this, other); + return dispatch_type().ne_(*this, other); } inline Tensor & Tensor::ne_(const Tensor & other) { - return type().ne_(*this, other); + return dispatch_type().ne_(*this, other); } inline Tensor Tensor::__and__(Scalar other) const { - return type().__and__(*this, other); + return dispatch_type().__and__(*this, other); } inline Tensor Tensor::__and__(const Tensor & other) const { - return type().__and__(*this, other); + return dispatch_type().__and__(*this, other); } inline Tensor & Tensor::__iand__(Scalar other) { - return type().__iand__(*this, other); + return dispatch_type().__iand__(*this, other); } inline Tensor & Tensor::__iand__(const Tensor & other) { - return type().__iand__(*this, other); + return dispatch_type().__iand__(*this, other); } inline Tensor Tensor::__or__(Scalar other) const { - return type().__or__(*this, other); + return dispatch_type().__or__(*this, other); } inline Tensor Tensor::__or__(const Tensor & other) const { - return type().__or__(*this, other); + return dispatch_type().__or__(*this, other); } inline Tensor & Tensor::__ior__(Scalar other) { - return type().__ior__(*this, other); + return dispatch_type().__ior__(*this, other); } inline Tensor & Tensor::__ior__(const Tensor & other) { - return type().__ior__(*this, other); + return dispatch_type().__ior__(*this, other); } inline Tensor Tensor::__xor__(Scalar other) const { - return type().__xor__(*this, other); + return dispatch_type().__xor__(*this, other); } inline Tensor Tensor::__xor__(const Tensor & other) const { - return type().__xor__(*this, other); + return dispatch_type().__xor__(*this, other); } inline Tensor & Tensor::__ixor__(Scalar other) { - return type().__ixor__(*this, other); + return dispatch_type().__ixor__(*this, other); } inline Tensor & Tensor::__ixor__(const Tensor & other) { - return type().__ixor__(*this, other); + return dispatch_type().__ixor__(*this, other); } inline Tensor Tensor::__lshift__(Scalar other) const { - return type().__lshift__(*this, other); + return dispatch_type().__lshift__(*this, other); } inline Tensor Tensor::__lshift__(const Tensor & other) const { - return type().__lshift__(*this, other); + return dispatch_type().__lshift__(*this, other); } inline Tensor & Tensor::__ilshift__(Scalar other) { - return type().__ilshift__(*this, other); + return dispatch_type().__ilshift__(*this, other); } inline Tensor & Tensor::__ilshift__(const Tensor & other) { - return type().__ilshift__(*this, other); + return dispatch_type().__ilshift__(*this, other); } inline Tensor Tensor::__rshift__(Scalar other) const { - return type().__rshift__(*this, other); 
+ return dispatch_type().__rshift__(*this, other); } inline Tensor Tensor::__rshift__(const Tensor & other) const { - return type().__rshift__(*this, other); + return dispatch_type().__rshift__(*this, other); } inline Tensor & Tensor::__irshift__(Scalar other) { - return type().__irshift__(*this, other); + return dispatch_type().__irshift__(*this, other); } inline Tensor & Tensor::__irshift__(const Tensor & other) { - return type().__irshift__(*this, other); + return dispatch_type().__irshift__(*this, other); } inline Tensor & Tensor::lgamma_() { - return type().lgamma_(*this); + return dispatch_type().lgamma_(*this); } inline Tensor & Tensor::atan2_(const Tensor & other) { - return type().atan2_(*this, other); + return dispatch_type().atan2_(*this, other); } inline Tensor & Tensor::tril_(int64_t diagonal) { - return type().tril_(*this, diagonal); + return dispatch_type().tril_(*this, diagonal); } inline Tensor & Tensor::triu_(int64_t diagonal) { - return type().triu_(*this, diagonal); + return dispatch_type().triu_(*this, diagonal); } inline Tensor & Tensor::digamma_() { - return type().digamma_(*this); + return dispatch_type().digamma_(*this); } inline Tensor & Tensor::polygamma_(int64_t n) { - return type().polygamma_(*this, n); + return dispatch_type().polygamma_(*this, n); } inline Tensor & Tensor::erfinv_() { - return type().erfinv_(*this); + return dispatch_type().erfinv_(*this); } inline Tensor & Tensor::frac_() { - return type().frac_(*this); + return dispatch_type().frac_(*this); } inline Tensor & Tensor::renorm_(Scalar p, int64_t dim, Scalar maxnorm) { - return type().renorm_(*this, p, dim, maxnorm); + return dispatch_type().renorm_(*this, p, dim, maxnorm); } inline Tensor & Tensor::reciprocal_() { - return type().reciprocal_(*this); + return dispatch_type().reciprocal_(*this); } inline Tensor & Tensor::neg_() { - return type().neg_(*this); + return dispatch_type().neg_(*this); } inline Tensor & Tensor::pow_(Scalar exponent) { - return type().pow_(*this, exponent); + return dispatch_type().pow_(*this, exponent); } inline Tensor & Tensor::pow_(const Tensor & exponent) { - return type().pow_(*this, exponent); + return dispatch_type().pow_(*this, exponent); } inline Tensor & Tensor::lerp_(const Tensor & end, Scalar weight) { - return type().lerp_(*this, end, weight); + return dispatch_type().lerp_(*this, end, weight); } inline Tensor & Tensor::lerp_(const Tensor & end, const Tensor & weight) { - return type().lerp_(*this, end, weight); + return dispatch_type().lerp_(*this, end, weight); } inline Tensor & Tensor::sign_() { - return type().sign_(*this); + return dispatch_type().sign_(*this); } inline Tensor & Tensor::fmod_(Scalar other) { - return type().fmod_(*this, other); + return dispatch_type().fmod_(*this, other); } inline Tensor & Tensor::fmod_(const Tensor & other) { - return type().fmod_(*this, other); + return dispatch_type().fmod_(*this, other); } inline Tensor & Tensor::remainder_(Scalar other) { - return type().remainder_(*this, other); + return dispatch_type().remainder_(*this, other); } inline Tensor & Tensor::remainder_(const Tensor & other) { - return type().remainder_(*this, other); + return dispatch_type().remainder_(*this, other); } inline Tensor & Tensor::addbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { - return type().addbmm_(*this, batch1, batch2, beta, alpha); + return dispatch_type().addbmm_(*this, batch1, batch2, beta, alpha); } inline Tensor Tensor::addbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar 
alpha) const { - return type().addbmm(*this, batch1, batch2, beta, alpha); + return dispatch_type().addbmm(*this, batch1, batch2, beta, alpha); } inline Tensor & Tensor::addcmul_(const Tensor & tensor1, const Tensor & tensor2, Scalar value) { - return type().addcmul_(*this, tensor1, tensor2, value); + return dispatch_type().addcmul_(*this, tensor1, tensor2, value); } inline Tensor & Tensor::addcdiv_(const Tensor & tensor1, const Tensor & tensor2, Scalar value) { - return type().addcdiv_(*this, tensor1, tensor2, value); + return dispatch_type().addcdiv_(*this, tensor1, tensor2, value); } inline Tensor & Tensor::random_(int64_t from, int64_t to, Generator * generator) { - return type().random_(*this, from, to, generator); + return dispatch_type().random_(*this, from, to, generator); } inline Tensor & Tensor::random_(int64_t to, Generator * generator) { - return type().random_(*this, to, generator); + return dispatch_type().random_(*this, to, generator); } inline Tensor & Tensor::random_(Generator * generator) { - return type().random_(*this, generator); + return dispatch_type().random_(*this, generator); } inline Tensor & Tensor::uniform_(double from, double to, Generator * generator) { - return type().uniform_(*this, from, to, generator); + return dispatch_type().uniform_(*this, from, to, generator); } inline Tensor & Tensor::normal_(double mean, double std, Generator * generator) { - return type().normal_(*this, mean, std, generator); + return dispatch_type().normal_(*this, mean, std, generator); } inline Tensor & Tensor::cauchy_(double median, double sigma, Generator * generator) { - return type().cauchy_(*this, median, sigma, generator); + return dispatch_type().cauchy_(*this, median, sigma, generator); } inline Tensor & Tensor::log_normal_(double mean, double std, Generator * generator) { - return type().log_normal_(*this, mean, std, generator); + return dispatch_type().log_normal_(*this, mean, std, generator); } inline Tensor & Tensor::exponential_(double lambd, Generator * generator) { - return type().exponential_(*this, lambd, generator); + return dispatch_type().exponential_(*this, lambd, generator); } inline Tensor & Tensor::geometric_(double p, Generator * generator) { - return type().geometric_(*this, p, generator); + return dispatch_type().geometric_(*this, p, generator); } inline Tensor Tensor::diag(int64_t diagonal) const { - return type().diag(*this, diagonal); + return dispatch_type().diag(*this, diagonal); } inline Tensor Tensor::cross(const Tensor & other, c10::optional dim) const { - return type().cross(*this, other, dim); + return dispatch_type().cross(*this, other, dim); } inline Tensor Tensor::triu(int64_t diagonal) const { - return type().triu(*this, diagonal); + return dispatch_type().triu(*this, diagonal); } inline Tensor Tensor::tril(int64_t diagonal) const { - return type().tril(*this, diagonal); + return dispatch_type().tril(*this, diagonal); } inline Tensor Tensor::trace() const { - return type().trace(*this); + return dispatch_type().trace(*this); } inline Tensor Tensor::ne(Scalar other) const { - return type().ne(*this, other); + return dispatch_type().ne(*this, other); } inline Tensor Tensor::ne(const Tensor & other) const { - return type().ne(*this, other); + return dispatch_type().ne(*this, other); } inline Tensor Tensor::eq(Scalar other) const { - return type().eq(*this, other); + return dispatch_type().eq(*this, other); } inline Tensor Tensor::eq(const Tensor & other) const { - return type().eq(*this, other); + return dispatch_type().eq(*this, other); } 
inline Tensor Tensor::ge(Scalar other) const { - return type().ge(*this, other); + return dispatch_type().ge(*this, other); } inline Tensor Tensor::ge(const Tensor & other) const { - return type().ge(*this, other); + return dispatch_type().ge(*this, other); } inline Tensor Tensor::le(Scalar other) const { - return type().le(*this, other); + return dispatch_type().le(*this, other); } inline Tensor Tensor::le(const Tensor & other) const { - return type().le(*this, other); + return dispatch_type().le(*this, other); } inline Tensor Tensor::gt(Scalar other) const { - return type().gt(*this, other); + return dispatch_type().gt(*this, other); } inline Tensor Tensor::gt(const Tensor & other) const { - return type().gt(*this, other); + return dispatch_type().gt(*this, other); } inline Tensor Tensor::lt(Scalar other) const { - return type().lt(*this, other); + return dispatch_type().lt(*this, other); } inline Tensor Tensor::lt(const Tensor & other) const { - return type().lt(*this, other); + return dispatch_type().lt(*this, other); } inline Tensor Tensor::take(const Tensor & index) const { - return type().take(*this, index); + return dispatch_type().take(*this, index); } inline Tensor Tensor::index_select(int64_t dim, const Tensor & index) const { - return type().index_select(*this, dim, index); + return dispatch_type().index_select(*this, dim, index); } inline Tensor Tensor::masked_select(const Tensor & mask) const { - return type().masked_select(*this, mask); + return dispatch_type().masked_select(*this, mask); } inline Tensor Tensor::nonzero() const { - return type().nonzero(*this); + return dispatch_type().nonzero(*this); } inline Tensor Tensor::gather(int64_t dim, const Tensor & index, bool sparse_grad) const { - return type().gather(*this, dim, index, sparse_grad); + return dispatch_type().gather(*this, dim, index, sparse_grad); } inline Tensor Tensor::addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { - return type().addcmul(*this, tensor1, tensor2, value); + return dispatch_type().addcmul(*this, tensor1, tensor2, value); } inline Tensor Tensor::addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { - return type().addcdiv(*this, tensor1, tensor2, value); + return dispatch_type().addcdiv(*this, tensor1, tensor2, value); } inline std::tuple Tensor::gels(const Tensor & A) const { - return type().gels(*this, A); + return dispatch_type().gels(*this, A); } inline std::tuple Tensor::triangular_solve(const Tensor & A, bool upper, bool transpose, bool unitriangular) const { - return type().triangular_solve(*this, A, upper, transpose, unitriangular); + return dispatch_type().triangular_solve(*this, A, upper, transpose, unitriangular); } inline std::tuple Tensor::symeig(bool eigenvectors, bool upper) const { - return type().symeig(*this, eigenvectors, upper); + return dispatch_type().symeig(*this, eigenvectors, upper); } inline std::tuple Tensor::eig(bool eigenvectors) const { - return type().eig(*this, eigenvectors); + return dispatch_type().eig(*this, eigenvectors); } inline std::tuple Tensor::svd(bool some, bool compute_uv) const { - return type().svd(*this, some, compute_uv); + return dispatch_type().svd(*this, some, compute_uv); } inline Tensor Tensor::cholesky(bool upper) const { - return type().cholesky(*this, upper); + return dispatch_type().cholesky(*this, upper); } inline Tensor Tensor::cholesky_solve(const Tensor & input2, bool upper) const { - return type().cholesky_solve(*this, input2, upper); + return dispatch_type().cholesky_solve(*this, 
input2, upper); } inline std::tuple Tensor::solve(const Tensor & A) const { - return type().solve(*this, A); + return dispatch_type().solve(*this, A); } inline Tensor Tensor::potri(bool upper) const { - return type().potri(*this, upper); + return dispatch_type().potri(*this, upper); } inline std::tuple Tensor::pstrf(bool upper, Scalar tol) const { - return type().pstrf(*this, upper, tol); + return dispatch_type().pstrf(*this, upper, tol); } inline std::tuple Tensor::qr() const { - return type().qr(*this); + return dispatch_type().qr(*this); } inline std::tuple Tensor::geqrf() const { - return type().geqrf(*this); + return dispatch_type().geqrf(*this); } inline Tensor Tensor::orgqr(const Tensor & input2) const { - return type().orgqr(*this, input2); + return dispatch_type().orgqr(*this, input2); } inline Tensor Tensor::ormqr(const Tensor & input2, const Tensor & input3, bool left, bool transpose) const { - return type().ormqr(*this, input2, input3, left, transpose); + return dispatch_type().ormqr(*this, input2, input3, left, transpose); } inline Tensor Tensor::btrisolve(const Tensor & LU_data, const Tensor & LU_pivots) const { - return type().btrisolve(*this, LU_data, LU_pivots); + return dispatch_type().btrisolve(*this, LU_data, LU_pivots); } inline Tensor Tensor::multinomial(int64_t num_samples, bool replacement, Generator * generator) const { - return type().multinomial(*this, num_samples, replacement, generator); + return dispatch_type().multinomial(*this, num_samples, replacement, generator); } inline Tensor Tensor::lgamma() const { - return type().lgamma(*this); + return dispatch_type().lgamma(*this); } inline Tensor Tensor::digamma() const { - return type().digamma(*this); + return dispatch_type().digamma(*this); } inline Tensor Tensor::polygamma(int64_t n) const { - return type().polygamma(n, *this); + return dispatch_type().polygamma(n, *this); } inline Tensor Tensor::erfinv() const { - return type().erfinv(*this); + return dispatch_type().erfinv(*this); } inline Tensor Tensor::frac() const { - return type().frac(*this); + return dispatch_type().frac(*this); } inline Tensor Tensor::dist(const Tensor & other, Scalar p) const { - return type().dist(*this, other, p); + return dispatch_type().dist(*this, other, p); } inline Tensor Tensor::reciprocal() const { - return type().reciprocal(*this); + return dispatch_type().reciprocal(*this); } inline Tensor Tensor::neg() const { - return type().neg(*this); + return dispatch_type().neg(*this); } inline Tensor Tensor::atan2(const Tensor & other) const { - return type().atan2(*this, other); + return dispatch_type().atan2(*this, other); } inline Tensor Tensor::lerp(const Tensor & end, Scalar weight) const { - return type().lerp(*this, end, weight); + return dispatch_type().lerp(*this, end, weight); } inline Tensor Tensor::lerp(const Tensor & end, const Tensor & weight) const { - return type().lerp(*this, end, weight); + return dispatch_type().lerp(*this, end, weight); } inline Tensor Tensor::histc(int64_t bins, Scalar min, Scalar max) const { - return type().histc(*this, bins, min, max); + return dispatch_type().histc(*this, bins, min, max); } inline Tensor Tensor::sign() const { - return type().sign(*this); + return dispatch_type().sign(*this); } inline Tensor Tensor::fmod(Scalar other) const { - return type().fmod(*this, other); + return dispatch_type().fmod(*this, other); } inline Tensor Tensor::fmod(const Tensor & other) const { - return type().fmod(*this, other); + return dispatch_type().fmod(*this, other); } inline Tensor 
Tensor::remainder(Scalar other) const { - return type().remainder(*this, other); + return dispatch_type().remainder(*this, other); } inline Tensor Tensor::remainder(const Tensor & other) const { - return type().remainder(*this, other); + return dispatch_type().remainder(*this, other); } inline Tensor Tensor::min(const Tensor & other) const { - return type().min(*this, other); + return dispatch_type().min(*this, other); } inline Tensor Tensor::min() const { - return type().min(*this); + return dispatch_type().min(*this); } inline Tensor Tensor::max(const Tensor & other) const { - return type().max(*this, other); + return dispatch_type().max(*this, other); } inline Tensor Tensor::max() const { - return type().max(*this); + return dispatch_type().max(*this); } inline Tensor Tensor::median() const { - return type().median(*this); + return dispatch_type().median(*this); } inline std::tuple Tensor::sort(int64_t dim, bool descending) const { - return type().sort(*this, dim, descending); + return dispatch_type().sort(*this, dim, descending); } inline Tensor Tensor::argsort(int64_t dim, bool descending) const { - return type().argsort(*this, dim, descending); + return dispatch_type().argsort(*this, dim, descending); } inline std::tuple Tensor::topk(int64_t k, int64_t dim, bool largest, bool sorted) const { - return type().topk(*this, k, dim, largest, sorted); + return dispatch_type().topk(*this, k, dim, largest, sorted); } inline Tensor Tensor::all() const { - return type().all(*this); + return dispatch_type().all(*this); } inline Tensor Tensor::any() const { - return type().any(*this); + return dispatch_type().any(*this); } inline Tensor Tensor::renorm(Scalar p, int64_t dim, Scalar maxnorm) const { - return type().renorm(*this, p, dim, maxnorm); + return dispatch_type().renorm(*this, p, dim, maxnorm); } inline Tensor Tensor::unfold(int64_t dimension, int64_t size, int64_t step) const { - return type().unfold(*this, dimension, size, step); + return dispatch_type().unfold(*this, dimension, size, step); } inline bool Tensor::equal(const Tensor & other) const { - return type().equal(*this, other); + return dispatch_type().equal(*this, other); } inline Tensor Tensor::pow(const Tensor & exponent) const { - return type().pow(*this, exponent); + return dispatch_type().pow(*this, exponent); } inline Tensor Tensor::alias() const { - return type().alias(*this); + return dispatch_type().alias(*this); } inline bool Tensor::is_variable() const noexcept { diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py index f50537b..ce585e3 100644 --- a/aten/src/ATen/function_wrapper.py +++ b/aten/src/ATen/function_wrapper.py @@ -130,7 +130,7 @@ ${return_type} ${api_name}(${method_formals_with_defaults})${const_mark}; # add non-virtual declaration to Tensor.cpp TENSOR_METHOD_DEFINITION = CodeTemplate("""\ inline ${return_type} Tensor::${api_name}(${method_formals})${const_mark} { - return type().${api_name}(${method_actuals}); + return dispatch_type().${api_name}(${method_actuals}); } """) # add a method declaration in Functions.h diff --git a/aten/src/ATen/native/BatchLinearAlgebra.cpp b/aten/src/ATen/native/BatchLinearAlgebra.cpp index 3507279..3da24a1 100644 --- a/aten/src/ATen/native/BatchLinearAlgebra.cpp +++ b/aten/src/ATen/native/BatchLinearAlgebra.cpp @@ -140,7 +140,7 @@ static void apply_solve(Tensor& b, Tensor& A, std::vector& infos) { auto n = A.size(-2); auto nrhs = b.size(-1); - auto ipiv = at::empty({n}, b.type().toScalarType(kInt)); + auto ipiv = at::empty({n}, 
b.options().dtype(kInt)); int info; if (b.dim() == 2) { @@ -211,7 +211,7 @@ static void apply_inverse(Tensor& self, std::vector& infos) { auto batch_size = batchCount(self); auto n = self.size(-2); - auto ipiv = at::empty({n}, self.type().toScalarType(kInt)); + auto ipiv = at::empty({n}, self.options().dtype(kInt)); int lwork; scalar_t wkopt; Tensor work; @@ -230,7 +230,7 @@ static void apply_inverse(Tensor& self, std::vector& infos) { lapackGetri(n, self_working_ptr, n, ipiv.data(), &wkopt, lwork, &info); lwork = static_cast(wkopt); - work = at::empty({lwork}, self.type()); + work = at::empty({lwork}, self.options()); // now to compute the actual inverse lapackGetri(n, self_working_ptr, n, ipiv.data(), work.data(), lwork, &info); diff --git a/aten/src/ATen/native/Indexing.cpp b/aten/src/ATen/native/Indexing.cpp index 34851f0..062d267 100644 --- a/aten/src/ATen/native/Indexing.cpp +++ b/aten/src/ATen/native/Indexing.cpp @@ -190,7 +190,6 @@ static Tensor wrapIndexOnce(const Tensor & index, int64_t dim, int64_t dim_size) static Tensor computeLinearIndex(const Tensor & src, TensorList indices) { auto strides = computeLinearStride(src); - Type& longType = src.type().toScalarType(kLong); // Compute the linear index by multiplying the indexing tensors by the // stride and summing them. All the indexing tensors have the same shape at @@ -202,7 +201,7 @@ static Tensor computeLinearIndex(const Tensor & src, TensorList indices) { if (indices[i].defined()) { // Cast index to the longType matching src's backend // This allows us to support ie indexing a cuda tensor with a cpu tensor - Tensor index = (wrapIndexOnce(indices[i], i, src.size(i)) * strides[i]).toType(longType); + Tensor index = (wrapIndexOnce(indices[i], i, src.size(i)) * strides[i]).to(kLong); if (linearIndex.defined()) { linearIndex += index; } else { @@ -220,13 +219,13 @@ static Tensor computeLinearIndex(const Tensor & src, TensorList indices) { // Compute the linear indices for the parts of the tensor not being indexed Tensor beforeIndex; if (emptyBefore > 0) { - auto index = at::arange(0, nElemBefore, longType) * strides[emptyBefore - 1]; + auto index = at::arange(0, nElemBefore, src.options().dtype(kLong)) * strides[emptyBefore - 1]; index = index.view(src.sizes().slice(0, emptyBefore)); beforeIndex = unsqueezeN(index, 0, linearIndex.dim() + emptyAfter); } Tensor afterIndex; if (emptyAfter > 0) { - auto index = at::arange(0, nElemAfter, longType); + auto index = at::arange(0, nElemAfter, src.options().dtype(kLong)); index = index.view(src.sizes().slice(src.dim() - emptyAfter, emptyAfter)); afterIndex = unsqueezeN(index, linearIndex.dim() + emptyBefore, 0); } @@ -408,7 +407,7 @@ static AdvancedIndex make_info(Tensor self, TensorList orig) { static std::unique_ptr make_index_iterator(const AdvancedIndex& info) { auto builder = TensorIterator::Builder(); builder.dont_compute_common_dtype(); - builder.add_output(Tensor(), &info.src.type()); + builder.add_output(Tensor(), &info.src.dispatch_type()); builder.add_input(info.src); for (auto& index : info.indices) { builder.add_input(index); @@ -425,7 +424,7 @@ static std::unique_ptr make_index_put_iterator(const AdvancedInd builder.dont_compute_common_dtype(); builder.dont_resize_outputs(); builder.add_output(info.src); - builder.add_input(value, &info.src.type()); + builder.add_input(value, &info.src.dispatch_type()); for (auto& index : info.indices) { builder.add_input(index); } diff --git a/aten/src/ATen/native/LegacyBridge.cpp b/aten/src/ATen/native/LegacyBridge.cpp index 
62f2984..f0556f6 100644 --- a/aten/src/ATen/native/LegacyBridge.cpp +++ b/aten/src/ATen/native/LegacyBridge.cpp @@ -7,12 +7,8 @@ namespace at { namespace native { namespace { - static bool _type_has_native(const Type& dtype) { - return dtype.is_sparse(); - } - static bool _has_native(const Tensor& self) { - return _type_has_native(self.type()); + return self.is_sparse(); } } diff --git a/aten/src/ATen/native/LinearAlgebra.cpp b/aten/src/ATen/native/LinearAlgebra.cpp index 5d3c157..ee8b743 100644 --- a/aten/src/ATen/native/LinearAlgebra.cpp +++ b/aten/src/ATen/native/LinearAlgebra.cpp @@ -157,7 +157,7 @@ Tensor& ger_out(Tensor& result, const Tensor& self, const Tensor& vec2) { Tensor mm(const Tensor& self, const Tensor& mat2) { if (self.is_sparse()) { - return mat2.type().addmm(at::zeros({}, mat2.type()), self, mat2, 0, 1); + return at::zeros({}, mat2.options()).addmm(self, mat2, 0, 1); } return at::legacy::th::_th_mm(self, mat2); } @@ -368,8 +368,9 @@ Tensor dot(const Tensor& self, const Tensor& tensor) { Tensor& dot_out(Tensor& result, const Tensor& self, const Tensor& tensor) { result.resize_({}); - // dispatching through type ensures we don't allow mismatched types. - return self.type().fill_(result, self.dot(tensor)); + AT_CHECK(result.scalar_type() == self.scalar_type(), + "result dtype ", result.scalar_type(), " does not match self dtype ", self.scalar_type()); + return result.fill_(self.dot(tensor)); } /* diff --git a/aten/src/ATen/native/LossCTC.cpp b/aten/src/ATen/native/LossCTC.cpp index f6d8906..cdd7a4e 100644 --- a/aten/src/ATen/native/LossCTC.cpp +++ b/aten/src/ATen/native/LossCTC.cpp @@ -364,7 +364,7 @@ Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, IntArrayRef inpu } } if (reduction == Reduction::Mean) { - auto target_lengths_t = at::tensor(target_lengths, res.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(res.type()); + auto target_lengths_t = at::tensor(target_lengths, res.options()); return (res / target_lengths_t).mean(); } else if (reduction == Reduction::Sum) { return res.sum(); diff --git a/aten/src/ATen/native/Memory.cpp b/aten/src/ATen/native/Memory.cpp index 5717735..861b7b2 100644 --- a/aten/src/ATen/native/Memory.cpp +++ b/aten/src/ATen/native/Memory.cpp @@ -12,7 +12,7 @@ Tensor pin_memory(const Tensor& self) { AT_ERROR("cannot pin '", self.type().toString(), "' only dense CPU tensors can be pinned"); } auto* allocator = detail::getCUDAHooks().getPinnedMemoryAllocator(); - auto tensor = self.type().tensorWithAllocator(self.sizes(), self.strides(), allocator); + auto tensor = self.dispatch_type().tensorWithAllocator(self.sizes(), self.strides(), allocator); tensor.copy_(self); return tensor; } diff --git a/aten/src/ATen/native/NNPACK.cpp b/aten/src/ATen/native/NNPACK.cpp index a1ebbca..874a65b 100644 --- a/aten/src/ATen/native/NNPACK.cpp +++ b/aten/src/ATen/native/NNPACK.cpp @@ -213,10 +213,10 @@ Tensor _nnpack_spatial_convolution( auto algorithm = nnp_convolution_algorithm_auto; // All Tensors must be float Tensors - if (input.type().ID() != at::TypeID::CPUFloat || - weight.type().ID() != at::TypeID::CPUFloat || - output.type().ID() != at::TypeID::CPUFloat || - (bias.defined() && bias.type().ID() != at::TypeID::CPUFloat)) { + if (input.dispatch_type().ID() != at::TypeID::CPUFloat || + weight.dispatch_type().ID() != at::TypeID::CPUFloat || + output.dispatch_type().ID() != at::TypeID::CPUFloat || + (bias.defined() && bias.dispatch_type().ID() != at::TypeID::CPUFloat)) { throw std::runtime_error( "Mismatched Tensor types in 
NNPack convolutionOutput"); } diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp index 97994aa..9cb247d 100644 --- a/aten/src/ATen/native/ReduceOps.cpp +++ b/aten/src/ATen/native/ReduceOps.cpp @@ -67,7 +67,7 @@ static void allocate_reduction_result( if (result.defined()) { result.resize_(shape); } else { - result = at::empty(shape, self.type().toScalarType(dtype)); + result = at::empty(shape, self.options().dtype(dtype)); } } diff --git a/aten/src/ATen/native/TensorConversions.cpp b/aten/src/ATen/native/TensorConversions.cpp index 05c8734..bf5eb4a 100644 --- a/aten/src/ATen/native/TensorConversions.cpp +++ b/aten/src/ATen/native/TensorConversions.cpp @@ -20,8 +20,8 @@ static inline Device ensure_has_index(Device device) { } static inline Tensor to_impl(const Tensor& self, const TensorOptions& options, bool non_blocking) { - return self.type().toBackend(options.backend()).toScalarType(typeMetaToScalarType(options.dtype())) - .copy(self, non_blocking, options.device()); + return self.dispatch_type().toBackend(options.backend()).toScalarType(typeMetaToScalarType(options.dtype())) + .copy(self, non_blocking, options.device()); } Tensor to(const Tensor& self, const TensorOptions& options, bool non_blocking, bool copy) { diff --git a/aten/src/ATen/native/TensorFactories.cpp b/aten/src/ATen/native/TensorFactories.cpp index 3fc0ee2..5727caf 100644 --- a/aten/src/ATen/native/TensorFactories.cpp +++ b/aten/src/ATen/native/TensorFactories.cpp @@ -140,12 +140,12 @@ Tensor& empty_out(Tensor& result, IntArrayRef size) { // specialized operators for each datatype. // TODO: remove when we have Type support in the IR -#define DEFINE_CAST_OP(_1, n, _2) \ - Tensor _cast_##n(const Tensor& self, bool non_blocking) { \ - auto& target_type = self.type().toScalarType(ScalarType::n); \ - if (self.type() == target_type) \ - return self; \ - return target_type.copy(self, non_blocking); \ +#define DEFINE_CAST_OP(_1, n, _2) \ + Tensor _cast_##n(const Tensor& self, bool non_blocking) { \ + auto& target_type = self.dispatch_type().toScalarType(ScalarType::n); \ + if (self.dispatch_type() == target_type) \ + return self; \ + return target_type.copy(self, non_blocking); \ } AT_FORALL_SCALAR_TYPES_AND_BOOL_EXCEPT_QINT(DEFINE_CAST_OP) diff --git a/aten/src/ATen/native/TensorIterator.h b/aten/src/ATen/native/TensorIterator.h index f3510ab..affcade 100644 --- a/aten/src/ATen/native/TensorIterator.h +++ b/aten/src/ATen/native/TensorIterator.h @@ -69,7 +69,7 @@ struct CAFFE2_API OperandInfo { OperandInfo(const Tensor& t, const Type* type=nullptr) : tensor(t), type(const_cast(type)) { if (t.defined() && !type) { - this->type = &t.type(); + this->type = &t.dispatch_type(); } } diff --git a/aten/src/ATen/native/TensorIteratorReduce.cpp b/aten/src/ATen/native/TensorIteratorReduce.cpp index 5ec8f98..f6d2028 100644 --- a/aten/src/ATen/native/TensorIteratorReduce.cpp +++ b/aten/src/ATen/native/TensorIteratorReduce.cpp @@ -35,7 +35,7 @@ static void two_pass_reduction(TensorIterator& iter, const loop2d_t& loop) { auto& dst = iter.tensor(0); auto buffer_shape = DimVector(dst.sizes()); buffer_shape.insert(buffer_shape.begin(), max_threads); - auto buffer = at::empty(buffer_shape, dst.type()); + auto buffer = at::empty(buffer_shape, dst.options()); std::unique_ptr written(new bool[max_threads]); std::fill(written.get(), written.get() + max_threads, false); diff --git a/aten/src/ATen/native/TypeProperties.cpp b/aten/src/ATen/native/TypeProperties.cpp index 9ed7648..c2cae17 100644 --- 
a/aten/src/ATen/native/TypeProperties.cpp +++ b/aten/src/ATen/native/TypeProperties.cpp @@ -10,7 +10,7 @@ bool is_cuda(const Tensor& self) { } bool is_distributed(const Tensor& self) { - return self.type().is_distributed(); + return self.dispatch_type().is_distributed(); } bool is_complex(const Tensor& self) { @@ -35,7 +35,7 @@ bool is_sparse(const Tensor& self) { } Tensor type_as(const Tensor& self, const Tensor& other) { - return self.toType(other.type()); + return self.toType(other.dispatch_type()); } }} // namespace at::native diff --git a/aten/src/ATen/native/cuda/Distributions.cu b/aten/src/ATen/native/cuda/Distributions.cu index 4b0f6f0..fb72346 100644 --- a/aten/src/ATen/native/cuda/Distributions.cu +++ b/aten/src/ATen/native/cuda/Distributions.cu @@ -237,7 +237,6 @@ Tensor& bernoulli_tensor_cuda_(Tensor &self, const Tensor& p_, Generator* gen) { auto p = std::get<0>(expand_inplace(self, p_.to(kCUDA))); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, self.scalar_type(), "bernoulli_tensor_cuda_self_", [&] { - const at::Type& p_type = p.type(); using self_t = scalar_t; auto seeds = next_philox_seed(gen, 10); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, p.scalar_type(), "bernoulli_tensor_cuda_p_", [&] { diff --git a/aten/src/ATen/native/cuda/LossCTC.cu b/aten/src/ATen/native/cuda/LossCTC.cu index 547dd6c..9fe9697 100644 --- a/aten/src/ATen/native/cuda/LossCTC.cu +++ b/aten/src/ATen/native/cuda/LossCTC.cu @@ -164,12 +164,11 @@ ctc_loss_log_alpha_gpu_kernel(scalar_t* __restrict__ log_alpha_data, // We return log_alpha (currently, might change to (log_alpha+log_beta) to be passed to the // backward. The dispatch function will only return the loss. template -std::tuple ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets_, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) { +std::tuple ctc_loss_gpu_template(const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t BLANK) { // log_probs: input_len x batch_size x num_labels // targets [int64]: batch_size x target_length OR sum(target_lengths) CheckedFrom c = "ctc_loss_gpu"; using target_t = typename std::conditional::type; - auto targets = targets_.toType(log_probs.type().toScalarType(target_scalar_type)); // to log_probs cuda if it isn't there already auto log_probs_arg = TensorArg(log_probs, "log_probs", 1); auto targets_arg = TensorArg(targets, "targets", 2); checkAllSameGPU(c, {log_probs_arg, targets_arg}); @@ -225,7 +224,7 @@ std::tuple ctc_loss_gpu_template(const Tensor& log_probs, const auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong)); auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong)); - tg_batch_offsets = tg_batch_offsets.toType(targets.type().toScalarType(kLong)); + tg_batch_offsets = tg_batch_offsets.cuda(); Tensor log_alpha = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options()); Tensor neg_log_likelihood = at::empty({batch_size}, log_probs.options()); @@ -481,11 +480,10 @@ ctc_loss_backward_collect_gpu_kernel(scalar_t* __restrict__ gradient_data, // The backward. It essentially computes eq 16 by using the above kernels. // We don't do a lot of checking as we envision this to be called only when backpropagating through a (well-checked) forward. 
template -Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets_, IntArrayRef input_lengths, IntArrayRef target_lengths, +Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_probs, const Tensor& targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor& neg_log_likelihood, const Tensor& log_alpha, int64_t BLANK, bool zero_infinity) { constexpr scalar_t neginf = -INFINITY; using target_t = typename std::conditional::type; - auto targets = targets_.toType(log_probs.type().toScalarType(target_scalar_type)); // to cuda if it isn't there already int64_t batch_size = log_probs.size(1); int64_t num_labels = log_probs.size(2); int64_t lp_input_stride = log_probs.stride(0); @@ -515,9 +513,9 @@ Tensor ctc_loss_backward_gpu_template(const Tensor& grad_out, const Tensor& log_ tg_target_stride = targets.stride(1); max_target_length = targets.size(1); } - auto target_lengths_t = at::tensor(target_lengths, targets.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(targets.type().toScalarType(kLong)); - auto input_lengths_t = at::tensor(input_lengths, targets.options().device(at::Device(at::Device::Type::CPU)).dtype(kLong)).toType(targets.type().toScalarType(kLong)); - tg_batch_offsets = tg_batch_offsets.toType(targets.type().toScalarType(kLong)); + auto target_lengths_t = at::tensor(target_lengths, targets.options().dtype(kLong)); + auto input_lengths_t = at::tensor(input_lengths, targets.options().dtype(kLong)); + tg_batch_offsets = tg_batch_offsets.cuda(); Tensor log_beta = at::empty({batch_size, log_probs.size(0), 2*max_target_length+1}, log_probs.options()); Tensor grad = at::full_like(log_probs, neginf); // initialization for log(sum (alpha beta)) diff --git a/aten/src/ATen/native/cuda/MiscUtils.h b/aten/src/ATen/native/cuda/MiscUtils.h index e9dedc7..4cc1c4f 100644 --- a/aten/src/ATen/native/cuda/MiscUtils.h +++ b/aten/src/ATen/native/cuda/MiscUtils.h @@ -57,9 +57,9 @@ template static inline Storage pin_memory(int64_t size, Tensor dummy) { int64_t adjusted_size = size * sizeof(T); auto* allocator = cuda::getPinnedMemoryAllocator(); - auto& backend = dummy.type().toBackend(Backend::CPU).toScalarType(kByte); + auto& backend = dummy.dispatch_type().toBackend(Backend::CPU).toScalarType(kByte); return backend.storageWithAllocator(adjusted_size, allocator); } - + } // namespace native } // namespace at diff --git a/aten/src/ATen/templates/Tensor.h b/aten/src/ATen/templates/Tensor.h index 091450e..b1e917a 100644 --- a/aten/src/ATen/templates/Tensor.h +++ b/aten/src/ATen/templates/Tensor.h @@ -13,6 +13,7 @@ #include #include #include +#include namespace c10{ struct TensorOptions; @@ -196,7 +197,11 @@ class CAFFE2_API Tensor { return impl_->itemsize(); } - Type & type() const { + DeprecatedTypeProperties & type() const { + return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( + tensorTypeIdToBackend(type_id()), scalar_type()); + } + Type & dispatch_type() const { return legacyTensorType(*impl_); } TensorTypeId type_id() const { diff --git a/aten/src/ATen/templates/TensorMethods.h b/aten/src/ATen/templates/TensorMethods.h index bbaa19b..5928907 100644 --- a/aten/src/ATen/templates/TensorMethods.h +++ b/aten/src/ATen/templates/TensorMethods.h @@ -10,33 +10,33 @@ namespace at { inline Tensor Tensor::toType(const Type & t, bool non_blocking) const { - if(type() == t) + if(dispatch_type() == t) return *this; return t.copy(*this, non_blocking); } inline Tensor 
Tensor::cpu() const { - return toType(type().cpu()); + return toType(dispatch_type().cpu()); } inline Tensor Tensor::cuda() const { - return toType(type().cuda()); + return toType(dispatch_type().cuda()); } inline Tensor Tensor::hip() const { - return toType(type().hip()); + return toType(dispatch_type().hip()); } inline Tensor & Tensor::copy_(const Tensor & src, bool non_blocking) { - return type().copy_(*this, src, non_blocking); + return dispatch_type().copy_(*this, src, non_blocking); } inline Tensor Tensor::toType(ScalarType t) const { - return toType(type().toScalarType(t)); + return toType(dispatch_type().toScalarType(t)); } inline Tensor Tensor::toBackend(Backend b) const { - return toType(type().toBackend(b)); + return toType(dispatch_type().toBackend(b)); } inline TensorOptions Tensor::options() const { @@ -50,11 +50,11 @@ inline void Tensor::backward( c10::optional gradient, bool keep_graph, bool create_graph) { - type().backward(*this, std::move(gradient), keep_graph, create_graph); + dispatch_type().backward(*this, std::move(gradient), keep_graph, create_graph); } inline void Tensor::set_data(Tensor new_data) { - type().set_data(*this, new_data); + dispatch_type().set_data(*this, new_data); } // all static inline to allow for inlining of the non-dynamic part of dispatch diff --git a/aten/src/ATen/test/cuda_tensor_interop_test.cpp b/aten/src/ATen/test/cuda_tensor_interop_test.cpp index 9877fd2..f206a93 100644 --- a/aten/src/ATen/test/cuda_tensor_interop_test.cpp +++ b/aten/src/ATen/test/cuda_tensor_interop_test.cpp @@ -30,7 +30,7 @@ TEST(CUDACaffe2ToPytorch, SimpleLegacy) { caffe2::math::Set(16, 777, data, &context); } at::Tensor at_tensor(c2_tensor); - ASSERT_TRUE(&at_tensor.type() != nullptr); + ASSERT_TRUE(&at_tensor.dispatch_type() != nullptr); ASSERT_TRUE(at_tensor.is_cuda()); auto at_cpu = at_tensor.cpu(); @@ -50,7 +50,7 @@ TEST(CUDACaffe2ToPytorch, Simple) { caffe2::math::Set(16, 777, data, &context); } at::Tensor at_tensor(c2_tensor); - ASSERT_TRUE(&at_tensor.type() != nullptr); + ASSERT_TRUE(&at_tensor.dispatch_type() != nullptr); ASSERT_TRUE(at_tensor.is_cuda()); auto at_cpu = at_tensor.cpu(); diff --git a/aten/src/ATen/test/scalar_test.cpp b/aten/src/ATen/test/scalar_test.cpp index 24c4da8..08b4bdf 100644 --- a/aten/src/ATen/test/scalar_test.cpp +++ b/aten/src/ATen/test/scalar_test.cpp @@ -19,7 +19,7 @@ struct Foo { static void apply(Tensor a, Tensor b) { scalar_type s = 1; std::stringstream ss; - ss << "hello, dispatch: " << a.type().toString() << s << "\n"; + ss << "hello, dispatch: " << a.dispatch_type().toString() << s << "\n"; auto data = (scalar_type*)a.data_ptr(); (void)data; } @@ -105,7 +105,7 @@ TEST(TestScalar, TestScalar) { scalar_t s = 1; std::stringstream ss; ASSERT_NO_THROW( - ss << "hello, dispatch" << x.type().toString() << s << "\n"); + ss << "hello, dispatch" << x.dispatch_type().toString() << s << "\n"); auto data = (scalar_t*)x.data_ptr(); (void)data; }); diff --git a/aten/src/ATen/test/tensor_interop_test.cpp b/aten/src/ATen/test/tensor_interop_test.cpp index 495fe0f..5cdd0e4 100644 --- a/aten/src/ATen/test/tensor_interop_test.cpp +++ b/aten/src/ATen/test/tensor_interop_test.cpp @@ -12,7 +12,7 @@ TEST(Caffe2ToPytorch, SimpleLegacy) { data[i] = i; } at::Tensor at_tensor(c2_tensor); - ASSERT_TRUE(&at_tensor.type() != nullptr); + ASSERT_TRUE(&at_tensor.dispatch_type() != nullptr); auto it = at_tensor.data(); for (int64_t i = 0; i < 16; i++) { @@ -27,7 +27,7 @@ TEST(Caffe2ToPytorch, Simple) { data[i] = i; } at::Tensor at_tensor(c2_tensor); - 
ASSERT_TRUE(&at_tensor.type() != nullptr); + ASSERT_TRUE(&at_tensor.dispatch_type() != nullptr); auto it = at_tensor.data(); for (int64_t i = 0; i < 16; i++) { diff --git a/aten/src/ATen/test/undefined_tensor_test.cpp b/aten/src/ATen/test/undefined_tensor_test.cpp index 9c9c42c..5a3c926 100644 --- a/aten/src/ATen/test/undefined_tensor_test.cpp +++ b/aten/src/ATen/test/undefined_tensor_test.cpp @@ -27,9 +27,9 @@ TEST(TestUndefined, UndefinedTest) { ASSERT_ANY_THROW(und.add(5)); ASSERT_ANY_THROW(und.mm(und)); - und.toType(und.type()); - ASSERT_ANY_THROW(und.toType(ft.type())); - ASSERT_ANY_THROW(ft.toType(und.type())); + und.toType(und.dispatch_type()); + ASSERT_ANY_THROW(und.toType(ft.dispatch_type())); + ASSERT_ANY_THROW(ft.toType(und.dispatch_type())); und.toType(ScalarType::Undefined); ASSERT_ANY_THROW(und.toType(ScalarType::Float)); ASSERT_ANY_THROW(ft.toType(ScalarType::Undefined)); diff --git a/test/test_nn.py b/test/test_nn.py index 2ba7167..bd4b18b 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -4619,7 +4619,7 @@ class TestNN(NNTestCase): def test_CTCLoss_zero_infinity(self): target_lengths = [60, 25, 20] input_lengths = [50, 50, 50] - targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int) + targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int, device='cuda') log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_() res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='sum', zero_infinity=True) diff --git a/tools/autograd/derivatives.yaml b/tools/autograd/derivatives.yaml index 8e9fd2d..815fea3 100644 --- a/tools/autograd/derivatives.yaml +++ b/tools/autograd/derivatives.yaml @@ -753,10 +753,10 @@ self: index_select_backward(grad, dim, indices, self.sizes(), true) - name: split(Tensor self, int64_t split_size, int64_t dim) - self: split_backward(grads, split_size, dim, self.sizes(), self.type()) + self: split_backward(grads, split_size, dim, self.sizes(), self.options()) - name: split_with_sizes(Tensor self, IntArrayRef split_sizes, int64_t dim) - self: split_with_sizes_backward(grads, split_sizes, dim, self.sizes(), self.type()) + self: split_with_sizes_backward(grads, split_sizes, dim, self.sizes(), self.options()) - name: sqrt(Tensor self) self: grad / (2 * result) @@ -1199,7 +1199,7 @@ - name: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) grad_output: elu_backward(grad, alpha, scale, input_scale, output) - output: grad * grad_output * input_scale * (output < 0).toType(grad.type()) + output: grad * grad_output * input_scale * (output < 0).type_as(grad) - name: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, IntArrayRef kernel_size, IntArrayRef output_size, Tensor indices) grad_output: max_pool_double_backward(grad, indices, 2) diff --git a/tools/autograd/templates/Functions.cpp b/tools/autograd/templates/Functions.cpp index 4736ca2..68012b0 100644 --- a/tools/autograd/templates/Functions.cpp +++ b/tools/autograd/templates/Functions.cpp @@ -100,7 +100,7 @@ Tensor norm_backward(const Tensor & grad, const Tensor & self, const optional &grads, - IntArrayRef split_sizes, int64_t dim, IntArrayRef sizes, const Type &type) { + IntArrayRef split_sizes, int64_t dim, IntArrayRef sizes, const at::TensorOptions &options) { dim = at::maybe_wrap_dim(dim, sizes.size()); // it's possible some of the grads are not defined (represents tensors of all 0s). 
@@ -724,7 +724,7 @@ Tensor split_with_sizes_backward(const std::vector &g auto length = split_sizes[j]; auto grad_size = sizes.vec(); grad_size[dim] = length; - grads_all_defined[j] = at::zeros(grad_size, type); + grads_all_defined[j] = at::zeros(grad_size, options); } } @@ -733,13 +733,13 @@ Tensor split_with_sizes_backward(const std::vector &g } Tensor split_backward(const std::vector &grads, - int64_t split_size, int64_t dim, IntArrayRef sizes, const Type &type) { + int64_t split_size, int64_t dim, IntArrayRef sizes, const at::TensorOptions &options) { dim = at::maybe_wrap_dim(dim, sizes.size()); int64_t dim_size = sizes[dim]; int64_t num_splits = grads.size(); std::vector split_sizes(num_splits, split_size); split_sizes[num_splits - 1] = split_size - (split_size * num_splits - dim_size); - return split_with_sizes_backward(grads, split_sizes, dim, sizes, type); + return split_with_sizes_backward(grads, split_sizes, dim, sizes, options); } Tensor max_pool_double_backward(const Tensor & grad, const Tensor & indices, int dim) { @@ -857,7 +857,7 @@ Tensor l1_loss_double_backward_grad_output(const Tensor & grad, const Tensor & i Tensor smooth_l1_loss_double_backward(const Tensor & grad, const Tensor & input, const Tensor & target, int64_t reduction) { auto d = (input - target).abs(); - auto grad_input = grad * (d < 1).toType(grad.type()); + auto grad_input = grad * (d < 1).type_as(grad); if (reduction == Reduction::Mean) { grad_input /= input.numel(); } @@ -930,7 +930,7 @@ Tensor soft_margin_loss_double_backward_grad_output(const Tensor & grad, const T Tensor softplus_double_backward(const Tensor & grad, const Tensor & input, Scalar beta, Scalar threshold) { auto x = (input * beta); - return sigmoid_backward(grad, x.sigmoid()) * (x < threshold).toType(grad.type()) * beta; + return sigmoid_backward(grad, x.sigmoid()) * (x < threshold).type_as(grad) * beta; } diff --git a/tools/autograd/templates/Functions.h b/tools/autograd/templates/Functions.h index 6b032dd..99b6ab3 100644 --- a/tools/autograd/templates/Functions.h +++ b/tools/autograd/templates/Functions.h @@ -35,7 +35,7 @@ struct TypeAndSize { /* implicit */ TypeAndSize(const Tensor & t) : sizes(t.sizes().vec()) - , type(&t.type()) {} + , type(&t.dispatch_type()) {} Tensor zeros() { return at::zeros(sizes, *type); } diff --git a/tools/autograd/templates/python_torch_functions.cpp b/tools/autograd/templates/python_torch_functions.cpp index 49110c6..2c5bbee 100644 --- a/tools/autograd/templates/python_torch_functions.cpp +++ b/tools/autograd/templates/python_torch_functions.cpp @@ -52,9 +52,9 @@ static void check_out_type_matches(Tensor result, } auto scalarType_arg = scalarType_is_none ? result.scalar_type() : scalarType; auto layout_arg = layout_is_none ? *torch::getLayout(result.type().backend()) : layout; - auto device_type_arg = device_is_none ? torch::getDeviceType(result.type()) : device.type(); + auto device_type_arg = device_is_none ? 
result.device().type() : device.type(); const auto& type = torch::getVariableType(scalarType_arg, layout_arg, device_type_arg); - if (result.type() != type) { + if (result.dispatch_type() != type) { AT_ERROR( "type corresponding to ", type.toString(), " does not match type of out parameter (", result.type().toString(), ")"); diff --git a/tools/autograd/templates/python_variable_methods.cpp b/tools/autograd/templates/python_variable_methods.cpp index 0f043a6..34943d5 100644 --- a/tools/autograd/templates/python_variable_methods.cpp +++ b/tools/autograd/templates/python_variable_methods.cpp @@ -496,7 +496,7 @@ static PyObject * THPVariable_new(PyObject* self, PyObject* args, PyObject* kwar HANDLE_TH_ERRORS auto& self_ = reinterpret_cast(self)->cdata; OptionalDeviceGuard device_guard(device_of(self_)); - return THPVariable_Wrap(torch::utils::legacy_tensor_new(self_.type(), args, kwargs)); + return THPVariable_Wrap(torch::utils::legacy_tensor_new(self_.dispatch_type(), args, kwargs)); END_HANDLE_TH_ERRORS } @@ -506,7 +506,7 @@ static PyObject * THPVariable_new_empty(PyObject* self, PyObject* args, PyObject jit::tracer::warn("new_empty", jit::tracer::LEGACY_CONSTRUCTOR); auto& self_ = reinterpret_cast(self)->cdata; OptionalDeviceGuard device_guard(device_of(self_)); - return THPVariable_Wrap(torch::utils::new_empty(self_.type(), args, kwargs)); + return THPVariable_Wrap(torch::utils::new_empty(self_.dispatch_type(), args, kwargs)); END_HANDLE_TH_ERRORS } @@ -516,7 +516,7 @@ static PyObject * THPVariable_new_full(PyObject* self, PyObject* args, PyObject* jit::tracer::warn("new_full", jit::tracer::LEGACY_CONSTRUCTOR); auto& self_ = reinterpret_cast(self)->cdata; OptionalDeviceGuard device_guard(device_of(self_)); - return THPVariable_Wrap(torch::utils::new_full(self_.type(), args, kwargs)); + return THPVariable_Wrap(torch::utils::new_full(self_.dispatch_type(), args, kwargs)); END_HANDLE_TH_ERRORS } @@ -526,7 +526,7 @@ static PyObject * THPVariable_new_ones(PyObject* self, PyObject* args, PyObject* jit::tracer::warn("new_ones", jit::tracer::LEGACY_CONSTRUCTOR); auto& self_ = reinterpret_cast(self)->cdata; OptionalDeviceGuard device_guard(device_of(self_)); - return THPVariable_Wrap(torch::utils::new_ones(self_.type(), args, kwargs)); + return THPVariable_Wrap(torch::utils::new_ones(self_.dispatch_type(), args, kwargs)); END_HANDLE_TH_ERRORS } @@ -536,7 +536,7 @@ static PyObject * THPVariable_new_tensor(PyObject* self, PyObject* args, PyObjec jit::tracer::warn("new_tensor", jit::tracer::LEGACY_CONSTRUCTOR); auto& self_ = reinterpret_cast(self)->cdata; OptionalDeviceGuard device_guard(device_of(self_)); - return THPVariable_Wrap(torch::utils::new_tensor(self_.type(), args, kwargs)); + return THPVariable_Wrap(torch::utils::new_tensor(self_.dispatch_type(), args, kwargs)); END_HANDLE_TH_ERRORS } @@ -546,7 +546,7 @@ static PyObject * THPVariable_new_zeros(PyObject* self, PyObject* args, PyObject jit::tracer::warn("new_zeros", jit::tracer::LEGACY_CONSTRUCTOR); auto& self_ = reinterpret_cast(self)->cdata; OptionalDeviceGuard device_guard(device_of(self_)); - return THPVariable_Wrap(torch::utils::new_zeros(self_.type(), args, kwargs)); + return THPVariable_Wrap(torch::utils::new_zeros(self_.dispatch_type(), args, kwargs)); END_HANDLE_TH_ERRORS } @@ -615,7 +615,7 @@ static PyObject * THPVariable_type(PyObject* self, PyObject* args, PyObject* kwa ParsedArgs<2> parsed_args; auto r = parser.parse(args, kwargs, parsed_args); if (r.isNone(0)) { - return 
THPUtils_packString(torch::utils::type_to_string(self_.type())); + return THPUtils_packString(torch::utils::type_to_string(self_.dispatch_type())); } auto obj = r.pyobject(0); std::string type_name; diff --git a/torch/csrc/DynamicTypes.cpp b/torch/csrc/DynamicTypes.cpp index 64d9c76..e984461 100644 --- a/torch/csrc/DynamicTypes.cpp +++ b/torch/csrc/DynamicTypes.cpp @@ -135,10 +135,6 @@ THPLayout* getLayout(at::Backend backend) { return layout; } -at::Device::Type getDeviceType(const at::Type& type) { - return type.is_cuda() ? at::Device::Type::CUDA : at::Device::Type::CPU; -} - PyObject* createPyObject(const at::Storage& storage) { auto type = getPyTypeObject(storage); diff --git a/torch/csrc/DynamicTypes.h b/torch/csrc/DynamicTypes.h index 5aee862..1f3c4bc 100644 --- a/torch/csrc/DynamicTypes.h +++ b/torch/csrc/DynamicTypes.h @@ -38,5 +38,4 @@ bool isStorage(PyObject* obj); THPDtype* getDtype(at::ScalarType scalarType); THPLayout* getLayout(at::Backend backend); at::Type& getVariableType(at::ScalarType scalarType, const THPLayout& layout, const at::Device& device); -at::Device::Type getDeviceType(const at::Type& type); } // namespace torch diff --git a/torch/csrc/Generator.cpp b/torch/csrc/Generator.cpp index 50c321f..0c166ac 100644 --- a/torch/csrc/Generator.cpp +++ b/torch/csrc/Generator.cpp @@ -84,7 +84,7 @@ static PyObject * THPGenerator_setState(THPGenerator *self, PyObject *_new_state auto& tensor = ((THPVariable*)_new_state)->cdata.data(); auto& tensor_type = at::globalContext().getNonVariableType(tensor.type().backend(), tensor.scalar_type()); if (tensor_type != CPU(kByte)) { - auto type_name = torch::utils::type_to_string(tensor_type); + auto type_name = torch::utils::type_to_string(tensor.dispatch_type()); throw TypeError("expected a torch.ByteTensor, but got %s", type_name.c_str()); } THGenerator *generator = THPGenerator_TH_CData(self); diff --git a/torch/csrc/autograd/VariableTypeManual.cpp b/torch/csrc/autograd/VariableTypeManual.cpp index 640a8d5..27d6fe4 100644 --- a/torch/csrc/autograd/VariableTypeManual.cpp +++ b/torch/csrc/autograd/VariableTypeManual.cpp @@ -174,7 +174,7 @@ const Variable & VariableType::checked_cast_variable(const Tensor & t, const cha AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #", pos, " '", name, "'"); } if (!t.is_variable()) { - AT_ERROR("Expected object of type Variable but found type ", t.type().toString(), " for argument #", pos, " '", name, "'"); + AT_ERROR("Expected object of type Variable but found type ", t.dispatch_type().toString(), " for argument #", pos, " '", name, "'"); } return as_variable_ref(t); } @@ -184,7 +184,7 @@ Variable & VariableType::checked_cast_variable(Tensor & t, const char * name, in AT_ERROR("Expected a Tensor of type Variable but found an undefined Tensor for argument #", pos, " '", name, "'"); } if (!t.is_variable()) { - AT_ERROR("Expected object of type Variable but found type ", t.type().toString(), " for argument #", pos, " '", name, "'"); + AT_ERROR("Expected object of type Variable but found type ", t.dispatch_type().toString(), " for argument #", pos, " '", name, "'"); } return as_variable_ref(t); } @@ -215,8 +215,8 @@ std::vector VariableType::unpack(at::TensorList tl, const char *name if (!t.defined()) { continue; } - if (!isVariableType(t.type())) { - AT_ERROR("Expected object of type Variable but found type ", t.type().toString(), " at position #", i, " " + if (!isVariableType(t.dispatch_type())) { + AT_ERROR("Expected object of type Variable but found type ", 
t.dispatch_type().toString(), " at position #", i, " " "for iterable argument #", pos, " '", name, "'"); } ret[i] = static_cast(t).data(); @@ -266,7 +266,7 @@ Tensor & VariableType::s_copy_(Tensor & self, const Tensor & src, bool non_block if (requires_grad) { grad_fn = std::make_shared(); grad_fn->set_next_edges(collect_next_edges(self, src)); - grad_fn->src_type = &src.type(); + grad_fn->src_type = &src.dispatch_type(); grad_fn->src_device = src.device(); } { diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp index 35b4f89..8882edc 100644 --- a/torch/csrc/autograd/engine.cpp +++ b/torch/csrc/autograd/engine.cpp @@ -371,7 +371,7 @@ static void validate_outputs(const edge_list& edges, variable_list& grads, const } grads[i] = at::sum_to(std::move(grads[i]), metadata.shape()); } - if (!is_compatible_type(metadata.type(), grads[i].type())) { + if (!is_compatible_type(metadata.type(), grads[i].dispatch_type())) { std::stringstream ss; ss << "invalid gradient at index " << i << " - expected type "; ss << metadata.type() << " but got " << grads[i].type(); diff --git a/torch/csrc/autograd/function.h b/torch/csrc/autograd/function.h index 3462e02..7c73893 100644 --- a/torch/csrc/autograd/function.h +++ b/torch/csrc/autograd/function.h @@ -360,7 +360,7 @@ struct MakeNextFunctionList : IterArgs { /// `input_nr` thus equal to `function->num_inputs()`. Additionally, it /// increments the `Function`'s number of inputs by one. Approximately /// equivalent to `variable.set_gradient_edge(function, -/// function->add_input_metadata(variable.type(), variable.sizes()))`. +/// function->add_input_metadata(variable.dispatch_type(), variable.sizes()))`. /// If you don't want the `Function`'s `num_inputs` to be incremented, use /// `set_gradient_edge` directly. 
 inline void create_gradient_edge(
diff --git a/torch/csrc/autograd/input_metadata.h b/torch/csrc/autograd/input_metadata.h
index c0ec3a3..df767de 100644
--- a/torch/csrc/autograd/input_metadata.h
+++ b/torch/csrc/autograd/input_metadata.h
@@ -16,7 +16,7 @@ struct InputMetadata {
   : type_{&type} , shape_{shape}, device_{device} { }
 
   InputMetadata(const at::Tensor& t)
-  : InputMetadata(t.type(), t.sizes(), t.device()) { }
+  : InputMetadata(t.dispatch_type(), t.sizes(), t.device()) { }
 
   bool is_valid() const {
     return type_ != nullptr;
diff --git a/torch/csrc/autograd/python_function.cpp b/torch/csrc/autograd/python_function.cpp
index c105533..81d7871 100644
--- a/torch/csrc/autograd/python_function.cpp
+++ b/torch/csrc/autograd/python_function.cpp
@@ -44,7 +44,7 @@ PyObject *THPFunctionClass = nullptr;
 namespace torch { namespace autograd {
 
 VariableInfo::VariableInfo(const Variable& var)
-  : type(&var.type())
+  : type(&var.dispatch_type())
   , device(var.device())
   , size(var.sizes().vec())
   , requires_grad(var.requires_grad()) {
diff --git a/torch/csrc/autograd/python_hook.cpp b/torch/csrc/autograd/python_hook.cpp
index b3763e4..f268a56 100644
--- a/torch/csrc/autograd/python_hook.cpp
+++ b/torch/csrc/autograd/python_hook.cpp
@@ -160,7 +160,7 @@ static void check_single_result(PyObject* _original, PyObject* _result, PyObject
   auto& original = ((THPVariable*)_original)->cdata.data();
   auto& result = ((THPVariable*)_result)->cdata.data();
 
-  if (original.type().ID() != result.type().ID()) {
+  if (original.type() != result.type()) {
     std::stringstream ss;
     auto name = hook_name(hook);
     ss << "hook '" << name << "' has changed the type of value (";
diff --git a/torch/csrc/autograd/python_variable_indexing.cpp b/torch/csrc/autograd/python_variable_indexing.cpp
index 73da40c..8035aeb 100644
--- a/torch/csrc/autograd/python_variable_indexing.cpp
+++ b/torch/csrc/autograd/python_variable_indexing.cpp
@@ -178,7 +178,7 @@ static Variable applySlicing(const Variable& self, PyObject* index, variable_lis
       handle_var(var);
     }
   } else if (PySequence_Check(obj)) {
-    handle_var(sequenceToVariable(self.type(), obj));
+    handle_var(sequenceToVariable(self.dispatch_type(), obj));
   } else {
     auto index = THPObjectPtr(PyNumber_Index(obj));
     if (!index) {
@@ -334,7 +334,7 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
   auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
   OptionalDeviceGuard device_guard(device_of(self_));
-  auto value = valueToTensor(self_.type(), py_value);
+  auto value = valueToTensor(self_.dispatch_type(), py_value);
 
   // handle simple types: integers, slices, ellipsis, bool
   if (index == Py_False) { // NOLINT(cppcoreguidelines-pro-type-cstyle-cast)
diff --git a/torch/csrc/autograd/variable.cpp b/torch/csrc/autograd/variable.cpp
index 33b9537..4747296 100644
--- a/torch/csrc/autograd/variable.cpp
+++ b/torch/csrc/autograd/variable.cpp
@@ -172,7 +172,7 @@ void Variable::Impl::set_data(const at::Tensor &new_data) {
 
   // Updates metadata
   data_type_ = new_data.type().typeMeta();
-  type_id_ = new_data.type().type_id();
+  type_id_ = new_data.dispatch_type().type_id();
   is_variable_ = true;
 
   auto new_data_copy = at::Tensor(new_data.getIntrusivePtr()->shallow_copy_and_detach());
@@ -214,7 +214,7 @@ const std::shared_ptr<Function>& Variable::grad_fn() const {
     fn->storage_offset = data().storage_offset();
     fn->set_next_edges(collect_next_edges(diff_view_meta->base_));
     fn->add_input_metadata(
-      diff_view_meta->base_.type()
+      diff_view_meta->base_.dispatch_type()
     , sizes() // Note: sizes(), not base_.sizes(), is intentional
     , diff_view_meta->base_.device());
     diff_view_meta->grad_fn_ = std::move(fn);
diff --git a/torch/csrc/cuda/comm.cpp b/torch/csrc/cuda/comm.cpp
index 93439b3..c1f1b43 100644
--- a/torch/csrc/cuda/comm.cpp
+++ b/torch/csrc/cuda/comm.cpp
@@ -47,7 +47,7 @@ struct unique_type_checker {
 };
 
 std::vector<Tensor> broadcast(const Tensor& tensor, IntArrayRef devices) {
-  auto & type = tensor.type();
+  auto & type = tensor.dispatch_type();
   if (type.is_cuda() && tensor.get_device() != devices[0])
     throw std::runtime_error("device of broadcasted tensor must appear as the "
                              "first on devices list");
diff --git a/torch/csrc/cuda/nccl.cpp b/torch/csrc/cuda/nccl.cpp
index 4d51a12..2db8306 100644
--- a/torch/csrc/cuda/nccl.cpp
+++ b/torch/csrc/cuda/nccl.cpp
@@ -126,7 +126,7 @@ void _check_inputs(
   device_set devices;
   int64_t numel = inputs[0].numel();
-  auto& type = inputs[0].type();
+  auto type = inputs[0].type();
 
   for (size_t i = 0; i < len; i++) {
     auto input = inputs[i];
@@ -178,7 +178,7 @@ bool is_available(TensorList tensors) {
 #ifdef USE_NCCL
   device_set devices;
   for (auto& tensor : tensors) {
-    auto& type = tensor.type();
+    auto type = tensor.type();
     if (!type.is_cuda() || type.is_sparse())
       return false;
     if (!tensor.is_contiguous())
diff --git a/torch/csrc/jit/node_hashing.cpp b/torch/csrc/jit/node_hashing.cpp
index 32250ec..05cf2e9 100644
--- a/torch/csrc/jit/node_hashing.cpp
+++ b/torch/csrc/jit/node_hashing.cpp
@@ -16,7 +16,7 @@ namespace jit {
 namespace {
 
 bool tensorEqual(const at::Tensor& lhs, const at::Tensor& rhs) {
-  return &lhs.type() == &rhs.type() && lhs.equal(rhs);
+  return lhs.type() == rhs.type() && lhs.equal(rhs);
 }
 
 bool tensorListEqual(
diff --git a/torch/csrc/tensor/python_tensor.cpp b/torch/csrc/tensor/python_tensor.cpp
index f2eceb3..a2a17d0 100644
--- a/torch/csrc/tensor/python_tensor.cpp
+++ b/torch/csrc/tensor/python_tensor.cpp
@@ -85,7 +85,7 @@ static PyObject* Tensor_instancecheck(PyTensorType* self, PyObject* arg) {
     // be nullptr if you had a tensor of some type, in which case you can
     // skip initializign aten_type(), but TestAutograd.test_type_conversions
     // seems to violate this property (for whatever reason.)
-    if (&var.type() == self->aten_type()) {
+    if (&var.dispatch_type() == self->aten_type()) {
       Py_RETURN_TRUE;
     }
   }
@@ -346,7 +346,7 @@ void py_set_default_dtype(PyObject* obj) {
   if (THPDtype_Check(obj)) {
     auto &current_default = get_default_tensor_type();
     type = &get_tensor_type((THPDtype*)obj, torch::getLayout(current_default.backend()),
-                            torch::getDeviceType(current_default) == at::Device::Type::CUDA);
+                            current_default.device_type() == at::Device::Type::CUDA);
   } else {
     throw TypeError("invalid type object");
   }
diff --git a/torch/csrc/utils/tensor_apply.cpp b/torch/csrc/utils/tensor_apply.cpp
index 1eae084..8b6debf 100644
--- a/torch/csrc/utils/tensor_apply.cpp
+++ b/torch/csrc/utils/tensor_apply.cpp
@@ -68,7 +68,7 @@ Tensor & map_(Tensor & self, const Tensor & other_, PyObject* fn) {
   }
   if (other_.type() != self.type()) {
     throw TypeError("map_: expected %s for 'other' (got %s)",
-        self.type().toString(), other_.type().toString());
+        self.type().toString().c_str(), other_.type().toString().c_str());
   }
   Tensor other;
   std::tie(other) = expand_inplace(self, other_, "map_");
@@ -83,11 +83,11 @@ Tensor & map2_(Tensor & self, const Tensor & x_, const Tensor & y_, PyObject* fn
   }
   if (x_.type() != self.type()) {
     throw TypeError("map2_: expected %s for argument 'x' (got %s)",
-        self.type().toString(), x_.type().toString());
+        self.type().toString().c_str(), x_.type().toString().c_str());
   }
   if (y_.type() != self.type()) {
     throw TypeError("map2_: expected %s for argument 'y' (got %s)",
-        self.type().toString(), y_.type().toString());
+        self.type().toString().c_str(), y_.type().toString().c_str());
   }
   Tensor other1, other2;
   std::tie(other1, other2) = expand_inplace(self, x_, y_, "map2_");
diff --git a/torch/csrc/utils/tensor_flatten.cpp b/torch/csrc/utils/tensor_flatten.cpp
index 37c98f3..0423cbc 100644
--- a/torch/csrc/utils/tensor_flatten.cpp
+++ b/torch/csrc/utils/tensor_flatten.cpp
@@ -18,9 +18,8 @@ std::vector<TensorGroup> take_tensors(
   size_t cur_group_size = 0;
 
   for (const auto & tensor : tensors) {
-    auto& type = tensor.type();
     size_t tensor_size;
-    if (type.is_sparse()) {
+    if (tensor.is_sparse()) {
       const auto& indices = tensor._indices();
       const auto& values = tensor._values();
       tensor_size = indices.numel() * indices.element_size() +
@@ -29,7 +28,7 @@ std::vector<TensorGroup> take_tensors(
       tensor_size = tensor.numel() * tensor.element_size();
     }
 
-    auto& type_group = groups[type.ID()];
+    auto& type_group = groups[tensor.dispatch_type().ID()];
     type_group.tensors.push_back(tensor);
 
     if (fine_grained) {
@@ -67,13 +66,13 @@ void reorder_tensors_like(std::vector<Tensor>& tensors, TensorList order) {
   AT_ASSERT(tensors.size() == order.size());
   std::unordered_map<at::Type*, std::vector<size_t>> type_indices;
   for (size_t i = 0, num_tensors = tensors.size(); i < num_tensors; ++i)
-    type_indices[&tensors[i].type()].push_back(i);
+    type_indices[&tensors[i].dispatch_type()].push_back(i);
 
   std::unordered_map<at::Type*, size_t> type_used;
   std::vector<Tensor> ordered_tensors;
   ordered_tensors.reserve(tensors.size());
   for (auto & tmpl_tensor : order) {
-    auto * type = &tmpl_tensor.type();
+    auto * type = &tmpl_tensor.dispatch_type();
     auto & indices = type_indices[type];
     auto & used = type_used[type];
     ordered_tensors.push_back(tensors[indices[used++]]);
diff --git a/torch/csrc/utils/tensor_flatten.h b/torch/csrc/utils/tensor_flatten.h
index efb4768..dde48e1 100644
--- a/torch/csrc/utils/tensor_flatten.h
+++ b/torch/csrc/utils/tensor_flatten.h
@@ -33,7 +33,7 @@ struct TensorGroup {
 
   at::Type& type() {
     AT_ASSERT(!tensors.empty());
-    return tensors[0].type();
+    return tensors[0].dispatch_type();
   }
 };
 
diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp
index d7c693f..7b04fd6 100644
--- a/torch/csrc/utils/tensor_new.cpp
+++ b/torch/csrc/utils/tensor_new.cpp
@@ -84,8 +84,8 @@ Tensor new_with_storage(const Type& type, Storage storage) {
 }
 
 Tensor new_with_tensor(const Type& type, const Tensor& other) {
-  if (other.type() != type) {
-    throw TypeError("expected %s (got %s)", type.toString(), other.type().toString());
+  if (other.dispatch_type() != type) {
+    throw TypeError("expected %s (got %s)", type.toString(), other.type().toString().c_str());
   }
   return other.slice();
 }
@@ -209,7 +209,7 @@ Tensor internal_new_from_data(
     // infer the scalar type and device type; it's not expected to infer the layout since these constructors
     // are defined per-layout-type (e.g. tensor vs sparse_coo_tensor).
     const auto& scalar_type = type_inference ? var.scalar_type() : type.scalarType();
-    auto device = device_opt.has_value() ? *device_opt : (type_inference ? var.device() : at::Device(torch::getDeviceType(type)));
+    auto device = device_opt.has_value() ? *device_opt : (type_inference ? var.device() : at::Device(type.device_type()));
     AutoNoGIL no_gil;
     maybe_initialize_cuda(device);
     return var.to(device, scalar_type, /*non_blocking=*/false, /*copy=*/copy_variables);
@@ -233,7 +233,7 @@ Tensor internal_new_from_data(
   recursive_store(
       (char*)tensor.data_ptr(), tensor.sizes(), tensor.strides(), 0,
       scalar_type, tensor.element_size(), data);
-  auto device = device_opt.has_value() ? *device_opt : at::Device(torch::getDeviceType(type));
+  auto device = device_opt.has_value() ? *device_opt : at::Device(type.device_type());
   AutoNoGIL no_gil;
   maybe_initialize_cuda(device);
   return tensor.to(device, scalar_type, /*non_blocking=*/false, /*copy=*/false);
@@ -484,7 +484,7 @@ Tensor sparse_coo_tensor_ctor(const Type& default_type, PyObject* args, PyObject
     at::OptionalDeviceGuard device_guard(r.deviceOptional(3));
     // if no dtype provided, infer type based on value type.
     Tensor values = internal_new_from_data(values_type, r.deviceOptional(3), r.pyobject(1), false, true, type_inference);
-    const auto& indices_type = values.type().toScalarType(kLong);
+    const auto& indices_type = values.dispatch_type().toScalarType(kLong);
     Tensor indices = internal_new_from_data(indices_type, r.deviceOptional(3), r.pyobject(0), false, true, false);
     return at::sparse_coo_tensor(indices, values, values.options().layout(at::kSparse)).set_requires_grad(r.toBool(4));
   } else if (r.idx == 1) {
@@ -493,7 +493,7 @@ Tensor sparse_coo_tensor_ctor(const Type& default_type, PyObject* args, PyObject
     const auto& values_type = type.toDense();
     at::OptionalDeviceGuard device_guard(r.deviceOptional(4));
     Tensor values = internal_new_from_data(values_type, r.deviceOptional(4), r.pyobject(1), false, true, type_inference);
-    const auto& indices_type = values.type().toScalarType(kLong);
+    const auto& indices_type = values.dispatch_type().toScalarType(kLong);
     Tensor indices = internal_new_from_data(indices_type, r.deviceOptional(4), r.pyobject(0), false, true, false);
     return at::sparse_coo_tensor(indices, values, r.intlist(2), values.options().layout(at::kSparse)).set_requires_grad(r.toBool(5));
   } else if (r.idx == 2) {
diff --git a/torch/csrc/utils/tensor_numpy.cpp b/torch/csrc/utils/tensor_numpy.cpp
index cf41742..134ab7c 100644
--- a/torch/csrc/utils/tensor_numpy.cpp
+++ b/torch/csrc/utils/tensor_numpy.cpp
@@ -63,7 +63,7 @@ PyObject* tensor_to_numpy(const at::Tensor& tensor) {
         "convert to a dense tensor first.");
   }
   if (tensor.type().backend() != Backend::CPU) {
-    throw TypeError("NumPy conversion for %s is not supported", tensor.type().toString());
+    throw TypeError("NumPy conversion for %s is not supported", tensor.type().toString().c_str());
   }
   auto dtype = aten_to_dtype(tensor.scalar_type());
   auto sizes = to_numpy_shape(tensor.sizes());
diff --git a/torch/lib/c10d/ProcessGroupGloo.cpp b/torch/lib/c10d/ProcessGroupGloo.cpp
index d5c674c..f0251b4 100644
--- a/torch/lib/c10d/ProcessGroupGloo.cpp
+++ b/torch/lib/c10d/ProcessGroupGloo.cpp
@@ -123,7 +123,7 @@ void setOutput(O& opts, at::Tensor& tensor) {
 #ifdef USE_CUDA
 
 at::Tensor pinnedLike(at::Tensor& tensor) {
-  auto& type = tensor.type().toBackend(at::Backend::CPU);
+  auto& type = tensor.dispatch_type().toBackend(at::Backend::CPU);
   auto* allocator = at::cuda::getPinnedMemoryAllocator();
   return type.tensorWithAllocator(tensor.sizes(), tensor.strides(), allocator);
 }
diff --git a/torch/lib/c10d/Utils.hpp b/torch/lib/c10d/Utils.hpp
index 7315260..b9b3a73 100644
--- a/torch/lib/c10d/Utils.hpp
+++ b/torch/lib/c10d/Utils.hpp
@@ -34,7 +34,7 @@ inline std::string toString(at::IntArrayRef l) {
 }
 
 inline void assertSameType(
-    const at::Type& type,
+    const at::DeprecatedTypeProperties& type,
     const std::vector<at::Tensor>& tensors) {
   for (size_t i = 0; i < tensors.size(); i++) {
     if (tensors[i].type() != type) {
@@ -66,7 +66,7 @@ inline void assertSameSizeAndType(const std::vector<at::Tensor>& tensors) {
   }
 
   // Ensure all tensors have identical type and shape
-  auto& type = tensors[0].type();
+  auto type = tensors[0].type();
   auto sizes = tensors[0].sizes();
   for (size_t i = 1; i < tensors.size(); i++) {
     if (tensors[i].type() != type) {
@@ -88,7 +88,7 @@ inline void assertSameSizeAndType(const std::vector<at::Tensor>& tensors) {
 
 inline void assertTypeMatch(
     std::function fn,
-    const at::Type& type,
+    const at::DeprecatedTypeProperties& type,
     const at::ArrayRef<at::Tensor>& tensors,
     size_t index) {
   if (tensors[index].type() != type) {
@@ -179,7 +179,7 @@ inline void assertCPU(
 inline void assertTypeAndSizesMatch(
     std::function fn,
     const at::ArrayRef<at::Tensor>& tensors,
-    const at::Type& type,
+    const at::DeprecatedTypeProperties& type,
     const at::IntArrayRef& sizes) {
   for (size_t i = 0; i < tensors.size(); i++) {
     assertTypeMatch(fn, type, tensors, i);
-- 
2.7.4