From 3a85f88efd0fd93890447b40cd14ad3fe5389014 Mon Sep 17 00:00:00 2001
From: Gregory Chanan
Date: Tue, 19 Mar 2019 07:50:31 -0700
Subject: [PATCH] Remove deviceTypeToBackend, which is underspecified. (#18135)

Summary:
There are multiple backends for a device type, so we just kill this function.
Also, kill a getNonVariableType instance which was also underspecified.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18135

Differential Revision: D14507474

Pulled By: gchanan

fbshipit-source-id: fc791a76d4b851b23d09a070725f3838621eb13d
---
 aten/src/ATen/Context.h     |  4 ---
 c10/core/Backend.h          | 17 ---------
 c10/core/TensorOptions.h    | 84 +++++++++++++++++++++------------------------
 torch/csrc/DynamicTypes.cpp |  6 ++--
 4 files changed, 44 insertions(+), 67 deletions(-)

diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h
index ff75ba3..af92aeb 100644
--- a/aten/src/ATen/Context.h
+++ b/aten/src/ATen/Context.h
@@ -171,10 +171,6 @@ static inline TypeExtendedInterface& getNonVariableType(Backend p, ScalarType s)
   return globalContext().getNonVariableType(p, s);
 }
 
-static inline TypeExtendedInterface& getNonVariableType(DeviceType p, ScalarType s) {
-  return globalContext().getNonVariableType(deviceTypeToBackend(p), s);
-}
-
 CAFFE2_API TypeExtendedInterface& getType(TensorOptions options);
 CAFFE2_API TypeExtendedInterface& getType(const TensorImpl*);
 CAFFE2_API TypeExtendedInterface& getType(const Tensor&);
diff --git a/c10/core/Backend.h b/c10/core/Backend.h
index ddb1dc4..0e1e1fa 100644
--- a/c10/core/Backend.h
+++ b/c10/core/Backend.h
@@ -138,23 +138,6 @@ static inline DeviceType backendToDeviceType(Backend b) {
   }
 }
 
-static inline Backend deviceTypeToBackend(DeviceType d) {
-  switch (d) {
-    case DeviceType::CPU:
-      return Backend::CPU;
-    case DeviceType::CUDA:
-      return Backend::CUDA;
-    case DeviceType::HIP:
-      return Backend::HIP;
-    case DeviceType::MSNPU:
-      return Backend::MSNPU;
-    case DeviceType::XLA:
-      return Backend::XLA;
-    default:
-      AT_ERROR("Unknown device type ", d);
-  }
-}
-
 static inline Backend backendToCPU(Backend b) {
   switch (b) {
     case Backend::CPU:
diff --git a/c10/core/TensorOptions.h b/c10/core/TensorOptions.h
index 9c90137..26faf31 100644
--- a/c10/core/TensorOptions.h
+++ b/c10/core/TensorOptions.h
@@ -325,14 +325,47 @@ struct C10_API TensorOptions {
   // Resolves the ATen backend specified by the current construction axes.
   Backend backend() const noexcept {
-    Backend backend = deviceTypeToBackend(device().type());
+    return at::tensorTypeIdToBackend(computeTensorTypeId());
+  }
+
+  inline TensorTypeId computeTensorTypeId() const {
     switch (layout()) {
-      case kStrided:
-        return backend;
-      case kSparse:
-        return toSparse(backend);
+      case Layout::Strided:
+        switch (device().type()) {
+          case DeviceType::CPU:
+            return CPUTensorId();
+          case DeviceType::CUDA:
+            return CUDATensorId();
+          case DeviceType::MKLDNN:
+            return MKLDNNTensorId();
+          case DeviceType::OPENGL:
+            return OpenGLTensorId();
+          case DeviceType::OPENCL:
+            return OpenCLTensorId();
+          case DeviceType::IDEEP:
+            return IDEEPTensorId();
+          case DeviceType::HIP:
+            return HIPTensorId();
+          case DeviceType::MSNPU:
+            return MSNPUTensorId();
+          case DeviceType::XLA:
+            return XLATensorId();
+          default:
+            AT_ERROR("Unsupported device type for dense layout: ", device().type());
+        }
+      case Layout::Sparse:
+        switch (device().type()) {
+          case DeviceType::CPU:
+            return SparseCPUTensorId();
+          case DeviceType::CUDA:
+            return SparseCUDATensorId();
+          case DeviceType::HIP:
+            return SparseHIPTensorId();
+          default:
+            AT_ERROR("Unsupported device type for sparse layout: ", device().type());
+        }
       default:
-        return backend;
+        AT_ERROR("Unsupported layout: ", layout());
     }
   }
@@ -491,44 +524,7 @@ inline TensorOptions dtype() {
 // TensorOptions. WARNING: If you do this, you need to fix the calls
 // to computeTensorTypeId in caffe2/tensor.h
 inline TensorTypeId computeTensorTypeId(TensorOptions options) {
-  switch (options.layout()) {
-    case Layout::Strided:
-      switch (options.device().type()) {
-        case DeviceType::CPU:
-          return CPUTensorId();
-        case DeviceType::CUDA:
-          return CUDATensorId();
-        case DeviceType::MKLDNN:
-          return MKLDNNTensorId();
-        case DeviceType::OPENGL:
-          return OpenGLTensorId();
-        case DeviceType::OPENCL:
-          return OpenCLTensorId();
-        case DeviceType::IDEEP:
-          return IDEEPTensorId();
-        case DeviceType::HIP:
-          return HIPTensorId();
-        case DeviceType::MSNPU:
-          return MSNPUTensorId();
-        case DeviceType::XLA:
-          return XLATensorId();
-        default:
-          AT_ERROR("Unsupported device type for dense layout: ", options.device().type());
-      }
-    case Layout::Sparse:
-      switch (options.device().type()) {
-        case DeviceType::CPU:
-          return SparseCPUTensorId();
-        case DeviceType::CUDA:
-          return SparseCUDATensorId();
-        case DeviceType::HIP:
-          return SparseHIPTensorId();
-        default:
-          AT_ERROR("Unsupported device type for sparse layout: ", options.device().type());
-      }
-    default:
-      AT_ERROR("Unsupported layout: ", options.layout());
-  }
+  return options.computeTensorTypeId();
 }
 
 inline DeviceType computeDeviceType(TensorTypeId tid) {
diff --git a/torch/csrc/DynamicTypes.cpp b/torch/csrc/DynamicTypes.cpp
index ba217c9..64d9c76 100644
--- a/torch/csrc/DynamicTypes.cpp
+++ b/torch/csrc/DynamicTypes.cpp
@@ -71,9 +71,11 @@ at::Type* get_type(const std::string& name, bool is_cuda, bool is_sparse) {
 
 PyTypeObject* getPyTypeObject(const at::Storage& storage) {
+  at::ScalarType scalarType = at::typeMetaToScalarType(storage.dtype());
+  at::TensorOptions options = at::TensorOptions(storage.device_type()).dtype(scalarType);
   auto attype = at::globalContext().getNonVariableTypeOpt(
-      at::deviceTypeToBackend(storage.device_type()),
-      at::typeMetaToScalarType(storage.dtype()));
+      at::tensorTypeIdToBackend(at::computeTensorTypeId(options)),
+      scalarType);
   auto it = attype_to_py_storage_type.find(attype);
   if (it != attype_to_py_storage_type.end()) {
     return it->second;
-- 
2.7.4
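
For context, the standalone sketch below (hypothetical code, not part of the patch or of c10) illustrates why deviceTypeToBackend(DeviceType) was underspecified: once the layout axis is taken into account, a single device type maps to more than one backend, e.g. CPU to Backend::CPU for strided tensors but Backend::SparseCPU for sparse ones. This is exactly the two-axis dispatch that computeTensorTypeId performs above; the enums and the computeBackend helper here are simplified stand-ins for the real types.

// why_underspecified.cpp -- simplified, hypothetical stand-ins for the c10 types.
#include <iostream>
#include <stdexcept>

enum class DeviceType { CPU, CUDA };
enum class Layout { Strided, Sparse };
enum class Backend { CPU, CUDA, SparseCPU, SparseCUDA };

// A DeviceType alone cannot determine the Backend; the layout axis is
// also needed, which is what the removed deviceTypeToBackend() ignored.
Backend computeBackend(DeviceType d, Layout l) {
  switch (l) {
    case Layout::Strided:
      return d == DeviceType::CPU ? Backend::CPU : Backend::CUDA;
    case Layout::Sparse:
      return d == DeviceType::CPU ? Backend::SparseCPU : Backend::SparseCUDA;
  }
  throw std::logic_error("unreachable");
}

int main() {
  // Same device type (CPU), two different backends depending on layout:
  std::cout << (computeBackend(DeviceType::CPU, Layout::Strided) == Backend::CPU) << '\n';        // prints 1
  std::cout << (computeBackend(DeviceType::CPU, Layout::Sparse) == Backend::SparseCPU) << '\n';   // prints 1
  return 0;
}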