From: Gregory Chanan
Date: Fri, 7 Dec 2018 20:37:03 +0000 (-0800)
Subject: Replace calls of Type::_th_tensor. (#14877)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~2399
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=efc5e9f71a059fa72135795bb9ec5aab33b36479;p=platform%2Fupstream%2Fpytorch.git

Replace calls of Type::_th_tensor. (#14877)

Summary:
_th_tensor is moving off Type, so these calls need to be replaced.

Unfortunately, replacing these with a full-fledged solution [e.g. from_storage(..., TensorOptions)] is complicated because the storage itself fully defines the Type (modulo variable). It's simpler to wait for the Variable/Tensor merge than to solve this now, so instead I changed the call sites to:
at::empty({0}, type.options()).set_(storage...).
This isn't great because we are also trying to get rid of Type::options, but it seems to be the lesser of two evils.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/14877

Differential Revision: D13374310

Pulled By: gchanan

fbshipit-source-id: eb953ed041507e6190d6f32e383912e5a08311cd
---

diff --git a/aten/src/ATen/templates/TypeDefault.cpp b/aten/src/ATen/templates/TypeDefault.cpp
index ad1c360..96174db 100644
--- a/aten/src/ATen/templates/TypeDefault.cpp
+++ b/aten/src/ATen/templates/TypeDefault.cpp
@@ -84,14 +84,14 @@ Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, const std::functi
 }
 Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter) const {
   auto storage = storageFromBlob(data, computeStorageSize(sizes, strides), deleter);
-  return _th_tensor(storage, 0, sizes, strides);
+  return at::empty({0}, options()).set_(storage, 0, sizes, strides);
 }
 Tensor TypeDefault::tensorWithAllocator(IntList sizes, Allocator* allocator) const {
   return tensorWithAllocator(sizes, defaultStrides(sizes), std::move(allocator));
 }
 Tensor TypeDefault::tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const {
   auto storage = storageWithAllocator(computeStorageSize(sizes, strides), std::move(allocator));
-  return _th_tensor(storage, 0, sizes, strides);
+  return at::empty({0}, options()).set_(storage, 0, sizes, strides);
 }
 Storage TypeDefault::storage(bool resizable) const {
diff --git a/torch/csrc/jit/export.cpp b/torch/csrc/jit/export.cpp
index 932efbb..85dac6a 100644
--- a/torch/csrc/jit/export.cpp
+++ b/torch/csrc/jit/export.cpp
@@ -559,8 +559,8 @@ void ScriptModuleSerializer::convertAndWriteTensor(
     // NB: This new tensor is created to support cuda tensors.
     // Storages can be mutated when converting tensors from cuda to cpu,
     // and we need a cpu tensor to copy data from.
-    storage_tensor = at::getType(tensor)
-        ._th_tensor(
+    storage_tensor = at::empty({0}, tensor.options())
+        .set_(
             tensor.storage(),
             /* storageOffset = */ 0,
             /* size = */
diff --git a/torch/csrc/jit/import.cpp b/torch/csrc/jit/import.cpp
index 1dde3c1..b18b163 100644
--- a/torch/csrc/jit/import.cpp
+++ b/torch/csrc/jit/import.cpp
@@ -142,7 +142,7 @@ at::Tensor ScriptModuleDeserializer::loadTensor(const torch::TensorDef& tensor_p
       storage_it = storageMap.insert(std::make_pair(
           record_key, cpu_storage)).first;
     } else if (device.type() == at::DeviceType::CUDA) {
-      at::Tensor cpu_tensor = at::CPU(type)._th_tensor(
+      at::Tensor cpu_tensor = at::empty({0}, at::CPU(type).options()).set_(
          cpu_storage, tensor_proto.offset(), dims, strides);
       at::Storage cuda_storage =
           cpu_tensor.to(device, cpu_tensor.scalar_type()).storage();
@@ -166,10 +166,10 @@ at::Tensor ScriptModuleDeserializer::loadTensor(const torch::TensorDef& tensor_p
   at::Tensor result;
   if (device.type() == at::DeviceType::CPU) {
-    result = at::CPU(type)._th_tensor(
+    result = at::empty({0}, at::CPU(type).options()).set_(
         storage_it->second, tensor_proto.offset(), dims, strides);
   } else if (device.type() == at::DeviceType::CUDA) {
-    result = at::CUDA(type)._th_tensor(
+    result = at::empty({0}, at::CUDA(type).options()).set_(
         storage_it->second, tensor_proto.offset(), dims, strides);
   }
   AT_ASSERT(result.defined());
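
The Summary above names the replacement pattern but every call site inlines it, so here is a minimal self-contained sketch of the idea, assuming the ATen C++ API of this era. at::empty, at::TensorOptions, and Tensor::set_ are real APIs; the helper name tensor_from_storage and its exact signature are hypothetical, illustrating roughly what the from_storage(..., TensorOptions) convenience mentioned in the Summary might look like:

    #include <ATen/ATen.h>

    // Hypothetical helper (not part of this patch): view an existing Storage
    // as a Tensor without copying element data, replacing the removed
    // Type::_th_tensor(storage, offset, sizes, strides).
    at::Tensor tensor_from_storage(
        at::Storage storage,
        int64_t storage_offset,
        at::IntList sizes,
        at::IntList strides,
        const at::TensorOptions& options) {
      // at::empty({0}, options) allocates a zero-element tensor with the
      // right dtype/device; set_() then rebinds it to `storage` with the
      // requested offset, sizes, and strides, so no element data is copied.
      return at::empty({0}, options).set_(storage, storage_offset, sizes, strides);
    }

Compared to the removed _th_tensor, this routes through public ATen factory and in-place functions at the cost of a throwaway zero-element allocation; as the Summary notes, it still depends on Type::options(), which is itself slated for removal.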