Replace calls to Type::_th_tensor. (#14877)
author Gregory Chanan <gchanan@fb.com>
Fri, 7 Dec 2018 20:37:03 +0000 (12:37 -0800)
committer Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Fri, 7 Dec 2018 21:04:48 +0000 (13:04 -0800)
Summary:
_th_tensor is moving off Type, so these calls need to be replaced.

Unfortunately, replacing these with a full-fledged solution [e.g. from_storage(..., TensorOptions)] is complicated, because the storage itself fully defines the Type (modulo variable). It's simpler to wait for the Variable/Tensor merge than to solve this now, so instead I changed the call sites to: at::empty({0}, type.options()).set_(storage...).

This isn't great, because we are also trying to get rid of Type::options, but it seems to be the lesser of two evils.
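The shape of the change, as a minimal sketch (from_storage_workaround is a hypothetical helper name, not part of this PR; it assumes the ATen API as of this commit):

    #include <ATen/ATen.h>

    // Before (being removed): construct directly through the Type:
    //   at::Tensor t = type._th_tensor(storage, /*storageOffset=*/0, sizes, strides);
    // After: allocate a zero-element tensor carrying the right TensorOptions,
    // then rebind it to the existing storage in place with set_().
    at::Tensor from_storage_workaround(at::Storage storage,
                                       at::IntList sizes,
                                       at::IntList strides,
                                       const at::Type& type) {
      return at::empty({0}, type.options())
          .set_(storage, /*storage_offset=*/0, sizes, strides);
    }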
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14877

Differential Revision: D13374310

Pulled By: gchanan

fbshipit-source-id: eb953ed041507e6190d6f32e383912e5a08311cd

aten/src/ATen/templates/TypeDefault.cpp
torch/csrc/jit/export.cpp
torch/csrc/jit/import.cpp

diff --git a/aten/src/ATen/templates/TypeDefault.cpp b/aten/src/ATen/templates/TypeDefault.cpp
index ad1c360..96174db 100644
@@ -84,14 +84,14 @@ Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, const std::functi
 }
 Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter) const {
   auto storage = storageFromBlob(data, computeStorageSize(sizes, strides), deleter);
-  return _th_tensor(storage, 0, sizes, strides);
+  return at::empty({0}, options()).set_(storage, 0, sizes, strides);
 }
 Tensor TypeDefault::tensorWithAllocator(IntList sizes, Allocator* allocator) const {
   return tensorWithAllocator(sizes, defaultStrides(sizes), std::move(allocator));
 }
 Tensor TypeDefault::tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const {
   auto storage = storageWithAllocator(computeStorageSize(sizes, strides), std::move(allocator));
-  return _th_tensor(storage, 0, sizes, strides);
+  return at::empty({0}, options()).set_(storage, 0, sizes, strides);
 }
 
 Storage TypeDefault::storage(bool resizable) const {
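Both call sites above back the public at::from_blob path, which wraps caller-owned memory without copying. A hedged usage sketch (the buffer and deleter are illustrative; assumes ATen as of this commit):

    #include <ATen/ATen.h>
    #include <cstdlib>

    int main() {
      // A caller-owned buffer; ATen must not free it with its own allocator.
      float* data = static_cast<float*>(std::malloc(4 * sizeof(float)));
      for (int i = 0; i < 4; ++i) data[i] = static_cast<float>(i);

      // Wraps the buffer without copying; the deleter runs once the last
      // tensor referencing the wrapped storage goes away.
      at::Tensor t = at::from_blob(
          data,
          /*sizes=*/{2, 2},
          /*deleter=*/[](void* p) { std::free(p); },
          at::TensorOptions().dtype(at::kFloat));
      return t.numel() == 4 ? 0 : 1;
    }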
diff --git a/torch/csrc/jit/export.cpp b/torch/csrc/jit/export.cpp
index 932efbb..85dac6a 100644
@@ -559,8 +559,8 @@ void ScriptModuleSerializer::convertAndWriteTensor(
       // NB: This new tensor is created to support cuda tensors.
       // Storages can be mutated when converting tensors from cuda to cpu,
       // and we need a cpu tensor to copy data from.
-      storage_tensor = at::getType(tensor)
-                           ._th_tensor(
+      storage_tensor = at::empty({0}, tensor.options())
+                           .set_(
                                tensor.storage(),
                                /* storageOffset = */ 0,
                                /* size = */
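The comment in this hunk is the key constraint: serialized bytes have to be read from host memory, so a CUDA-backed storage is first materialized as a CPU tensor. That step in isolation, as a hedged sketch (the helper name is illustrative):

    #include <ATen/ATen.h>

    // For a CPU tensor this is essentially free; for a CUDA tensor, .to()
    // performs the device-to-host copy into a fresh CPU storage whose
    // bytes can then be written out.
    at::Tensor materialize_on_cpu(const at::Tensor& t) {
      return t.to(at::kCPU, t.scalar_type());
    }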
diff --git a/torch/csrc/jit/import.cpp b/torch/csrc/jit/import.cpp
index 1dde3c1..b18b163 100644
@@ -142,7 +142,7 @@ at::Tensor ScriptModuleDeserializer::loadTensor(const torch::TensorDef& tensor_p
       storage_it = storageMap.insert(std::make_pair(
             record_key, cpu_storage)).first;
     } else if (device.type() == at::DeviceType::CUDA) {
-      at::Tensor cpu_tensor = at::CPU(type)._th_tensor(
+      at::Tensor cpu_tensor = at::empty({0}, at::CPU(type).options()).set_(
           cpu_storage, tensor_proto.offset(), dims, strides);
       at::Storage cuda_storage = cpu_tensor.to(device,
           cpu_tensor.scalar_type()).storage();
@@ -166,10 +166,10 @@ at::Tensor ScriptModuleDeserializer::loadTensor(const torch::TensorDef& tensor_p
 
   at::Tensor result;
   if (device.type() == at::DeviceType::CPU) {
-    result = at::CPU(type)._th_tensor(
+    result = at::empty({0}, at::CPU(type).options()).set_(
         storage_it->second, tensor_proto.offset(), dims, strides);
   } else if (device.type() == at::DeviceType::CUDA) {
-    result = at::CUDA(type)._th_tensor(
+    result = at::empty({0}, at::CUDA(type).options()).set_(
         storage_it->second, tensor_proto.offset(), dims, strides);
   }
   AT_ASSERT(result.defined());
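Taken together, the loader's new pattern is: view the deserialized CPU storage via set_(), and, for CUDA, copy the data across and view the resulting device storage. A hedged, self-contained sketch of that flow (load_tensor and its parameters are illustrative; assumes ATen as of this commit):

    #include <ATen/ATen.h>

    at::Tensor load_tensor(at::Storage cpu_storage,
                           int64_t offset,
                           at::IntList dims,
                           at::IntList strides,
                           at::Device device,
                           at::ScalarType type) {
      // CPU: view the deserialized host storage directly; no copy.
      at::Tensor cpu_tensor = at::empty({0}, at::CPU(type).options())
                                  .set_(cpu_storage, offset, dims, strides);
      if (device.type() == at::DeviceType::CPU) {
        return cpu_tensor;
      }
      // CUDA: copy the data to the device, then rebind a CUDA tensor over
      // the copied storage, mirroring the two set_() call sites above.
      at::Storage cuda_storage = cpu_tensor.to(device, type).storage();
      return at::empty({0}, at::CUDA(type).options())
          .set_(cuda_storage, offset, dims, strides);
    }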