Summary:
_th_tensor is moving off Type, so these calls need to be replaced.
Unfortunately, replacing these with a full-fledged solution [e.g. from_storage(..., TensorOptions)] is a bit complicated because the storage itself fully defines the Type (modulo variable). It's simpler to just wait for the Variable/Tensor merge rather than to solve this now, so instead I changed the call sites to: at::empty({0}, type.options()).set_(storage...).
This isn't great because we are also trying to get rid of Type::options, but this seems to be the lesser-of-two-evils.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14877
Differential Revision: D13374310
Pulled By: gchanan
fbshipit-source-id: eb953ed041507e6190d6f32e383912e5a08311cd
}
// Wraps caller-owned memory `data` in a Tensor without copying.
// `sizes`/`strides` describe the view; `deleter` is invoked when the
// underlying storage is released, so the caller controls the lifetime.
Tensor TypeDefault::tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter) const {
auto storage = storageFromBlob(data, computeStorageSize(sizes, strides), deleter);
// Diff: the deprecated Type::_th_tensor call is replaced by creating an
// empty tensor with this Type's options and rebinding it to the storage
// via set_ — see the commit summary for why this interim form was chosen.
- return _th_tensor(storage, 0, sizes, strides);
+ return at::empty({0}, options()).set_(storage, 0, sizes, strides);
}
// Convenience overload: builds a tensor backed by `allocator` using the
// default (contiguous) strides computed from `sizes`, then delegates to
// the (sizes, strides, allocator) overload.
// NOTE: the original wrapped `allocator` in std::move, but moving a raw
// pointer is just a copy (clang-tidy performance-move-const-arg), so the
// cast is dropped; behavior is unchanged.
Tensor TypeDefault::tensorWithAllocator(IntList sizes, Allocator* allocator) const {
  return tensorWithAllocator(sizes, defaultStrides(sizes), allocator);
}
// Builds a tensor whose storage is obtained from `allocator`, sized to
// hold the region described by `sizes` and `strides`.
Tensor TypeDefault::tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const {
auto storage = storageWithAllocator(computeStorageSize(sizes, strides), std::move(allocator));
// Diff: same _th_tensor -> at::empty({0}, options()).set_(...) migration
// as tensorFromBlob above (see commit summary).
- return _th_tensor(storage, 0, sizes, strides);
+ return at::empty({0}, options()).set_(storage, 0, sizes, strides);
}
Storage TypeDefault::storage(bool resizable) const {
// NB: This new tensor is created to support cuda tensors.
// Storages can be mutated when converting tensors from cuda to cpu,
// and we need a cpu tensor to copy data from.
- storage_tensor = at::getType(tensor)
- ._th_tensor(
+ storage_tensor = at::empty({0}, tensor.options())
+ .set_(
tensor.storage(),
/* storageOffset = */ 0,
/* size = */
storage_it = storageMap.insert(std::make_pair(
record_key, cpu_storage)).first;
} else if (device.type() == at::DeviceType::CUDA) {
- at::Tensor cpu_tensor = at::CPU(type)._th_tensor(
+ at::Tensor cpu_tensor = at::empty({0}, at::CPU(type).options()).set_(
cpu_storage, tensor_proto.offset(), dims, strides);
at::Storage cuda_storage = cpu_tensor.to(device,
cpu_tensor.scalar_type()).storage();
at::Tensor result;
if (device.type() == at::DeviceType::CPU) {
- result = at::CPU(type)._th_tensor(
+ result = at::empty({0}, at::CPU(type).options()).set_(
storage_it->second, tensor_proto.offset(), dims, strides);
} else if (device.type() == at::DeviceType::CUDA) {
- result = at::CUDA(type)._th_tensor(
+ result = at::empty({0}, at::CUDA(type).options()).set_(
storage_it->second, tensor_proto.offset(), dims, strides);
}
AT_ASSERT(result.defined());