}
Storage TypeDefault::storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const {
- return Storage(
+ return Storage(
typeMeta(),
- InefficientStdFunctionContext::makeDataPtr(data, deleter, getDeviceFromPtr(data)),
size,
- deleter);
+ InefficientStdFunctionContext::makeDataPtr(
+ data, deleter, getDeviceFromPtr(data)),
+ /*allocator=*/nullptr,
+ /*resizable=*/false);
}
Storage TypeDefault::storageWithAllocator(int64_t size, Allocator* allocator) const {
- return Storage(typeMeta(), size, allocator);
+ // NOTE: the storage created here could potentially be marked as resizable
+ return Storage(typeMeta(), size, allocator, /*resizable=*/false);
}
Tensor TypeDefault::unsafeTensorFromTH(void * th_pointer, bool retain) const {
auto tensor_impl = c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(static_cast<TensorImpl*>(th_pointer));
public:
Storage() {}
Storage(c10::intrusive_ptr<StorageImpl> ptr) : storage_impl_(std::move(ptr)) {}
+
+ // Allocates a memory buffer using the given allocator and creates a storage
+ // with it
Storage(
caffe2::TypeMeta data_type,
size_t size,
Allocator* allocator,
- bool resizable = false)
+ bool resizable)
: storage_impl_(c10::make_intrusive<StorageImpl>(
data_type,
size,
allocator,
resizable)) {}
- Storage(
- caffe2::TypeMeta data_type,
- at::DataPtr data_ptr,
- size_t size,
- const std::function<void(void*)>& deleter,
- bool resizable = false)
- : storage_impl_(c10::make_intrusive<StorageImpl>(
- data_type,
- size,
- std::move(data_ptr),
- /* allocator */ nullptr,
- resizable)) {}
-
- Storage(at::DeviceType device_type)
- : storage_impl_(
- c10::make_intrusive<StorageImpl>(at::Device(device_type))) {}
- Storage(at::Device device)
- : storage_impl_(c10::make_intrusive<StorageImpl>(device)) {}
- Storage(at::Device device, caffe2::TypeMeta data_type)
- : storage_impl_(c10::make_intrusive<StorageImpl>(device, data_type)) {}
-
+ // Creates a storage with a pre-allocated memory buffer. The allocator is
+ // kept for potential future reallocations; it can be nullptr if the storage
+ // is non-resizable.
Storage(
caffe2::TypeMeta data_type,
int64_t numel,
allocator,
resizable)) {}
+ // Legacy constructor for partially initialized (dtype or memory) storages
+ // that can be temporarily created with Caffe2 APIs. See the note on top of
+ // TensorImpl.h for details.
+ static Storage create_legacy(at::Device device, caffe2::TypeMeta data_type) {
+ return Storage(c10::make_intrusive<StorageImpl>(
+ data_type,
+ 0,
+ at::DataPtr(nullptr, device),
+ GetAllocator(device.type()),
+ true));
+ }
+
template <typename T>
inline bool IsType() const {
return storage_impl_->IsType<T>();
allocator,
resizable) {}
- explicit StorageImpl(at::Device device)
- : StorageImpl(device, caffe2::TypeMeta()) {}
-
- StorageImpl(at::Device device, caffe2::TypeMeta data_type)
- : StorageImpl(
- data_type,
- 0,
- at::DataPtr(nullptr, device),
- GetAllocator(device.type()),
- true) {}
-
StorageImpl& operator=(StorageImpl&& other) = default;
StorageImpl& operator=(const StorageImpl&) = delete;
StorageImpl() = delete;
*/
inline void FreeMemory() {
// We'll detach from the old Storage and create a new one
- storage_ = Storage(storage_.device(), data_type_);
+ storage_ = Storage::create_legacy(storage_.device(), data_type_);
storage_offset_ = 0;
}
storage_.set_dtype(meta);
} else {
if (data_type_ != meta) {
- storage_ = Storage(storage_.device(), meta);
+ storage_ = Storage::create_legacy(storage_.device(), meta);
}
}
data_type_ = meta;
*/
explicit Tensor(at::Device device)
: impl_(c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(
- Storage(device),
+ Storage::create_legacy(device, TypeMeta()),
c10::computeTensorTypeId(at::device(device).layout(at::kStrided)),
/*is_variable=*/ false
)) {
if (impl_->dtype() != src.impl_->dtype()) {
// NB: copy preserves device_type
// This storage will get initialized by the mutable_data call below.
- impl_->set_storage(at::Storage(impl_->device_type(), src.impl_->dtype()));
+ impl_->set_storage(at::Storage::create_legacy(impl_->device_type(), src.impl_->dtype()));
}
impl_->Resize(src.impl_->sizes());
std::tie(storage_ptr, record_size) = reader_.getRecord(record_key);
auto cpu_storage = at::Storage(
at::CPU(type).typeMeta(),
- std::move(storage_ptr),
record_size / at::CPU(type).typeMeta().itemsize(),
- nullptr); // NB: we didn't set any allocator for the tensor
+ std::move(storage_ptr),
+ /*allocator=*/nullptr,
+ /*resizable=*/false); // NB: we didn't set any allocator for the tensor
if (device.type() == at::DeviceType::CPU) {
storage_it =
storageMap.insert(std::make_pair(record_key, cpu_storage)).first;