}
}
+TEST(Caffe2ToPytorch, ExternalData) {
+ caffe2::Tensor c2_tensor = caffe2::empty({4, 4}, at::kLong);
+ int64_t buf[16];
+ for (int64_t i = 0; i < 16; i++) {
+ buf[i] = i;
+ }
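+ // Note: ShareExternalPointer takes the capacity in bytes, not elements;
+ // the storage divides it by the itemsize to recover numel.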
+ c2_tensor.ShareExternalPointer(buf, 16 * sizeof(int64_t));
+
+ // If the buffer is allocated externally, we can still pass the tensor
+ // around, but we can't resize its storage using PT APIs.
+ at::Tensor at_tensor(c2_tensor);
+ auto it = at_tensor.data<int64_t>();
+ for (int64_t i = 0; i < 16; i++) {
+ ASSERT_EQ(it[i], i);
+ }
+ ASSERT_FALSE(at_tensor.storage().resizable());
+ ASSERT_ANY_THROW(at_tensor.resize_({7, 7}));
+}
+
TEST(Caffe2ToPytorch, Op) {
caffe2::Tensor c2_tensor(caffe2::CPU);
c2_tensor.Resize(3, 3);
}
}
+TEST(Caffe2ToPytorch, MutualResizes) {
+ caffe2::Tensor c2_tensor = caffe2::empty({5, 5}, at::kFloat);
+ auto data = c2_tensor.mutable_data<float>();
+ for (int64_t i = 0; i < 25; i++) {
+ data[i] = 0;
+ }
+
+ at::Tensor at_tensor(c2_tensor);
+
+ // a change made through the PT tensor is visible in the C2 tensor
+ at_tensor[0][0] = 123;
+ ASSERT_EQ(c2_tensor.mutable_data<float>()[0], 123);
+
+ // resize the PT tensor to a smaller size - storage is preserved
+ at_tensor.resize_({4, 4});
+ c2_tensor.mutable_data<float>()[1] = 234;
+ ASSERT_EQ(at_tensor[0][1].item().to<float>(), 234);
+
+ // resize the PT tensor to a larger size - storage is preserved
+ at_tensor.resize_({6, 6});
+ c2_tensor.mutable_data<float>()[2] = 345;
+ ASSERT_EQ(at_tensor[0][2].item().to<float>(), 345);
+ ASSERT_EQ(c2_tensor.sizes()[0], 6);
+ ASSERT_EQ(c2_tensor.sizes()[1], 6);
+
+ // resize the Caffe2 tensor - its semantics do NOT preserve the data, but
+ // the TensorImpl is still shared
+ c2_tensor.Resize(7, 7);
+ c2_tensor.mutable_data<float>()[3] = 456;
+ ASSERT_EQ(at_tensor[0][3].item().to<float>(), 456);
+ ASSERT_EQ(at_tensor.sizes()[0], 7);
+ ASSERT_EQ(at_tensor.sizes()[1], 7);
+}
+
TEST(PytorchToCaffe2, Op) {
caffe2::Workspace workspace;
caffe2::NetDef net;
numel_(numel),
resizable_(resizable),
allocator_(allocator) {
+ if (resizable) {
+ AT_ASSERTM(
+ allocator_, "For resizable storage, allocator must be provided");
+ }
if (numel > 0) {
if (data_type_.id() == caffe2::TypeIdentifier::uninitialized()) {
AT_ERROR(
: StorageImpl(device, caffe2::TypeMeta()) {}
StorageImpl(at::Device device, caffe2::TypeMeta data_type)
- : StorageImpl(data_type, 0, at::DataPtr(nullptr, device), nullptr, true) {
- }
+ : StorageImpl(
+ data_type,
+ 0,
+ at::DataPtr(nullptr, device),
+ caffe2::GetAllocator(device.type()),
+ true) {}
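+ // Note: this delegating constructor creates a resizable storage, so it must
+ // pass a real allocator; the old form removed above (nullptr allocator with
+ // resizable == true) would now trip the assert in the primary constructor.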
StorageImpl& operator=(StorageImpl&& other) = default;
StorageImpl& operator=(const StorageImpl&) = delete;
}
void set_resizable(bool resizable) {
+ if (resizable) {
+ // A resizable storage must have an allocator
+ AT_ASSERT(allocator_);
+ }
resizable_ = resizable;
}
// capacity() might not return the value that was set here, if itemsize does
// not evenly divide it.
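+ // For example, a capacity of 10 bytes with a 4-byte itemsize yields
+ // numel_ = 2, so capacity() reports 8.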
numel_ = capacity / data_type_.itemsize();
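+ // The memory is now owned externally, so we cannot reallocate it ourselves:
+ // drop the allocator and mark the storage non-resizable.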
+ allocator_ = nullptr;
+ resizable_ = false;
}
private:
} else {
int64_t numel = capacity / data_type.itemsize();
// Create a new Storage
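+ // The new Storage wraps external data, so it gets no allocator and is not
+ // resizable, matching the shared-storage path above.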
- storage_ = Storage(data_type, numel, std::move(data_ptr), nullptr, true);
+ storage_ = Storage(
+ data_type,
+ numel,
+ std::move(data_ptr),
+ /*allocator=*/nullptr,
+ /*resizable=*/false);
data_type_ = data_type;
storage_offset_ = 0;
}
return storage_.data();
}
const Allocator* allocator = storage_.allocator();
- // TODO: Get rid of StaticContext
+ // Storage might have a nullptr allocator in rare cases, for example, if an
+ // external memory segment has been wrapped in a Tensor and we don't know
+ // how to reallocate it. However, in order to preserve legacy C2 behavior,
+ // we allow reallocating the memory using the default allocator.
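+ //
+ // A rough sketch of the legacy flow this keeps working (illustrative only):
+ //   float buf[100];
+ //   caffe2::Tensor t(caffe2::CPU);
+ //   t.Resize(10, 10);
+ //   t.ShareExternalPointer(buf, 100 * sizeof(float)); // allocator becomes null
+ //   t.Resize(20, 20);
+ //   t.mutable_data<float>(); // reaches this fallback, uses the default allocator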
if (allocator == nullptr) {
allocator = caffe2::GetAllocator(storage_.device_type());
}