TfLiteStatus ArenaPlanner::ResolveTensorAllocation(int tensor_index) {
TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
if (tensor.allocation_type == kTfLiteArenaRw) {
- TF_LITE_ENSURE_STATUS(
- arena_.ResolveAlloc(context_, allocs_[tensor_index], &tensor.data.raw));
+ // Skip resolution if the size of the tensor is zero, leaving it as a
+ // nullptr.
+ if (allocs_[tensor_index].size != 0) {
+ TF_LITE_ENSURE_STATUS(arena_.ResolveAlloc(context_, allocs_[tensor_index],
+ &tensor.data.raw));
+ }
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
TF_LITE_ENSURE_STATUS(persistent_arena_.ResolveAlloc(
EXPECT_EQ(GetOffset(10), GetOffsetAfter(0));
// The outputs are never allocated because they are not connected to any
// inputs.
- EXPECT_EQ(GetOffset(5), 0);
- EXPECT_EQ(GetOffset(11), 0);
+ EXPECT_TRUE((*graph.tensors())[5].data.raw == nullptr);
+ EXPECT_TRUE((*graph.tensors())[11].data.raw == nullptr);
}
TEST_F(ArenaPlannerTest, GraphWithOneOp) {
SetGraph(&graph);
auto is_unallocated = [&](int tensor_index) {
- // TODO(ahentz): We'd to use nullptr to represent unallocated tensors, but
- // the current code still points them all to the beginning fo the alloc
- // (that is, zero offset).
- // return (*graph.tensors())[tensor_index].data.raw == nullptr;
- return GetOffset(tensor_index) == 0;
+ return (*graph.tensors())[tensor_index].data.raw == nullptr;
};
// The allocation plan is made at the beginning and is independent of
TfLiteIntArrayFree(new_size);
return kTfLiteError;
}
+
+ // Realloc space for kTfLiteDynamic tensors.
+ TfLiteTensorRealloc(bytesRequired, tensor);
tensor->bytes = bytesRequired;
}
if (tensor->dims) TfLiteIntArrayFree(tensor->dims);
TfLiteStatus BytesRequired(TfLiteType type, const int* dims, int dims_size,
size_t* bytes);
- // Request an tensor be resized implementation.
+ // Implementation of the tensor resize request. If the given tensor is of
+ // type kTfLiteDynamic it will also be allocated new memory.
TfLiteStatus ResizeTensorImpl(TfLiteTensor* tensor, TfLiteIntArray* new_size);
// Report a detailed error string (will be printed to stderr).
free(hwcn_weights->data.raw);
hwcn_weights->data.raw = nullptr;
}
+
+ // Note that hwcn_weights is a kTfLiteDynamic tensor, and ResizeTensor
+ // will actually allocate space for it. It would be more efficient if we
+ // placed hwcn_weights in the persistent arena.
auto hwcn_weights_status =
context->ResizeTensor(context, hwcn_weights, hwcn_weights_size);
if (hwcn_weights_status != kTfLiteOk) return hwcn_weights_status;
- hwcn_weights->data.raw = static_cast<char*>(malloc(hwcn_weights->bytes));
// TODO(petewarden): If Resize() is called when the size hasn't actually
// changed, this will do extra redundant work.