From: A. Unique TensorFlower Date: Mon, 5 Feb 2018 18:39:18 +0000 (-0800) Subject: Proper reallocation of dynamic tensors. X-Git-Tag: upstream/v1.7.0~31^2~1011 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=bccd80c45dab8f2856c176d45df7e7c72f35e14e;p=platform%2Fupstream%2Ftensorflow.git Proper reallocation of dynamic tensors. PiperOrigin-RevId: 184550199 --- diff --git a/tensorflow/contrib/lite/arena_planner.cc b/tensorflow/contrib/lite/arena_planner.cc index bf1bcdd..87b17c3 100644 --- a/tensorflow/contrib/lite/arena_planner.cc +++ b/tensorflow/contrib/lite/arena_planner.cc @@ -185,8 +185,12 @@ TfLiteStatus ArenaPlanner::CalculateAllocations(int first_node, int last_node) { TfLiteStatus ArenaPlanner::ResolveTensorAllocation(int tensor_index) { TfLiteTensor& tensor = *graph_info_->tensor(tensor_index); if (tensor.allocation_type == kTfLiteArenaRw) { - TF_LITE_ENSURE_STATUS( - arena_.ResolveAlloc(context_, allocs_[tensor_index], &tensor.data.raw)); + // Skip resolution if the size of the tensor is zero, leaving it as a + // nullptr. + if (allocs_[tensor_index].size != 0) { + TF_LITE_ENSURE_STATUS(arena_.ResolveAlloc(context_, allocs_[tensor_index], + &tensor.data.raw)); + } } if (tensor.allocation_type == kTfLiteArenaRwPersistent) { TF_LITE_ENSURE_STATUS(persistent_arena_.ResolveAlloc( diff --git a/tensorflow/contrib/lite/arena_planner_test.cc b/tensorflow/contrib/lite/arena_planner_test.cc index e10611e..a8a8755 100644 --- a/tensorflow/contrib/lite/arena_planner_test.cc +++ b/tensorflow/contrib/lite/arena_planner_test.cc @@ -193,8 +193,8 @@ TEST_F(ArenaPlannerTest, GraphWithNoOps) { EXPECT_EQ(GetOffset(10), GetOffsetAfter(0)); // The outputs are never allocated because they are not connected to any // inputs. 
- EXPECT_EQ(GetOffset(5), 0); - EXPECT_EQ(GetOffset(11), 0); + EXPECT_TRUE((*graph.tensors())[5].data.raw == nullptr); + EXPECT_TRUE((*graph.tensors())[11].data.raw == nullptr); } TEST_F(ArenaPlannerTest, GraphWithOneOp) { @@ -373,11 +373,7 @@ TEST_F(ArenaPlannerTest, LargerGraphAndStepwiseAllocation) { SetGraph(&graph); auto is_unallocated = [&](int tensor_index) { - // TODO(ahentz): We'd to use nullptr to represent unallocated tensors, but - // the current code still points them all to the beginning fo the alloc - // (that is, zero offset). - // return (*graph.tensors())[tensor_index].data.raw == nullptr; - return GetOffset(tensor_index) == 0; + return (*graph.tensors())[tensor_index].data.raw == nullptr; }; // The allocation plan is made at the beginning and is independent of diff --git a/tensorflow/contrib/lite/interpreter.cc b/tensorflow/contrib/lite/interpreter.cc index a8db149..9dd60ab 100644 --- a/tensorflow/contrib/lite/interpreter.cc +++ b/tensorflow/contrib/lite/interpreter.cc @@ -459,6 +459,9 @@ TfLiteStatus Interpreter::ResizeTensorImpl(TfLiteTensor* tensor, TfLiteIntArrayFree(new_size); return kTfLiteError; } + + // Realloc space for kTfLiteDynamic tensors. + TfLiteTensorRealloc(bytesRequired, tensor); tensor->bytes = bytesRequired; } if (tensor->dims) TfLiteIntArrayFree(tensor->dims); diff --git a/tensorflow/contrib/lite/interpreter.h b/tensorflow/contrib/lite/interpreter.h index c822557..3b077c7 100644 --- a/tensorflow/contrib/lite/interpreter.h +++ b/tensorflow/contrib/lite/interpreter.h @@ -307,7 +307,8 @@ class Interpreter { TfLiteStatus BytesRequired(TfLiteType type, const int* dims, int dims_size, size_t* bytes); - // Request an tensor be resized implementation. + // Implementation of a tensor resize request. If the given tensor is of + // type kTfLiteDynamic, it will also be allocated new memory. TfLiteStatus ResizeTensorImpl(TfLiteTensor* tensor, TfLiteIntArray* new_size); // Report a detailed error string (will be printed to stderr). 
diff --git a/tensorflow/contrib/lite/kernels/conv.cc b/tensorflow/contrib/lite/kernels/conv.cc index 7a45647..1fba3cb 100644 --- a/tensorflow/contrib/lite/kernels/conv.cc +++ b/tensorflow/contrib/lite/kernels/conv.cc @@ -271,10 +271,13 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { free(hwcn_weights->data.raw); hwcn_weights->data.raw = nullptr; } + + // Note that hwcn_weights is a kTfLiteDynamic tensor, and + // ResizeTensor will actually allocate space for it. It would be more + // efficient if we placed hwcn_weights in the persistent arena. auto hwcn_weights_status = context->ResizeTensor(context, hwcn_weights, hwcn_weights_size); if (hwcn_weights_status != kTfLiteOk) return hwcn_weights_status; - hwcn_weights->data.raw = static_cast(malloc(hwcn_weights->bytes)); // TODO(petewarden): If Resize() is called when the size hasn't actually // changed, this will do extra redundant work.