Support boolean tensor input and output (#3808)
author Hyeongseok Oh/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Mon, 3 Dec 2018 06:42:42 +0000 (15:42 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Mon, 3 Dec 2018 06:42:42 +0000 (15:42 +0900)
Support boolean tensor input and output in nnapi delegate
Use a temporary buffer for casting between boolean arrays and quantized int arrays

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
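
A minimal stand-alone sketch of the casting scheme this commit applies (the helper names bool_to_u8 and u8_to_bool are hypothetical, not part of the delegate): boolean inputs are staged in a uint8_t buffer, 0x00 for false and 0xff for true, before being handed to ANeuralNetworksExecution_setInput, and boolean outputs are converted back after execution completes.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Cast a boolean array to the quantized 8-bit staging format.
    static std::vector<uint8_t> bool_to_u8(const bool* src, size_t n) {
      std::vector<uint8_t> dst(n);
      for (size_t i = 0; i < n; i++)
        dst[i] = src[i] ? 0xff : 0x00;  // true -> 0xff, false -> 0x00
      return dst;
    }

    // Cast the quantized 8-bit buffer back to booleans.
    static void u8_to_bool(const uint8_t* src, bool* dst, size_t n) {
      for (size_t i = 0; i < n; i++)
        dst[i] = (src[i] != 0x00);  // any non-zero byte counts as true
    }

    int main() {
      bool in[4] = {true, false, true, true};
      std::vector<uint8_t> staged = bool_to_u8(in, 4);  // buffer given to setInput
      bool out[4];
      u8_to_bool(staged.data(), out, 4);  // copy-back after the execution event
      for (bool b : out) printf("%d ", b);  // prints: 1 0 1 1
      return 0;
    }

Treating any non-zero byte as true on the way back keeps the round trip an identity even if the backend writes a value other than 0xff.
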
libs/support/tflite/src/nnapi_delegate.cpp

index dd3b300..6b882f6 100644
@@ -993,6 +993,8 @@ TfLiteStatus NNAPIDelegate::BuildGraph(::tflite::Interpreter* interpreter) {
   return kTfLiteOk;
 }
 
+#include <unordered_map>  // for the temporary boolean tensor maps below (would normally sit at the top of the file)
+
 TfLiteStatus NNAPIDelegate::Invoke(::tflite::Interpreter* interpreter) {
   if (!nn_model_) {
     model_status_ = BuildGraph(interpreter);
@@ -1007,22 +1009,68 @@ TfLiteStatus NNAPIDelegate::Invoke(::tflite::Interpreter* interpreter) {
   ANeuralNetworksExecution* execution = nullptr;
   CHECK_NN(ANeuralNetworksExecution_create(nn_compiled_model_, &execution));
 
+  // Allocate temporary buffers for boolean tensors, which travel through NNAPI as quantized 8-bit data (0x00 = false, 0xff = true)
+  std::unordered_map<size_t, uint8_t*> input_boolean_tensors;
+  std::unordered_map<size_t, uint8_t*> output_boolean_tensors;
+  for (size_t i = 0; i < interpreter->inputs().size(); i++)
+  {
+    int input = interpreter->inputs()[i];
+    TfLiteTensor* tensor = interpreter->tensor(input);
+    if (tensor->type == kTfLiteBool)
+    {
+      size_t elements = tensor->bytes / sizeof(bool);
+      uint8_t* temp_tensor = new uint8_t[elements];
+      input_boolean_tensors[input] = temp_tensor;
+      for (size_t e = 0; e < elements; e++)
+      {
+        temp_tensor[e] = (tensor->data.b[e] ? 0xff : 0x00);  // true -> 0xff, matching the copy-back below
+      }
+    }
+  }
+  for (size_t i = 0; i < interpreter->outputs().size(); i++)
+  {
+    int output = interpreter->outputs()[i];
+    TfLiteTensor* tensor = interpreter->tensor(output);
+    if (tensor->type == kTfLiteBool)
+    {
+      uint8_t* temp_tensor = new uint8_t[tensor->bytes / sizeof(bool)];
+      output_boolean_tensors[output] = temp_tensor;
+    }
+  }
+
   // Currently perform deep copy of input buffer
   for (size_t i = 0; i < interpreter->inputs().size(); i++) {
     int input = interpreter->inputs()[i];
     // TODO(aselle): Is this what we want or do we want input instead?
     // TODO(aselle): This should be called setInputValue maybe to be cons.
     TfLiteTensor* tensor = interpreter->tensor(input);
-    CHECK_NN(ANeuralNetworksExecution_setInput(
-        execution, i, nullptr, tensor->data.raw, tensor->bytes));
+    if (tensor->type == kTfLiteBool)
+    {
+      CHECK_NN(ANeuralNetworksExecution_setInput(
+          execution, i, nullptr, input_boolean_tensors[input], tensor->bytes * sizeof(uint8_t) / sizeof(bool)));
+    }
+    else
+    {
+      CHECK_NN(ANeuralNetworksExecution_setInput(
+          execution, i, nullptr, tensor->data.raw, tensor->bytes));
+    }
   }
 
   // Tell nn api where to place final data.
   for (size_t i = 0; i < interpreter->outputs().size(); i++) {
     int output = interpreter->outputs()[i];
     TfLiteTensor* tensor = interpreter->tensor(output);
-    CHECK_NN(ANeuralNetworksExecution_setOutput(
-        execution, i, nullptr, tensor->data.raw, tensor->bytes));
+
+    if (tensor->type == kTfLiteBool)
+    {
+      CHECK_NN(ANeuralNetworksExecution_setOutput(
+          execution, i, nullptr, output_boolean_tensors[output], tensor->bytes * sizeof(uint8_t) / sizeof(bool)));
+    }
+    else
+    {
+      CHECK_NN(ANeuralNetworksExecution_setOutput(
+          execution, i, nullptr, tensor->data.raw, tensor->bytes));
+    }
   }
 
   // The state_out of previous invocation need to be mapped to state_in of
@@ -1049,6 +1097,35 @@ TfLiteStatus NNAPIDelegate::Invoke(::tflite::Interpreter* interpreter) {
   ANeuralNetworksEvent_free(event);
   ANeuralNetworksExecution_free(execution);
 
+  // Copy boolean outputs back from the temporary buffers and free all temporary buffers.
+  for (size_t i = 0; i < interpreter->inputs().size(); i++) {
+    int input = interpreter->inputs()[i];
+    TfLiteTensor* tensor = interpreter->tensor(input);
+
+    if (tensor->type == kTfLiteBool)
+    {
+      uint8_t* temp_tensor = input_boolean_tensors[input];
+      input_boolean_tensors[input] = nullptr;
+      delete[] temp_tensor;
+    }
+  }
+  for (size_t i = 0; i < interpreter->outputs().size(); i++) {
+    int output = interpreter->outputs()[i];
+    TfLiteTensor* tensor = interpreter->tensor(output);
+
+    if (tensor->type == kTfLiteBool)
+    {
+      uint8_t* temp_tensor = output_boolean_tensors[output];
+      size_t elements = tensor->bytes / sizeof(bool);
+      for (size_t e = 0; e < elements; e++)
+      {
+        tensor->data.b[e] = (temp_tensor[e] != 0x00);
+      }
+      output_boolean_tensors[output] = nullptr;
+      delete[] temp_tensor;
+    }
+  }
+
 #if 0
   printf("From the NN API:\n");
   TfLiteTensor* tensor = interpreter->tensor(interpreter->outputs()[0]);