return kTfLiteOk;
}
+#include <unordered_map>
+
TfLiteStatus NNAPIDelegate::Invoke(::tflite::Interpreter* interpreter) {
if (!nn_model_) {
model_status_ = BuildGraph(interpreter);
ANeuralNetworksExecution* execution = nullptr;
CHECK_NN(ANeuralNetworksExecution_create(nn_compiled_model_, &execution));
+  // NNAPI has no native bool tensor type; kTfLiteBool tensors must be fed
+  // as TENSOR_BOOL8 (one byte per element, 0 = false, non-zero = true).
+  // Stage casted copies in temporary buffers, keyed by TENSOR INDEX
+  // (`input`/`output`), because every later lookup — setInput/setOutput and
+  // the post-execution copy-back — indexes these maps by tensor index.
+  // TODO(review): buffers leak if a CHECK_NN below returns early.
+  std::unordered_map<size_t, uint8_t*> input_boolean_tensors;
+  std::unordered_map<size_t, uint8_t*> output_boolean_tensors;
+  for (size_t i = 0; i < interpreter->inputs().size(); i++)
+  {
+    int input = interpreter->inputs()[i];
+    TfLiteTensor* tensor = interpreter->tensor(input);
+    if (tensor->type == kTfLiteBool)
+    {
+      size_t elements = tensor->bytes / sizeof(bool);
+      uint8_t* temp_tensor = new uint8_t[elements];
+      // Key by tensor index, not loop index: lookups below use
+      // input_boolean_tensors[input].
+      input_boolean_tensors[input] = temp_tensor;
+      for (size_t e = 0; e < elements; e++)
+      {
+        // true -> 0xff, false -> 0x00, matching the decode after execution
+        // (0x00 -> false, non-zero -> true) and NNAPI TENSOR_BOOL8 semantics.
+        temp_tensor[e] = (tensor->data.b[e] ? 0xff : 0x00);
+      }
+    }
+  }
+  for (size_t i = 0; i < interpreter->outputs().size(); i++)
+  {
+    int output = interpreter->outputs()[i];
+    TfLiteTensor* tensor = interpreter->tensor(output);
+    if (tensor->type == kTfLiteBool)
+    {
+      // Output staging buffer; NNAPI writes TENSOR_BOOL8 results here.
+      uint8_t* temp_tensor = new uint8_t[tensor->bytes / sizeof(bool)];
+      output_boolean_tensors[output] = temp_tensor;
+    }
+  }
+
// Currently perform deep copy of input buffer
for (size_t i = 0; i < interpreter->inputs().size(); i++) {
int input = interpreter->inputs()[i];
// TODO(aselle): Is this what we want or do we want input instead?
// TODO(aselle): This should be called setInputValue maybe to be cons.
TfLiteTensor* tensor = interpreter->tensor(input);
-    CHECK_NN(ANeuralNetworksExecution_setInput(
-        execution, i, nullptr, tensor->data.raw, tensor->bytes));
+    if (tensor->type == kTfLiteBool)
+    {
+      // Feed the staged uint8 copy. Its byte size is one uint8_t per bool
+      // element; divide before multiplying to keep the expression clear and
+      // overflow-safe.
+      CHECK_NN(ANeuralNetworksExecution_setInput(
+          execution, i, nullptr, input_boolean_tensors[input],
+          (tensor->bytes / sizeof(bool)) * sizeof(uint8_t)));
+    }
+    else
+    {
+      CHECK_NN(ANeuralNetworksExecution_setInput(
+          execution, i, nullptr, tensor->data.raw, tensor->bytes));
+    }
}
// Tell nn api where to place final data.
for (size_t i = 0; i < interpreter->outputs().size(); i++) {
int output = interpreter->outputs()[i];
TfLiteTensor* tensor = interpreter->tensor(output);
-    CHECK_NN(ANeuralNetworksExecution_setOutput(
-        execution, i, nullptr, tensor->data.raw, tensor->bytes));
+
+    if (tensor->type == kTfLiteBool)
+    {
+      // Point NNAPI at the staged TENSOR_BOOL8 buffer; results are copied
+      // back into tensor->data.b after execution completes. Size is one
+      // uint8_t per bool element (divide first for clarity/overflow safety).
+      CHECK_NN(ANeuralNetworksExecution_setOutput(
+          execution, i, nullptr, output_boolean_tensors[output],
+          (tensor->bytes / sizeof(bool)) * sizeof(uint8_t)));
+    }
+    else
+    {
+      CHECK_NN(ANeuralNetworksExecution_setOutput(
+          execution, i, nullptr, tensor->data.raw, tensor->bytes));
+    }
}
// The state_out of previous invocation need to be mapped to state_in of
ANeuralNetworksEvent_free(event);
ANeuralNetworksExecution_free(execution);
+  // Copy TENSOR_BOOL8 results back into the TFLite bool output tensors and
+  // free all temporary staging buffers allocated before execution.
+  for (size_t i = 0; i < interpreter->inputs().size(); i++) {
+    int input = interpreter->inputs()[i];
+    TfLiteTensor* tensor = interpreter->tensor(input);
+
+    if (tensor->type == kTfLiteBool)
+    {
+      uint8_t* temp_tensor = input_boolean_tensors[input];
+      input_boolean_tensors[input] = nullptr;
+      delete[] temp_tensor;  // allocated with new[], must use delete[]
+    }
+  }
+  for (size_t i = 0; i < interpreter->outputs().size(); i++) {
+    int output = interpreter->outputs()[i];
+    TfLiteTensor* tensor = interpreter->tensor(output);
+
+    if (tensor->type == kTfLiteBool)
+    {
+      uint8_t* temp_tensor = output_boolean_tensors[output];
+      size_t elements = tensor->bytes / sizeof(bool);
+      for (size_t e = 0; e < elements; e++)
+      {
+        // TENSOR_BOOL8 decode: 0x00 -> false, any non-zero byte -> true.
+        tensor->data.b[e] = (temp_tensor[e] != 0x00);
+      }
+      output_boolean_tensors[output] = nullptr;
+      delete[] temp_tensor;  // allocated with new[], must use delete[]
+    }
+  }
+
#if 0
printf("From the NN API:\n");
TfLiteTensor* tensor = interpreter->tensor(interpreter->outputs()[0]);