--- /dev/null
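+//
+// NNAPI delegation test: run the given T/F Lite model twice (once on the plain
+// interpreter, once with NNAPI enabled) and assert that both runs produce
+// bit-identical outputs. Build without NDEBUG, since every check in this test
+// relies on assert.
+//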
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+
+#include <iostream>
+#include <chrono>
+#include <algorithm>
+
+using namespace tflite;
+using namespace tflite::ops::builtin;
+
+inline void check(TfLiteStatus status) { assert(status == kTfLiteOk); }
+
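+// Build an interpreter over 'model' with the builtin op resolver, and ask it
+// to delegate supported operations to NNAPI when 'use_nnapi' is set.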
+std::unique_ptr<Interpreter> build_interpreter(const FlatBufferModel &model, bool use_nnapi)
+{
+ std::unique_ptr<Interpreter> interpreter;
+
+ BuiltinOpResolver resolver;
+
+ InterpreterBuilder builder(model, resolver);
+
+ check(builder(&interpreter));
+
+ interpreter->UseNNAPI(use_nnapi);
+
+  return interpreter;
+}
+
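+// Allocate tensor buffers and fill every input tensor with a deterministic
+// byte pattern (0, 1, 2, ...) so that both interpreters see the same inputs.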
+void initialize_interpreter(Interpreter &interpreter)
+{
+ check(interpreter.AllocateTensors());
+
+ // TODO Find a better way to initialize tensors
+ for (const auto &id : interpreter.inputs())
+ {
+ auto tensor = interpreter.tensor(id);
+ auto ptr = tensor->data.uint8;
+ auto len = tensor->bytes;
+
+ for (size_t ind = 0; ind < len; ++ind)
+ {
+      ptr[ind] = static_cast<uint8_t>(ind);
+ }
+ }
+}
+
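+// Run a single inference over the (already initialized) interpreter.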
+void invoke_interpreter(Interpreter &interpreter)
+{
+ check(interpreter.Invoke());
+}
+
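+// Element-wise comparison of two vectors (used below for the input/output
+// index lists).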
+template <typename T> bool operator==(const std::vector<T> &lhs, const std::vector<T> &rhs)
+{
+  return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin());
+}
+
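+// Byte-wise comparison of two tensors. NOTE This demands exact equality; it
+// assumes the NNAPI path computes bit-identical results, which may be too
+// strict for floating-point models.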
+bool operator==(const TfLiteTensor &lhs, const TfLiteTensor &rhs)
+{
+  if (lhs.bytes != rhs.bytes)
+  {
+    return false;
+  }
+
+  return std::equal(lhs.data.uint8, lhs.data.uint8 + lhs.bytes, rhs.data.uint8);
+}
+
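+// Test driver: load the model, run it with and without NNAPI, and compare the
+// outputs.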
+int main(const int argc, char **argv)
+{
+  if (argc < 2)
+  {
+    std::cerr << "Usage: " << argv[0] << " <tflite model>" << std::endl;
+    return 1;
+  }
+
+  const auto filename = argv[1];
+
+  StderrReporter error_reporter;
+
+  auto model = FlatBufferModel::BuildFromFile(filename, &error_reporter);
+  assert(model != nullptr);
+
+ std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
+
+ std::unique_ptr<Interpreter> pure = build_interpreter(*model, false);
+ initialize_interpreter(*pure);
+ invoke_interpreter(*pure);
+
+ std::cout << "[NNAPI TEST] Run T/F Lite Interpreter with NNAPI" << std::endl;
+
+ std::unique_ptr<Interpreter> delegated = build_interpreter(*model, true);
+ initialize_interpreter(*delegated);
+ invoke_interpreter(*delegated);
+
+ std::cout << "[NNAPI TEST] Compare the result" << std::endl;
+
+ assert(pure->inputs() == delegated->inputs());
+ assert(pure->outputs() == delegated->outputs());
+
+ for (const auto &id : pure->outputs())
+ {
+ std::cout << " Compare tensor #" << id << std::endl;
+ assert(*(pure->tensor(id)) == *(delegated->tensor(id)));
+ }
+
+ std::cout << "[NNAPI TEST] PASSED" << std::endl;
+
+ return 0;
+}