--- /dev/null
+#include "nnkit/support/tflite/AbstractBackend.h"
+
+#include <tensorflow/contrib/lite/kernels/register.h>
+#include <tensorflow/contrib/lite/model.h>
+
+#include <stdexcept>
+
+namespace
+{
+
+// Backend that loads a T/F Lite model from a file and owns the interpreter
+// built from it.
+class GenericBackend final : public nnkit::support::tflite::AbstractBackend
+{
+public:
+  // Builds a FlatBufferModel and an Interpreter from the model at 'path'.
+  //
+  // Throws std::runtime_error when the model cannot be loaded or the
+  // interpreter cannot be constructed.
+  explicit GenericBackend(const std::string &path)
+  {
+    // NOTE Use the default error reporter.  Passing the address of a
+    //      stack-local StderrReporter here would leave '_model' holding a
+    //      dangling reporter pointer once this constructor returns.
+    _model = ::tflite::FlatBufferModel::BuildFromFile(path.c_str());
+
+    // BuildFromFile returns nullptr on failure; dereferencing it below
+    // without this check would be undefined behavior.
+    if (_model == nullptr)
+    {
+      throw std::runtime_error{"Failed to load a tflite model from " + path};
+    }
+
+    ::tflite::ops::builtin::BuiltinOpResolver resolver;
+    ::tflite::InterpreterBuilder builder(*_model, resolver);
+
+    if (kTfLiteOk != builder(&_interp))
+    {
+      throw std::runtime_error{"Failed to build a tflite interpreter"};
+    }
+
+    _interp->SetNumThreads(1);
+  }
+
+public:
+  ::tflite::Interpreter &interpreter(void) override { return *_interp; }
+
+private:
+  std::unique_ptr<::tflite::FlatBufferModel> _model;
+  std::unique_ptr<::tflite::Interpreter> _interp;
+};
+
+}
+
+#include <nnkit/CmdlineArguments.h>
+#include <nncc/foundation/Memory.h>
+
+// Backend factory entry point (presumably resolved dynamically by the nnkit
+// driver - confirm against the nnkit tool).  Expects the path of the T/F Lite
+// model file as the first command-line argument.
+extern "C" std::unique_ptr<nnkit::Backend> make_backend(const nnkit::CmdlineArguments &args)
+{
+  // args.at(0): model file path forwarded to GenericBackend
+  return nncc::foundation::make_unique<GenericBackend>(args.at(0));
+}
--- /dev/null
+# Build this backend only when the tflite support library is available
+if(NOT TARGET nnkit_support_tflite)
+  return()
+endif(NOT TARGET nnkit_support_tflite)
+
+# SHARED so the backend can be loaded at runtime as a plugin
+add_library(nnkit_tflite_backend SHARED Backend.cpp)
+target_link_libraries(nnkit_tflite_backend nnkit_support_tflite)
-add_subdirectory(intf)
+add_subdirectories()
--- /dev/null
+add_subdirectories()
--- /dev/null
+nncc_find_package(TensorFlowLite QUIET)
+
+# Skip this library entirely when TensorFlow Lite is unavailable
+if(NOT TensorFlowLite_FOUND)
+  return()
+endif(NOT TensorFlowLite_FOUND)
+
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+# STATIC library, but position-independent so it can be linked into the
+# SHARED backend plugins
+add_library(nnkit_support_tflite STATIC ${SOURCES})
+set_target_properties(nnkit_support_tflite PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(nnkit_support_tflite PUBLIC include)
+target_link_libraries(nnkit_support_tflite nnkit_intf_backend)
+target_link_libraries(nnkit_support_tflite tensorflowlite)
--- /dev/null
+#ifndef __NNKIT_SUPPORT_TFLITE_ABSTRACT_BACKEND_H__
+#define __NNKIT_SUPPORT_TFLITE_ABSTRACT_BACKEND_H__
+
+#include <nnkit/Backend.h>
+#include <tensorflow/contrib/lite/interpreter.h>
+
+namespace nnkit
+{
+namespace support
+{
+namespace tflite
+{
+
+// Partial nnkit::Backend implementation that factors out the common
+// prepare/run/teardown logic over a T/F Lite interpreter.  A concrete
+// backend only needs to provide the interpreter itself.
+class AbstractBackend : public nnkit::Backend
+{
+public:
+  virtual ~AbstractBackend() = default;
+
+public:
+  // Returns the interpreter this backend drives (owned by the subclass)
+  virtual ::tflite::Interpreter &interpreter(void) = 0;
+
+public:
+  // Allocates tensors, then invokes 'f' over the input tensors
+  void prepare(const std::function<void (TensorContext &)> &f) override;
+  // Runs inference on the interpreter
+  void run(void) override;
+  // Invokes 'f' over the output tensors
+  void teardown(const std::function<void (TensorContext &)> &f) override;
+};
+
+} // namespace tflite
+} // namespace support
+} // namespace nnkit
+
+#endif // __NNKIT_SUPPORT_TFLITE_ABSTRACT_BACKEND_H__
--- /dev/null
+#ifndef __NNKIT_SUPPORT_TFLITE_TENSOR_CONTEXT_H__
+#define __NNKIT_SUPPORT_TFLITE_TENSOR_CONTEXT_H__
+
+#include "nnkit/support/tflite/TensorSet.h"
+
+#include <nnkit/TensorContext.h>
+
+namespace nnkit
+{
+namespace support
+{
+namespace tflite
+{
+
+// nnkit::TensorContext implementation on top of a TensorSet.
+//
+// Exposes the name, shape, and (float) element data of each T/F Lite
+// tensor in the set.
+class TensorContext final : public nnkit::TensorContext
+{
+public:
+  // 'tensors' is held by reference and must outlive this context
+  TensorContext(TensorSet &tensors) : _tensors(tensors)
+  {
+    // DO NOTHING
+  }
+
+public:
+  // Number of tensors in the underlying set
+  uint32_t size(void) const override
+  {
+    return _tensors.size();
+  }
+
+public:
+  // Name of the n-th tensor
+  std::string name(uint32_t n) const override
+  {
+    return _tensors.at(n)->name;
+  }
+
+public:
+  // Shape of the n-th tensor (defined in TensorContext.cpp)
+  nncc::core::ADT::tensor::Shape shape(uint32_t n) const override;
+
+public:
+  // Float (fp32) tensor support
+  bool isFloatTensor(uint32_t n) const override
+  {
+    return _tensors.at(n)->type == kTfLiteFloat32;
+  }
+
+  void getMutableFloatTensor(uint32_t n, const TensorContext::TypedAccessor<float> &f) override;
+  void getConstFloatTensor(uint32_t n, const TensorContext::TypedReader<float> &f) const override;
+
+private:
+  TensorSet &_tensors;
+};
+
+} // namespace tflite
+} // namespace support
+} // namespace nnkit
+
+#endif // __NNKIT_SUPPORT_TFLITE_TENSOR_CONTEXT_H__
--- /dev/null
+#ifndef __NNKIT_SUPPORT_TFLITE_TENSOR_SET_H__
+#define __NNKIT_SUPPORT_TFLITE_TENSOR_SET_H__
+
+#include <tensorflow/contrib/lite/context.h>
+
+#include <cstdint>
+
+namespace nnkit
+{
+namespace support
+{
+namespace tflite
+{
+
+// Abstract, indexed collection of T/F Lite tensors (e.g. the inputs or
+// outputs of an interpreter - see TensorSets.h).
+struct TensorSet
+{
+  virtual ~TensorSet() = default;
+
+  // Number of tensors in this set
+  virtual uint32_t size(void) const = 0;
+
+  // Returns the n-th tensor (non-owning pointer; the tensor stays owned
+  // by whoever backs this set)
+  virtual TfLiteTensor *at(uint32_t n) const = 0;
+};
+
+} // namespace tflite
+} // namespace support
+} // namespace nnkit
+
+
+#endif // __NNKIT_SUPPORT_TFLITE_TENSOR_SET_H__
--- /dev/null
+#ifndef __NNKIT_SUPPORT_TFLITE_TENSOR_SETS_H__
+#define __NNKIT_SUPPORT_TFLITE_TENSOR_SETS_H__
+
+#include "nnkit/support/tflite/TensorSet.h"
+
+#include <tensorflow/contrib/lite/interpreter.h>
+
+namespace nnkit
+{
+namespace support
+{
+namespace tflite
+{
+
+// TensorSet view over the input tensors of a given interpreter
+class InputTensorSet final : public TensorSet
+{
+public:
+  // Holds 'interp' by reference; it must outlive this set
+  explicit InputTensorSet(::tflite::Interpreter &interp) : _interpreter{interp}
+  {
+    // DO NOTHING
+  }
+
+public:
+  uint32_t size(void) const override { return _interpreter.inputs().size(); }
+
+public:
+  TfLiteTensor *at(uint32_t n) const override
+  {
+    // Map the set-local index 'n' to the interpreter-wide tensor index
+    const auto tensor_index = _interpreter.inputs().at(n);
+    return _interpreter.tensor(tensor_index);
+  }
+
+private:
+  ::tflite::Interpreter &_interpreter;
+};
+
+// TensorSet view over the output tensors of a given interpreter
+class OutputTensorSet final : public TensorSet
+{
+public:
+  // Holds 'interp' by reference; it must outlive this set.
+  // NOTE 'explicit' for consistency with InputTensorSet - a single-argument
+  //      constructor should not allow implicit conversion.
+  explicit OutputTensorSet(::tflite::Interpreter &interp) : _interp(interp)
+  {
+    // DO NOTHING
+  }
+
+public:
+  uint32_t size(void) const override { return _interp.outputs().size(); }
+
+public:
+  TfLiteTensor *at(uint32_t n) const override
+  {
+    return _interp.tensor(_interp.outputs().at(n));
+  }
+
+private:
+  ::tflite::Interpreter &_interp;
+};
+
+} // namespace tflite
+} // namespace support
+} // namespace nnkit
+
+#endif // __NNKIT_SUPPORT_TFLITE_TENSOR_SETS_H__
--- /dev/null
+// NOTE Guard renamed from __NNKIT_SUPPORT_TENSOR_UTILS_H__: the old name
+//      did not match the #endif comment and broke the ..._TFLITE_...
+//      convention used by the sibling headers.
+#ifndef __NNKIT_SUPPORT_TFLITE_TENSOR_UTILS_H__
+#define __NNKIT_SUPPORT_TFLITE_TENSOR_UTILS_H__
+
+#include <tensorflow/contrib/lite/context.h>
+#include <nncc/core/ADT/tensor/Shape.h>
+
+namespace nnkit
+{
+namespace support
+{
+namespace tflite
+{
+
+// Converts the dimension array of a T/F Lite tensor into an nncc shape
+nncc::core::ADT::tensor::Shape tensor_shape(const TfLiteTensor *t);
+
+} // namespace tflite
+} // namespace support
+} // namespace nnkit
+
+#endif // __NNKIT_SUPPORT_TFLITE_TENSOR_UTILS_H__
--- /dev/null
+#include "nnkit/support/tflite/AbstractBackend.h"
+#include "nnkit/support/tflite/TensorSets.h"
+#include "nnkit/support/tflite/TensorContext.h"
+
+#include <cassert>
+
+// Aborts (in debug builds) unless 'status' is kTfLiteOk.
+// NOTE With NDEBUG the assert compiles out and failures are silently
+//      ignored; the void-cast keeps 'status' formally used so release
+//      builds do not warn about an unused parameter.
+static inline void ensure(TfLiteStatus status)
+{
+  assert(status == kTfLiteOk);
+  (void)status;
+}
+
+namespace nnkit
+{
+namespace support
+{
+namespace tflite
+{
+
+// Allocates the interpreter's tensor buffers, then lets 'f' fill the inputs
+void AbstractBackend::prepare(const std::function<void (nnkit::TensorContext &)> &f)
+{
+  // Buffers must exist before the callback touches any tensor data
+  ensure(interpreter().AllocateTensors());
+
+  InputTensorSet input_set{interpreter()};
+  TensorContext input_ctx{input_set};
+  f(input_ctx);
+}
+
+// Runs inference once over the currently-set input tensors
+void AbstractBackend::run(void)
+{
+  const auto status = interpreter().Invoke();
+  ensure(status);
+}
+
+// Lets 'f' read the output tensors produced by run()
+void AbstractBackend::teardown(const std::function<void (nnkit::TensorContext &)> &f)
+{
+  OutputTensorSet output_set{interpreter()};
+  TensorContext output_ctx{output_set};
+  f(output_ctx);
+}
+
+} // namespace tflite
+} // namespace support
+} // namespace nnkit
--- /dev/null
+#include "nnkit/support/tflite/TensorContext.h"
+#include "nnkit/support/tflite/TensorUtils.h"
+
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+#include <nncc/core/ADT/tensor/Overlay.h>
+
+namespace nnkit
+{
+namespace support
+{
+namespace tflite
+{
+
+// Shape of the n-th tensor, converted to an nncc tensor shape
+nncc::core::ADT::tensor::Shape TensorContext::shape(uint32_t n) const
+{
+  return tensor_shape(_tensors.at(n));
+}
+
+// Exposes the n-th tensor to 'f' as a mutable float overlay with lexical
+// (row-major) layout.  Assumes the tensor holds fp32 data - callers are
+// expected to check isFloatTensor(n) first (TODO confirm).
+void TensorContext::getMutableFloatTensor(uint32_t n, const nnkit::TensorContext::TypedAccessor<float> &f)
+{
+  using nncc::core::ADT::tensor::LexicalLayout;
+  using nncc::core::ADT::tensor::make_overlay;
+
+  auto t = _tensors.at(n);
+
+  // 'data.f' is already a 'float *'; the previous reinterpret_cast was a no-op
+  float *data = t->data.f;
+  auto overlay = make_overlay<float, LexicalLayout>(shape(n), data);
+
+  f(*this, n, overlay);
+}
+
+// Exposes the n-th tensor to 'f' as a read-only float overlay with lexical
+// (row-major) layout.  Assumes the tensor holds fp32 data - callers are
+// expected to check isFloatTensor(n) first (TODO confirm).
+void TensorContext::getConstFloatTensor(uint32_t n, const nnkit::TensorContext::TypedReader<float> &f) const
+{
+  using nncc::core::ADT::tensor::LexicalLayout;
+  using nncc::core::ADT::tensor::make_overlay;
+
+  auto t = _tensors.at(n);
+
+  // 'data.f' is already a 'float *'; the previous reinterpret_cast was a no-op
+  float *data = t->data.f;
+  auto overlay = make_overlay<float, LexicalLayout>(shape(n), data);
+
+  f(*this, n, overlay);
+}
+
+} // namespace tflite
+} // namespace support
+} // namespace nnkit
--- /dev/null
+#include "nnkit/support/tflite/TensorUtils.h"
+
+namespace nnkit
+{
+namespace support
+{
+namespace tflite
+{
+
+// Copies the dimension array of a T/F Lite tensor into an nncc tensor shape
+nncc::core::ADT::tensor::Shape tensor_shape(const TfLiteTensor *t)
+{
+  nncc::core::ADT::tensor::Shape res;
+
+  const auto num_axes = t->dims->size;
+  res.resize(num_axes);
+
+  // Axis order is preserved as-is
+  for (int axis = 0; axis < num_axes; ++axis)
+  {
+    res.dim(axis) = t->dims->data[axis];
+  }
+
+  return res;
+}
+
+} // namespace tflite
+} // namespace support
+} // namespace nnkit