[nnsuite] TensorFlow Lite-based conv nnkit backend (#431)
author박종현/동작제어Lab(SR)/Staff Engineer/삼성전자 <jh1302.park@samsung.com>
Wed, 4 Jul 2018 08:46:50 +0000 (17:46 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Wed, 4 Jul 2018 08:46:50 +0000 (17:46 +0900)
This commit introduces an nnkit backend which performs inference with the
sample (random) conv model in nnsuite.

This implementation may serve as a reference data generator for compiler
tests.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
contrib/nnkit/CMakeLists.txt
contrib/nnkit/contrib/CMakeLists.txt [new file with mode: 0644]
contrib/nnkit/contrib/nnsuite/CMakeLists.txt [new file with mode: 0644]
contrib/nnkit/contrib/nnsuite/conv/CMakeLists.txt [new file with mode: 0644]
contrib/nnkit/contrib/nnsuite/conv/tflite/CMakeLists.txt [new file with mode: 0644]
contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.cpp [new file with mode: 0644]
contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.h [new file with mode: 0644]
contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.test.cpp [new file with mode: 0644]
contrib/nnkit/contrib/nnsuite/conv/tflite/Entry.cpp [new file with mode: 0644]

index 230ce83..120d760 100644 (file)
@@ -13,3 +13,4 @@ add_subdirectory(libs)
 add_subdirectory(backends)
 add_subdirectory(actions)
 add_subdirectory(tools)
+add_subdirectory(contrib)
diff --git a/contrib/nnkit/contrib/CMakeLists.txt b/contrib/nnkit/contrib/CMakeLists.txt
new file mode 100644 (file)
index 0000000..5ea6cda
--- /dev/null
@@ -0,0 +1 @@
+add_subdirectories()
diff --git a/contrib/nnkit/contrib/nnsuite/CMakeLists.txt b/contrib/nnkit/contrib/nnsuite/CMakeLists.txt
new file mode 100644 (file)
index 0000000..5ea6cda
--- /dev/null
@@ -0,0 +1 @@
+add_subdirectories()
diff --git a/contrib/nnkit/contrib/nnsuite/conv/CMakeLists.txt b/contrib/nnkit/contrib/nnsuite/conv/CMakeLists.txt
new file mode 100644 (file)
index 0000000..3e0ecbb
--- /dev/null
@@ -0,0 +1 @@
+add_subdirectory(tflite)
diff --git a/contrib/nnkit/contrib/nnsuite/conv/tflite/CMakeLists.txt b/contrib/nnkit/contrib/nnsuite/conv/tflite/CMakeLists.txt
new file mode 100644 (file)
index 0000000..c1f3e92
--- /dev/null
@@ -0,0 +1,23 @@
+if(NOT TARGET nnkit_support_tflite)
+  return()
+endif(NOT TARGET nnkit_support_tflite)
+
+file(GLOB_RECURSE TESTS "*.test.cpp")
+file(GLOB_RECURSE SOURCES "*.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
+
+add_library(nnsuite_conv_tflite SHARED ${SOURCES})
+target_link_libraries(nnsuite_conv_tflite nnsuite_conv)
+target_link_libraries(nnsuite_conv_tflite nnkit_support_tflite)
+
+nncc_find_package(GTest QUIET)
+
+if(NOT GTest_FOUND)
+  return()
+endif(NOT GTest_FOUND)
+
+add_executable(nnsuite_conv_tflite_test ${TESTS})
+target_link_libraries(nnsuite_conv_tflite_test nnsuite_conv_tflite)
+target_link_libraries(nnsuite_conv_tflite_test nncc_foundation)
+target_link_libraries(nnsuite_conv_tflite_test gtest_main)
+add_test(nnsuite_conv_tflite_test nnsuite_conv_tflite_test)
diff --git a/contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.cpp b/contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.cpp
new file mode 100644 (file)
index 0000000..6d57a7e
--- /dev/null
@@ -0,0 +1,121 @@
+#include "ConvBackend.h"
+
+#include <nncc/core/ADT/kernel/Overlay.h>
+#include <nncc/core/ADT/kernel/NHWCLayout.h>
+
+#include <tensorflow/contrib/lite/kernels/register.h>
+#include <tensorflow/contrib/lite/model.h>
+#include <tensorflow/contrib/lite/builtin_op_data.h>
+
+#include <cstdlib>
+
+using namespace ::tflite;
+using namespace ::tflite::ops::builtin;
+
+template<typename T> T *typed_malloc(void)
+{
+  return reinterpret_cast<T *>(malloc(sizeof(T)));
+}
+
+// Comment from 'context.h'
+//
+// Parameters for asymmetric quantization. Quantized values can be converted
+// back to float using:
+//    real_value = scale * (quantized_value - zero_point);
+static inline TfLiteQuantizationParams make_default_quantization(void)
+{
+  return TfLiteQuantizationParams{1.0f, 0};
+}
+
+static inline std::vector<int> as_dims(const nncc::core::ADT::feature::Shape &shape)
+{
+  const int N = 1;
+  const int C = static_cast<int>(shape.depth());
+  const int H = static_cast<int>(shape.height());
+  const int W = static_cast<int>(shape.width());
+
+  return std::vector<int>{N, H, W, C};
+}
+
+static inline std::vector<int> as_dims(const nncc::core::ADT::kernel::Shape &shape)
+{
+  const int N = static_cast<int>(shape.count());
+  const int C = static_cast<int>(shape.depth());
+  const int H = static_cast<int>(shape.height());
+  const int W = static_cast<int>(shape.width());
+
+  return std::vector<int>{N, H, W, C};
+}
+
+ConvBackend::ConvBackend(const nnsuite::conv::Model &model)
+    : _ifm_name{model.ifm_name()}, _ofm_name{model.ofm_name()}
+{
+  using nncc::core::ADT::kernel::Overlay;
+  using nncc::core::ADT::kernel::NHWCLayout;
+
+  using nncc::core::ADT::kernel::make_overlay;
+  using nncc::core::ADT::kernel::num_elements;
+
+  // Set kernel data
+  const auto &ker_shape = model.ker_shape();
+
+  _kernel.resize(num_elements(ker_shape));
+
+  auto kernel_overlay = make_overlay<float, NHWCLayout>(ker_shape, _kernel.data());
+
+  for (uint32_t n = 0; n < ker_shape.count(); ++n)
+  {
+    for (uint32_t ch = 0; ch < ker_shape.depth(); ++ch)
+    {
+      for (uint32_t row = 0; row < ker_shape.height(); ++row)
+      {
+        for (uint32_t col = 0; col < ker_shape.width(); ++col)
+        {
+          kernel_overlay.at(n, ch, row, col) = model.ker_data().at(n, ch, row, col);
+        }
+      }
+    }
+  }
+
+  // Set bias data
+  _bias.resize(ker_shape.count(), 0.0f);
+
+  // Initialize interpreter
+  auto quantization = make_default_quantization();
+
+  // Create Tensors
+  //  0 -> OFM
+  //  1 -> IFM
+  //  2 -> Kernel
+  //  3 -> Bias
+  _interp.AddTensors(4);
+
+  _interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, _ofm_name.c_str(),
+                                       as_dims(model.ofm_shape()), quantization);
+
+  _interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, _ifm_name.c_str(),
+                                       as_dims(model.ifm_shape()), quantization);
+
+  _interp.SetTensorParametersReadOnly(2, kTfLiteFloat32 /* type */, "kernel" /* name */,
+                                      as_dims(model.ker_shape()), quantization,
+                                      reinterpret_cast<const char *>(_kernel.data()),
+                                      _kernel.size() * sizeof(float));
+
+  _interp.SetTensorParametersReadOnly(3, kTfLiteFloat32 /* type */, "bias" /* name */,
+                                      {static_cast<int>(_bias.size())}, quantization,
+                                      reinterpret_cast<const char *>(_bias.data()),
+                                      _bias.size() * sizeof(float));
+
+  auto param = typed_malloc<TfLiteConvParams>();
+
+  param->padding = kTfLitePaddingValid;
+  param->stride_width = 1;
+  param->stride_height = 1;
+  param->activation = kTfLiteActNone;
+
+  _interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
+                                 BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D));
+
+  _interp.SetInputs({1});
+  _interp.SetOutputs({0});
+}
diff --git a/contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.h b/contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.h
new file mode 100644 (file)
index 0000000..5b11f1a
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __CONV_BACKEND_H__
+#define __CONV_BACKEND_H__
+
+#include <nnsuite/conv/Model.h>
+#include <nnkit/support/tflite/AbstractBackend.h>
+
+#include <vector>
+
+class ConvBackend final : public nnkit::support::tflite::AbstractBackend
+{
+public:
+  explicit ConvBackend(const nnsuite::conv::Model &model);
+
+public:
+  ::tflite::Interpreter &interpreter(void) override { return _interp; }
+
+private:
+  // NOTE tflite interpreter just stores the pointer of its name
+  const std::string _ifm_name;
+  const std::string _ofm_name;
+
+  // NOTE kernel data should live longer than tflite interpreter itself
+  std::vector<float> _kernel;
+
+  // NOTE bias is missing in conv sample model, but conv op kernel in
+  //      tensorflow lite interpreter does not work without bias.
+  //
+  //      Let's feed zero-bias as a workaround
+  std::vector<float> _bias;
+
+private:
+  ::tflite::Interpreter _interp;
+};
+
+#endif // __CONV_BACKEND_H__
diff --git a/contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.test.cpp b/contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.test.cpp
new file mode 100644 (file)
index 0000000..f7be071
--- /dev/null
@@ -0,0 +1,120 @@
+#include "ConvBackend.h"
+
+#include <nncc/core/ADT/kernel/Overlay.h>
+#include <nncc/core/ADT/kernel/NHWCLayout.h>
+
+#include <nncc/core/ADT/tensor/Overlay.h>
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+#include <nncc/core/ADT/tensor/IndexEnumerator.h>
+
+#include <nncc/foundation/math/Float.h>
+
+#include <gtest/gtest.h>
+
+using namespace nncc::core::ADT;
+
+static inline tensor::Shape as_tensor_shape(const feature::Shape &shape)
+{
+  return tensor::Shape{1, shape.height(), shape.width(), shape.depth()};
+}
+
+class TestModel : public nnsuite::conv::Model
+{
+public:
+  TestModel(const std::string &ifm_name, const feature::Shape &ifm_shape,
+            const std::string &ofm_name, const feature::Shape &ofm_shape,
+            const kernel::Shape &ker_shape, const kernel::Layout &ker_layout, float *ker_data)
+    : _ifm_name(ifm_name), _ifm_shape(ifm_shape),
+      _ofm_name(ofm_name), _ofm_shape(ofm_shape),
+      _ker{ker_shape, ker_layout, ker_data}
+  {
+    // DO NOTHING
+  }
+
+public:
+  const std::string &ifm_name(void) const override { return _ifm_name; }
+  const feature::Shape &ifm_shape(void) const override { return _ifm_shape; }
+
+public:
+  const std::string &ofm_name(void) const override { return _ofm_name; }
+  const feature::Shape &ofm_shape(void) const override { return _ofm_shape; }
+
+public:
+  const kernel::Shape &ker_shape(void) const override { return _ker.shape(); }
+  const kernel::Reader<float> &ker_data(void) const override { return _ker; }
+
+private:
+  const std::string _ifm_name;
+  const feature::Shape _ifm_shape;
+
+private:
+  const std::string _ofm_name;
+  const feature::Shape _ofm_shape;
+
+private:
+  const kernel::Overlay<float> _ker;
+};
+
+TEST(CONV_BACKEND, conv_3x3)
+{
+  const std::string ofm_name{"ofm"};
+  const feature::Shape ofm_shape{1, 1, 1};
+  float ofm_data[1] = { 204.0f }; // EXPECTED
+
+  const std::string ifm_name{"ifm"};
+  const feature::Shape ifm_shape{1, 3, 3};
+  float ifm_data[9] = { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f };
+
+  const kernel::Shape ker_shape{1, 1, 3, 3};
+  float ker_data[9] = { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f };
+
+  using kernel::NHWCLayout;
+  using tensor::LexicalLayout;
+
+  TestModel model{ifm_name, ifm_shape, ofm_name, ofm_shape, ker_shape, NHWCLayout{}, ker_data};
+
+  ConvBackend backend{model};
+
+  backend.prepare([&] (nnkit::TensorContext &ctx)
+  {
+    ASSERT_EQ(ctx.size(), 1);
+    ASSERT_EQ(ctx.name(0), ifm_name);
+    // TODO Check IFM shape
+
+    auto fill = [&] (const nnkit::TensorContext &, uint32_t, tensor::Accessor<float> &t)
+    {
+      const auto tensor_shape = as_tensor_shape(ifm_shape);
+      const auto overlay = tensor::make_overlay<float, LexicalLayout>(tensor_shape, ifm_data);
+
+      for (tensor::IndexEnumerator e{tensor_shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        t.at(index) = overlay.at(index);
+      }
+    };
+
+    ctx.getMutableFloatTensor(0, fill);
+  });
+
+  backend.run();
+
+  backend.teardown([&] (nnkit::TensorContext &ctx)
+  {
+    ASSERT_EQ(ctx.size(), 1);
+    ASSERT_EQ(ctx.name(0), ofm_name);
+
+    auto verify = [&] (const nnkit::TensorContext &, uint32_t, const tensor::Reader<float> &t)
+    {
+      const auto tensor_shape = as_tensor_shape(ofm_shape);
+      const auto overlay = tensor::make_overlay<float, LexicalLayout>(tensor_shape, ofm_data);
+
+      for (tensor::IndexEnumerator e{tensor_shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        EXPECT_EQ(t.at(index), overlay.at(index));
+      }
+    };
+
+    ctx.getConstFloatTensor(0, verify);
+  });
+}
diff --git a/contrib/nnkit/contrib/nnsuite/conv/tflite/Entry.cpp b/contrib/nnkit/contrib/nnsuite/conv/tflite/Entry.cpp
new file mode 100644 (file)
index 0000000..f979a7c
--- /dev/null
@@ -0,0 +1,28 @@
+#include "ConvBackend.h"
+
+#include <nnsuite/conv/RandomFixedModel.h>
+
+#include <nnkit/Backend.h>
+#include <nnkit/CmdlineArguments.h>
+
+#include <nncc/foundation/Memory.h>
+
+#include <chrono>
+#include <iostream>
+
+extern "C" std::unique_ptr<nnkit::Backend> make_backend(const nnkit::CmdlineArguments &args)
+{
+  // Set random seed
+  int32_t seed = std::chrono::system_clock::now().time_since_epoch().count();
+
+  if (args.size() > 0)
+  {
+    seed = std::stoi(args.at(0), nullptr, 0);
+  }
+
+  std::cout << "SEED: " << seed << std::endl;
+
+  const nnsuite::conv::RandomFixedModel model{seed};
+
+  return nncc::foundation::make_unique<ConvBackend>(model);
+}