Relocate nnkit backends for nnsuite conv model (#1828)
author박종현/동작제어Lab(SR)/Staff Engineer/삼성전자 <jh1302.park@samsung.com>
Thu, 11 Oct 2018 04:48:54 +0000 (13:48 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Thu, 11 Oct 2018 04:48:54 +0000 (13:48 +0900)
Now, the implementation of nnkit backends for the nnsuite
conv model lives in the nnsuite directory (not in the nnkit directory).

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
19 files changed:
contrib/nnkit/CMakeLists.txt
contrib/nnkit/contrib/CMakeLists.txt [deleted file]
contrib/nnkit/contrib/nnsuite/CMakeLists.txt [deleted file]
contrib/nnkit/contrib/nnsuite/conv/CMakeLists.txt [deleted file]
contrib/nnsuite/conv/CMakeLists.txt
contrib/nnsuite/conv/model/CMakeLists.txt [new file with mode: 0644]
contrib/nnsuite/conv/model/include/nnsuite/conv/Model.h [moved from contrib/nnsuite/conv/include/nnsuite/conv/Model.h with 100% similarity]
contrib/nnsuite/conv/model/include/nnsuite/conv/RandomModel.h [moved from contrib/nnsuite/conv/include/nnsuite/conv/RandomModel.h with 100% similarity]
contrib/nnsuite/conv/model/src/RandomModel.cpp [moved from contrib/nnsuite/conv/src/RandomModel.cpp with 100% similarity]
contrib/nnsuite/conv/nnkit-caffe/CMakeLists.txt [moved from contrib/nnkit/contrib/nnsuite/conv/caffe/CMakeLists.txt with 100% similarity]
contrib/nnsuite/conv/nnkit-caffe/ConvBackend.cpp [moved from contrib/nnkit/contrib/nnsuite/conv/caffe/ConvBackend.cpp with 100% similarity]
contrib/nnsuite/conv/nnkit-caffe/ConvBackend.h [moved from contrib/nnkit/contrib/nnsuite/conv/caffe/ConvBackend.h with 100% similarity]
contrib/nnsuite/conv/nnkit-caffe/ConvBackend.test.cpp [moved from contrib/nnkit/contrib/nnsuite/conv/caffe/ConvBackend.test.cpp with 84% similarity]
contrib/nnsuite/conv/nnkit-caffe/Entry.cpp [moved from contrib/nnkit/contrib/nnsuite/conv/caffe/Entry.cpp with 100% similarity]
contrib/nnsuite/conv/nnkit-tflite/CMakeLists.txt [moved from contrib/nnkit/contrib/nnsuite/conv/tflite/CMakeLists.txt with 100% similarity]
contrib/nnsuite/conv/nnkit-tflite/ConvBackend.cpp [moved from contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.cpp with 83% similarity]
contrib/nnsuite/conv/nnkit-tflite/ConvBackend.h [moved from contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.h with 100% similarity]
contrib/nnsuite/conv/nnkit-tflite/ConvBackend.test.cpp [moved from contrib/nnkit/contrib/nnsuite/conv/tflite/ConvBackend.test.cpp with 84% similarity]
contrib/nnsuite/conv/nnkit-tflite/Entry.cpp [moved from contrib/nnkit/contrib/nnsuite/conv/tflite/Entry.cpp with 100% similarity]

index 558d393..918d517 100644 (file)
@@ -2,4 +2,3 @@ add_subdirectory(libs)
 add_subdirectory(backends)
 add_subdirectory(actions)
 add_subdirectory(tools)
-add_subdirectory(contrib)
diff --git a/contrib/nnkit/contrib/CMakeLists.txt b/contrib/nnkit/contrib/CMakeLists.txt
deleted file mode 100644 (file)
index 5ea6cda..0000000
+++ /dev/null
@@ -1 +0,0 @@
-add_subdirectories()
diff --git a/contrib/nnkit/contrib/nnsuite/CMakeLists.txt b/contrib/nnkit/contrib/nnsuite/CMakeLists.txt
deleted file mode 100644 (file)
index 5ea6cda..0000000
+++ /dev/null
@@ -1 +0,0 @@
-add_subdirectories()
diff --git a/contrib/nnkit/contrib/nnsuite/conv/CMakeLists.txt b/contrib/nnkit/contrib/nnsuite/conv/CMakeLists.txt
deleted file mode 100644 (file)
index b897d12..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-add_subdirectory(tflite)
-add_subdirectory(caffe)
index a2c28ee..a120de2 100644 (file)
@@ -1,6 +1,3 @@
-file(GLOB_RECURSE SOURCES "src/*.cpp")
-
-add_library(nnsuite_conv STATIC ${SOURCES})
-set_target_properties(nnsuite_conv PROPERTIES POSITION_INDEPENDENT_CODE ON)
-target_include_directories(nnsuite_conv PUBLIC include)
-target_link_libraries(nnsuite_conv PUBLIC nncc_core)
+add_subdirectory(model)
+add_subdirectory(nnkit-caffe)
+add_subdirectory(nnkit-tflite)
diff --git a/contrib/nnsuite/conv/model/CMakeLists.txt b/contrib/nnsuite/conv/model/CMakeLists.txt
new file mode 100644 (file)
index 0000000..a2c28ee
--- /dev/null
@@ -0,0 +1,6 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(nnsuite_conv STATIC ${SOURCES})
+set_target_properties(nnsuite_conv PROPERTIES POSITION_INDEPENDENT_CODE ON)
+target_include_directories(nnsuite_conv PUBLIC include)
+target_link_libraries(nnsuite_conv PUBLIC nncc_core)
@@ -37,9 +37,8 @@ public:
   TestModel(const std::string &ifm_name, const feature::Shape &ifm_shape,
             const std::string &ofm_name, const feature::Shape &ofm_shape,
             const kernel::Shape &ker_shape, const kernel::Layout &ker_layout, float *ker_data)
-    : _ifm_name(ifm_name), _ifm_shape(ifm_shape),
-      _ofm_name(ofm_name), _ofm_shape(ofm_shape),
-      _ker{ker_shape, ker_layout, ker_data}
+      : _ifm_name(ifm_name), _ifm_shape(ifm_shape), _ofm_name(ofm_name), _ofm_shape(ofm_shape),
+        _ker{ker_shape, ker_layout, ker_data}
   {
     // DO NOTHING
   }
@@ -72,14 +71,14 @@ TEST(CONV_BACKEND, conv_3x3)
 {
   const std::string ofm_name{"ofm"};
   const feature::Shape ofm_shape{1, 1, 1};
-  float ofm_data[1] = { 204.0f }; // EXPECTED
+  float ofm_data[1] = {204.0f}; // EXPECTED
 
   const std::string ifm_name{"ifm"};
   const feature::Shape ifm_shape{1, 3, 3};
-  float ifm_data[9] = { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f };
+  float ifm_data[9] = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
 
   const kernel::Shape ker_shape{1, 1, 3, 3};
-  float ker_data[9] = { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f };
+  float ker_data[9] = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
 
   using kernel::NCHWLayout;
   using tensor::LexicalLayout;
@@ -88,14 +87,12 @@ TEST(CONV_BACKEND, conv_3x3)
 
   auto backend = ConvBackend::create(model);
 
-  backend->prepare([&] (nnkit::TensorContext &ctx)
-  {
+  backend->prepare([&](nnkit::TensorContext &ctx) {
     ASSERT_EQ(ctx.size(), 1);
     ASSERT_EQ(ctx.name(0), ifm_name);
     // TODO Check IFM shape
 
-    auto fill = [&] (const nnkit::TensorContext &, uint32_t, tensor::Accessor<float> &t)
-    {
+    auto fill = [&](const nnkit::TensorContext &, uint32_t, tensor::Accessor<float> &t) {
       const auto tensor_shape = morph::caffe::as_tensor_shape(ifm_shape);
       const auto overlay = tensor::make_overlay<float, LexicalLayout>(tensor_shape, ifm_data);
 
@@ -111,13 +108,11 @@ TEST(CONV_BACKEND, conv_3x3)
 
   backend->run();
 
-  backend->teardown([&] (nnkit::TensorContext &ctx)
-  {
+  backend->teardown([&](nnkit::TensorContext &ctx) {
     ASSERT_EQ(ctx.size(), 1);
     ASSERT_EQ(ctx.name(0), ofm_name);
 
-    auto verify = [&] (const nnkit::TensorContext &, uint32_t, const tensor::Reader<float> &t)
-    {
+    auto verify = [&](const nnkit::TensorContext &, uint32_t, const tensor::Reader<float> &t) {
       const auto tensor_shape = morph::caffe::as_tensor_shape(ofm_shape);
       const auto overlay = tensor::make_overlay<float, LexicalLayout>(tensor_shape, ofm_data);
 
@@ -34,7 +34,7 @@ using namespace ::tflite::ops::builtin;
  *
  * NOTE This function throws std::bac_alloc exception on allocation failure
  */
-template<typename T> T *typed_malloc(void)
+template <typename T> T *typed_malloc(void)
 {
   if (auto res = reinterpret_cast<T *>(malloc(sizeof(T))))
   {
@@ -122,15 +122,13 @@ ConvBackend::ConvBackend(const nnsuite::conv::Model &model)
   _interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, _ifm_name.c_str(),
                                        as_dims(model.ifm_shape()), quantization);
 
-  _interp.SetTensorParametersReadOnly(2, kTfLiteFloat32 /* type */, "kernel" /* name */,
-                                      as_dims(model.ker_shape()), quantization,
-                                      reinterpret_cast<const char *>(_kernel.data()),
-                                      _kernel.size() * sizeof(float));
+  _interp.SetTensorParametersReadOnly(
+      2, kTfLiteFloat32 /* type */, "kernel" /* name */, as_dims(model.ker_shape()), quantization,
+      reinterpret_cast<const char *>(_kernel.data()), _kernel.size() * sizeof(float));
 
-  _interp.SetTensorParametersReadOnly(3, kTfLiteFloat32 /* type */, "bias" /* name */,
-                                      {static_cast<int>(_bias.size())}, quantization,
-                                      reinterpret_cast<const char *>(_bias.data()),
-                                      _bias.size() * sizeof(float));
+  _interp.SetTensorParametersReadOnly(
+      3, kTfLiteFloat32 /* type */, "bias" /* name */, {static_cast<int>(_bias.size())},
+      quantization, reinterpret_cast<const char *>(_bias.data()), _bias.size() * sizeof(float));
 
   auto param = typed_malloc<TfLiteConvParams>();
 
@@ -140,7 +138,7 @@ ConvBackend::ConvBackend(const nnsuite::conv::Model &model)
   param->activation = kTfLiteActNone;
 
   _interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
-                                 BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D));
+                                BuiltinOpResolver().FindOp(BuiltinOperator_CONV_2D));
 
   _interp.SetInputs({1});
   _interp.SetOutputs({0});
@@ -40,9 +40,8 @@ public:
   TestModel(const std::string &ifm_name, const feature::Shape &ifm_shape,
             const std::string &ofm_name, const feature::Shape &ofm_shape,
             const kernel::Shape &ker_shape, const kernel::Layout &ker_layout, float *ker_data)
-    : _ifm_name(ifm_name), _ifm_shape(ifm_shape),
-      _ofm_name(ofm_name), _ofm_shape(ofm_shape),
-      _ker{ker_shape, ker_layout, ker_data}
+      : _ifm_name(ifm_name), _ifm_shape(ifm_shape), _ofm_name(ofm_name), _ofm_shape(ofm_shape),
+        _ker{ker_shape, ker_layout, ker_data}
   {
     // DO NOTHING
   }
@@ -75,14 +74,14 @@ TEST(CONV_BACKEND, conv_3x3)
 {
   const std::string ofm_name{"ofm"};
   const feature::Shape ofm_shape{1, 1, 1};
-  float ofm_data[1] = { 204.0f }; // EXPECTED
+  float ofm_data[1] = {204.0f}; // EXPECTED
 
   const std::string ifm_name{"ifm"};
   const feature::Shape ifm_shape{1, 3, 3};
-  float ifm_data[9] = { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f };
+  float ifm_data[9] = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
 
   const kernel::Shape ker_shape{1, 1, 3, 3};
-  float ker_data[9] = { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f };
+  float ker_data[9] = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f};
 
   using kernel::NHWCLayout;
   using tensor::LexicalLayout;
@@ -91,14 +90,12 @@ TEST(CONV_BACKEND, conv_3x3)
 
   ConvBackend backend{model};
 
-  backend.prepare([&] (nnkit::TensorContext &ctx)
-  {
+  backend.prepare([&](nnkit::TensorContext &ctx) {
     ASSERT_EQ(ctx.size(), 1);
     ASSERT_EQ(ctx.name(0), ifm_name);
     // TODO Check IFM shape
 
-    auto fill = [&] (const nnkit::TensorContext &, uint32_t, tensor::Accessor<float> &t)
-    {
+    auto fill = [&](const nnkit::TensorContext &, uint32_t, tensor::Accessor<float> &t) {
       const auto tensor_shape = as_tensor_shape(ifm_shape);
       const auto overlay = tensor::make_overlay<float, LexicalLayout>(tensor_shape, ifm_data);
 
@@ -114,13 +111,11 @@ TEST(CONV_BACKEND, conv_3x3)
 
   backend.run();
 
-  backend.teardown([&] (nnkit::TensorContext &ctx)
-  {
+  backend.teardown([&](nnkit::TensorContext &ctx) {
     ASSERT_EQ(ctx.size(), 1);
     ASSERT_EQ(ctx.name(0), ofm_name);
 
-    auto verify = [&] (const nnkit::TensorContext &, uint32_t, const tensor::Reader<float> &t)
-    {
+    auto verify = [&](const nnkit::TensorContext &, uint32_t, const tensor::Reader<float> &t) {
       const auto tensor_shape = as_tensor_shape(ofm_shape);
       const auto overlay = tensor::make_overlay<float, LexicalLayout>(tensor_shape, ofm_data);