[Filter/TF-Lite] upload the tensorflow lite core
author HyoungjooAhn <hello.ahn@samsung.com>
Thu, 5 Jul 2018 07:54:20 +0000 (16:54 +0900)
committer함명주/동작제어Lab(SR)/Principal Engineer/삼성전자 <myungjoo.ham@samsung.com>
Fri, 6 Jul 2018 02:47:39 +0000 (11:47 +0900)
some of main functions are uploaded

Signed-off-by: HyoungjooAhn <hello.ahn@samsung.com>
.gitignore
debian/rules
gst/tensor_filter/CMakeLists.txt
gst/tensor_filter/tensor_filter.c
gst/tensor_filter/tensor_filter_tensorflow_lite.c
gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc [new file with mode: 0644]
gst/tensor_filter/tensor_filter_tensorflow_lite_core.h [new file with mode: 0644]
packaging/nnstreamer.spec

index 84425f9..363ea04 100644 (file)
@@ -12,6 +12,7 @@
 # vi files
 *.h~
 *.c~
+*.cc~
 *.swp
 
 # binary files
index 016bea1..85dc28f 100755 (executable)
@@ -16,3 +16,5 @@
 %:
        dh $@ --buildsystem=cmake --builddirectory=build
 
+override_dh_auto_configure:
+       dh_auto_configure -- -DDISABLE_TENSORFLOW_LITE=ON
index c0e9fc2..829a3cd 100644 (file)
@@ -1,5 +1,7 @@
 CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 
+OPTION(DISABLE_TENSORFLOW_LITE "Disable tensorflow-lite support" OFF)
+
 ADD_LIBRARY(tensor_filter SHARED
        tensor_filter.c
        tensor_filter_tensorflow_lite.c
@@ -10,8 +12,27 @@ TARGET_LINK_LIBRARIES(tensor_filter dl ${pkgs_LIBRARIES})
 TARGET_INCLUDE_DIRECTORIES(tensor_filter PUBLIC ${pkgs_INCLUDE_DIRS})
 TARGET_COMPILE_OPTIONS(tensor_filter PUBLIC ${pkgs_CFLAGS_OTHER})
 
-INSTALL(TARGETS tensor_filter
-       RUNTIME DESTINATION ${EXEC_PREFIX}
-       LIBRARY DESTINATION ${LIB_INSTALL_DIR}
-       ARCHIVE DESTINATION ${LIB_INSTALL_DIR}
-       )
+# check whether TENSORFLOW_LITE is available.
+# DISABLE_TENSORFLOW_LITE is defined at /debian/rules according to the build environment
+IF(DISABLE_TENSORFLOW_LITE) # NOT AVAILABLE
+       INSTALL(TARGETS tensor_filter
+               RUNTIME DESTINATION ${EXEC_PREFIX}
+               LIBRARY DESTINATION ${LIB_INSTALL_DIR}
+               ARCHIVE DESTINATION ${LIB_INSTALL_DIR}
+               )
+ELSE(DISABLE_TENSORFLOW_LITE) # AVAILABLE
+       ADD_LIBRARY(tensor_filter_tflitecore SHARED
+               tensor_filter_tensorflow_lite_core.cc
+               )
+
+       TARGET_LINK_LIBRARIES(tensor_filter_tflitecore tensor_filter ${pkgs_LIBRARIES} tensorflow-lite)
+       TARGET_INCLUDE_DIRECTORIES(tensor_filter_tflitecore PUBLIC ${pkgs_INCLUDE_DIRS})
+       TARGET_COMPILE_OPTIONS(tensor_filter_tflitecore PUBLIC ${pkgs_CFLAGS_OTHER})
+
+       INSTALL(TARGETS tensor_filter tensor_filter_tflitecore
+               RUNTIME DESTINATION ${EXEC_PREFIX}
+               LIBRARY DESTINATION ${LIB_INSTALL_DIR}
+               ARCHIVE DESTINATION ${LIB_INSTALL_DIR}
+               )
+ENDIF(DISABLE_TENSORFLOW_LITE)
+
index 4c4899c..02eb026 100644 (file)
@@ -86,7 +86,12 @@ GstTensor_Filter_Framework *tensor_filter_supported[] = {
   [_T_F_UNDEFINED] = NULL,
 
   [_T_F_CUSTOM] = &NNS_support_custom,
+
+#ifdef DISABLE_TENSORFLOW_LITE
+  [_T_F_TENSORFLOW_LITE] = NULL,
+#else
   [_T_F_TENSORFLOW_LITE] = &NNS_support_tensorflow_lite,
+#endif
   [_T_F_TENSORFLOW] = NULL,
   [_T_F_CAFFE2] = NULL,
 
index 703cdcb..93d2f32 100644 (file)
@@ -53,6 +53,7 @@
  */
 
 #include "tensor_filter.h"
+#include "tensor_filter_tensorflow_lite_core.h"
 #include <glib.h>
 
 /**
diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.cc
new file mode 100644 (file)
index 0000000..571bf61
--- /dev/null
@@ -0,0 +1,112 @@
+/**
+ * Copyright (C) 2017 - 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * PROPRIETARY/CONFIDENTIAL
+ *
+ * This software is the confidential and proprietary information of
+ * SAMSUNG ELECTRONICS ("Confidential Information"). You shall not
+ * disclose such Confidential Information and shall use it only in
+ * accordance with the terms of the license agreement you entered
+ * into with SAMSUNG ELECTRONICS.  SAMSUNG make no representations
+ * or warranties about the suitability of the software, either
+ * express or implied, including but not limited to the implied
+ * warranties of merchantability, fitness for a particular purpose,
+ * or non-infringement. SAMSUNG shall not be liable for any damages
+ * suffered by licensee as a result of using, modifying or distributing
+ * this software or its derivatives.
+ */
+
+/**
+ * @file   tensor_filter_tensorflow_lite_core.cc
+ * @author HyoungJoo Ahn <hello.ahn@samsung.com>
+ * @date   7/5/2018
+ * @brief  connection with tflite libraries.
+ *
+ * @bug     No known bugs.
+ * @todo    Invoke() should be implemented.
+ * @todo    If it is required, class will be implemented as a singleton.
+ */
+#include "tensor_filter_tensorflow_lite_core.h"
+
+/**
+ * @brief      call the creator of TFLiteCore class.
+ * @param      _model_path     : the logical path to '{model_name}.tflite' file
+ * @return     TFLiteCore class
+ */
+extern void *
+tflite_core_new (char *_model_path)
+{
+  return new TFLiteCore (_model_path);
+}
+
+/**
+ * @brief      delete the TFLiteCore class.
+ * @param      tflite  : the class object
+ * @return     Nothing
+ */
+extern void
+tflite_core_delete (void *tflite)
+{
+  TFLiteCore *c = (TFLiteCore *) tflite;
+  delete c;
+}
+
+/**
+ * @brief      get model path
+ * @param      tflite  : the class object
+ * @return     model path
+ */
+extern char *
+tflite_core_getModelPath (void *tflite)
+{
+  TFLiteCore *c = (TFLiteCore *) tflite;
+  return c->getModelPath ();
+}
+
+/**
+ * @brief      get the Dimension of Input Tensor of model
+ * @param      tflite  : the class object
+ * @return     the input dimension
+ */
+int *
+tflite_core_getInputDim (void *tflite)
+{
+  TFLiteCore *c = (TFLiteCore *) tflite;
+  return c->getInputTensorDim ();
+}
+
+/**
+ * @brief      get the Dimension of Output Tensor of model
+ * @param      tflite  : the class object
+ * @return     the output dimension
+ */
+int *
+tflite_core_getOutputDim (void *tflite)
+{
+  TFLiteCore *c = (TFLiteCore *) tflite;
+  return c->getOutputTensorDim ();
+}
+
+/**
+ * @brief      get the size of Input Tensor of model
+ * @param      tflite  : the class object
+ * @return     the number of input tensors
+ */
+int
+tflite_core_getInputSize (void *tflite)
+{
+  TFLiteCore *c = (TFLiteCore *) tflite;
+  return c->getInputTensorSize ();
+}
+
+/**
+ * @brief      get the size of Output Tensor of model
+ * @param      tflite  : the class object
+ * @return     the number of output tensors
+ */
+int
+tflite_core_getOutputSize (void *tflite)
+{
+  TFLiteCore *c = (TFLiteCore *) tflite;
+  return c->getOutputTensorSize ();
+}
diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite_core.h b/gst/tensor_filter/tensor_filter_tensorflow_lite_core.h
new file mode 100644 (file)
index 0000000..1a7e41b
--- /dev/null
@@ -0,0 +1,182 @@
+/**
+ * Copyright (C) 2017 - 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ *
+ * PROPRIETARY/CONFIDENTIAL
+ *
+ * This software is the confidential and proprietary information of
+ * SAMSUNG ELECTRONICS ("Confidential Information"). You shall not
+ * disclose such Confidential Information and shall use it only in
+ * accordance with the terms of the license agreement you entered
+ * into with SAMSUNG ELECTRONICS.  SAMSUNG make no representations
+ * or warranties about the suitability of the software, either
+ * express or implied, including but not limited to the implied
+ * warranties of merchantability, fitness for a particular purpose,
+ * or non-infringement. SAMSUNG shall not be liable for any damages
+ * suffered by licensee as a result of using, modifying or distributing
+ * this software or its derivatives.
+ */
+
+/**
+ * @file   tensor_filter_tensorflow_lite_core.h
+ * @author HyoungJoo Ahn <hello.ahn@samsung.com>
+ * @date   7/5/2018
+ * @brief  connection with tflite libraries.
+ *
+ * @bug     No known bugs.
+ * @todo    Invoke() should be implemented.
+ * @todo    If it is required, class will be implemented as a singleton.
+ */
+#ifndef TENSOR_FILTER_TENSORFLOW_LITE_CORE_H
+#define TENSOR_FILTER_TENSORFLOW_LITE_CORE_H
+
+#ifdef __cplusplus
+#include <iostream>
+#include <cstring>
+#include "tensorflow/contrib/lite/model.h"
+#include "tensorflow/contrib/lite/optional_debug_tools.h"
+#include "tensorflow/contrib/lite/string_util.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+
+/**
+ * @brief ring cache structure
+ */
+class TFLiteCore
+{
+public:
+  /**
+   * member functions.
+   */
+  TFLiteCore (char *_model_path);
+  char *getModelPath ()
+  {
+    return model_path;
+  }
+  int loadModel ();
+  const char *getInputTensorName ();
+  const char *getOutputTensorName ();
+  int getInputTensorSize ()
+  {
+    return input_size;
+  }
+  int getOutputTensorSize ()
+  {
+    return output_size;
+  }
+  int *getInputTensorDim ();
+  int *getOutputTensorDim ();
+
+private:
+  /**
+   * member variables.
+   */
+  char *model_path;
+  int tensor_size;
+  int node_size;
+  int input_size;
+  int output_size;
+  const char *input_name;
+  const char *output_name;
+  int input_idx;
+  int output_idx;
+  std::unique_ptr < tflite::Interpreter > interpreter;
+  std::unique_ptr < tflite::FlatBufferModel > model;
+};
+
+/**
+ * @brief      TFLiteCore creator
+ * @param      _model_path     : the logical path to '{model_name}.tflite' file
+ * @note       the model of _model_path will be loaded simultaneously
+ * @return     Nothing
+ */
+TFLiteCore::TFLiteCore (char *_model_path)
+{
+  model_path = _model_path;
+  loadModel ();
+}
+
+/**
+ * @brief      load the tflite model
+ * @note       the model will be loaded
+ * @return     1 on success; -1 if the model cannot be mmapped, -2 if the interpreter cannot be constructed
+ */
+int
+TFLiteCore::loadModel ()
+{
+  if (!interpreter) {
+    model =
+        std::unique_ptr < tflite::FlatBufferModel >
+        (tflite::FlatBufferModel::BuildFromFile (model_path));
+    if (!model) {
+      std::cout << "Failed to mmap model" << std::endl;
+      return -1;
+    }
+    model->error_reporter ();
+    std::cout << "model loaded" << std::endl;
+
+    tflite::ops::builtin::BuiltinOpResolver resolver;
+    tflite::InterpreterBuilder (*model, resolver) (&interpreter);
+    if (!interpreter) {
+      std::cout << "Failed to construct interpreter" << std::endl;
+      return -2;
+    }
+  }
+  // fill class parameters
+  tensor_size = interpreter->tensors_size ();
+  node_size = interpreter->nodes_size ();
+  input_size = interpreter->inputs ().size ();
+  input_name = interpreter->GetInputName (0);
+  output_size = interpreter->outputs ().size ();
+  output_name = interpreter->GetOutputName (0);
+
+  int t_size = interpreter->tensors_size ();
+  for (int i = 0; i < t_size; i++) {
+    if (strcmp (interpreter->tensor (i)->name,
+            interpreter->GetInputName (0)) == 0)
+      input_idx = i;
+    if (strcmp (interpreter->tensor (i)->name,
+            interpreter->GetOutputName (0)) == 0)
+      output_idx = i;
+  }
+  return 1;
+}
+
+/**
+ * @brief      return the Dimension of Input Tensor.
+ * @return the array of integer.
+ */
+int *
+TFLiteCore::getInputTensorDim ()
+{
+  return interpreter->tensor (input_idx)->dims->data;
+}
+
+/**
+ * @brief      return the Dimension of Output Tensor.
+ * @return the array of integer.
+ */
+int *
+TFLiteCore::getOutputTensorDim ()
+{
+  return interpreter->tensor (output_idx)->dims->data;
+}
+
+/**
+ * @brief      the definition of functions to be used at C files.
+ */
+extern "C"
+{
+#endif
+
+  extern void *tflite_core_new (char *_model_path);
+  extern void tflite_core_delete (void *tflite);
+  extern char *tflite_core_getModelPath (void *tflite);
+  extern int *tflite_core_getInputDim (void *tflite);
+  extern int *tflite_core_getOutputDim (void *tflite);
+  extern int tflite_core_getInputSize (void *tflite);
+  extern int tflite_core_getOutputSize (void *tflite);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
index 87589e0..7c991af 100644 (file)
@@ -30,6 +30,8 @@ BuildRequires:        gst-plugins-base
 BuildRequires: gtest-devel
 # a few test cases uses python
 BuildRequires: python
+# for tensorflow-lite
+BuildRequires: tensorflow-lite-devel
 
 %if 0%{?testcoverage}
 BuildRequires: taos-ci-unittest-coverage-assessment