[Filter/TF] Skeleton for Tensorflow support
author jijoong.moon <jijoong.moon@samsung.com>
Thu, 2 Aug 2018 02:54:48 +0000 (11:54 +0900)
committer함명주/동작제어Lab(SR)/Principal Engineer/삼성전자 <myungjoo.ham@samsung.com>
Fri, 17 Aug 2018 06:36:11 +0000 (15:36 +0900)
Add Skeleton code to support tensorflow model

**Self evaluation:**
1. Build test:  [X]Passed [ ]Failed [ ]Skipped
2. Run test:  [ ]Passed [ ]Failed [X]Skipped

Signed-off-by: jijoong.moon <jijoong.moon@samsung.com>
CMakeLists.txt
debian/control
gst/tensor_filter/CMakeLists.txt
gst/tensor_filter/tensor_filter.c
gst/tensor_filter/tensor_filter.h
gst/tensor_filter/tensor_filter_tensorflow.c [new file with mode: 0644]
gst/tensor_filter/tensor_filter_tensorflow_core.cc [new file with mode: 0644]
include/tensor_filter_tensorflow_core.h [new file with mode: 0644]

index fc08670..aa93ae3 100644 (file)
@@ -4,6 +4,7 @@ PROJECT(nnstreamer C CXX)
 
 OPTION(TIZEN "Enable Tizen build mode" OFF)
 OPTION(DISABLE_TENSORFLOW_LITE "Disable tensorflow-lite support" OFF)
+OPTION(DISABLE_TENSORFLOW "Disable tensorflow support" OFF)
 OPTION(INSTALL_EXAMPLE_APP "Install example applications" OFF)
 
 ADD_DEFINITIONS(-DVERSION="0.0.1")
@@ -41,6 +42,8 @@ IF(TIZEN OR GTEST_LIB)
        SET(gtestLink gtest pthread)
        SET(gtestSrc "")
        SET(gtestInc "")
+       SET(DISABLE_TENSORFLOW ON)
+       MESSAGE("DISABLE_TENSORFLOW : ${DISABLE_TENSORFLOW}")
 ELSE(TIZEN OR GTEST_LIB)
        IF (NOT EXISTS /usr/src/gtest/src/gtest-all.cc)
                MESSAGE(FATAL FATAL_ERROR "You need to install libgtest-dev or libgtest.so.")
index cb17257..5dacc4e 100644 (file)
@@ -6,7 +6,7 @@ Build-Depends: gcc, cmake, libgstreamer1.0-dev, libgstreamer-plugins-base1.0-dev
  libgtest-dev,
  debhelper (>=9),
  gstreamer1.0-tools, gstreamer1.0-plugins-base, gstreamer1.0-plugins-good,
- libpng-dev, tensorflow-lite-dev
+ libpng-dev, tensorflow-lite-dev, tensorflow-dev
 Standards-Version: 3.9.6
 Homepage: https://github.sec.samsung.net/STAR/nnstreamer
 
index a35fcb0..bc56830 100644 (file)
@@ -1,41 +1,66 @@
 # check whether TENSORFLOW_LITE is available.
 # DISABLE_TENSORFLOW_LITE is defined at /debian/rules according to the build environment
+
+set(FILTER_SOURCE tensor_filter.c tensor_filter_custom.c)
+set(FILTER_LIB tensor_filter)
+set(FILTER_TARGET tensor_filter)
+
+IF(NOT DISABLE_TENSORFLOW_LITE)
+  list(APPEND FILTER_SOURCE tensor_filter_tensorflow_lite.c)
+  list(APPEND FILTER_LIB tensor_filter_tflitecore)
+  list(APPEND FILTER_TARGET tensor_filter_tflitecore)
+  PKG_CHECK_MODULES(TENSORFLOW_LITE REQUIRED tensorflow-lite)
+  LINK_DIRECTORIES(${TENSORFLOW_LITE_LIBRARY_DIRS})
+ENDIF(NOT DISABLE_TENSORFLOW_LITE)
+
+IF(NOT DISABLE_TENSORFLOW)
+  list(APPEND FILTER_SOURCE tensor_filter_tensorflow.c)
+  list(APPEND FILTER_LIB tensor_filter_tfcore)
+  list(APPEND FILTER_TARGET tensor_filter_tfcore)
+  PKG_CHECK_MODULES(TENSORFLOW REQUIRED tensorflow)
+  LINK_DIRECTORIES(${TENSORFLOW_LIBRARY_DIRS})
+ENDIF(NOT DISABLE_TENSORFLOW)
+
+ADD_LIBRARY(tensor_filter SHARED
+  ${FILTER_SOURCE}
+  )
+TARGET_LINK_LIBRARIES(${FILTER_LIB} dl ${pkgs_LIBRARIES})
+TARGET_INCLUDE_DIRECTORIES(tensor_filter PUBLIC ${pkgs_INCLUDE_DIRS})
+TARGET_COMPILE_OPTIONS(tensor_filter PUBLIC ${pkgs_CFLAGS_OTHER})
+
 IF(DISABLE_TENSORFLOW_LITE) # NOT AVAILABE
-       ADD_DEFINITIONS(-DDISABLE_TENSORFLOW_LITE)
-       ADD_LIBRARY(tensor_filter SHARED
-               tensor_filter.c
-               tensor_filter_custom.c
-               )
-       TARGET_LINK_LIBRARIES(tensor_filter dl ${pkgs_LIBRARIES})
-       TARGET_INCLUDE_DIRECTORIES(tensor_filter PUBLIC ${pkgs_INCLUDE_DIRS})
-       TARGET_COMPILE_OPTIONS(tensor_filter PUBLIC ${pkgs_CFLAGS_OTHER})
-
-       INSTALL(TARGETS tensor_filter
-               RUNTIME DESTINATION ${EXEC_PREFIX}
-               LIBRARY DESTINATION ${GST_INSTALL_DIR}
-               ARCHIVE DESTINATION ${LIB_INSTALL_DIR}
-               )
-ELSE(DISABLE_TENSORFLOW_LITE) # AVAILABE
-       ADD_LIBRARY(tensor_filter SHARED
-               tensor_filter.c
-               tensor_filter_tensorflow_lite.c
-               tensor_filter_custom.c
-               )
-       TARGET_LINK_LIBRARIES(tensor_filter tensor_filter_tflitecore dl ${pkgs_LIBRARIES})
-       TARGET_INCLUDE_DIRECTORIES(tensor_filter PUBLIC ${pkgs_INCLUDE_DIRS})
-       TARGET_COMPILE_OPTIONS(tensor_filter PUBLIC ${pkgs_CFLAGS_OTHER})
-
-       ADD_LIBRARY(tensor_filter_tflitecore SHARED
-               tensor_filter_tensorflow_lite_core.cc
-               )
-
-       TARGET_LINK_LIBRARIES(tensor_filter_tflitecore ${pkgs_LIBRARIES} tensorflow-lite)
-       TARGET_INCLUDE_DIRECTORIES(tensor_filter_tflitecore PUBLIC ${pkgs_INCLUDE_DIRS})
-       TARGET_COMPILE_OPTIONS(tensor_filter_tflitecore PUBLIC ${pkgs_CFLAGS_OTHER})
-
-       INSTALL(TARGETS tensor_filter tensor_filter_tflitecore
-               RUNTIME DESTINATION ${EXEC_PREFIX}
-               LIBRARY DESTINATION ${GST_INSTALL_DIR}
-               ARCHIVE DESTINATION ${LIB_INSTALL_DIR}
-               )
+  ADD_DEFINITIONS(-DDISABLE_TENSORFLOW_LITE)
 ENDIF(DISABLE_TENSORFLOW_LITE)
+
+IF(DISABLE_TENSORFLOW) # NOT AVAILABLE
+  ADD_DEFINITIONS(-DDISABLE_TENSORFLOW)
+ENDIF(DISABLE_TENSORFLOW)
+
+IF(NOT DISABLE_TENSORFLOW_LITE) # AVAILABLE
+  PKG_CHECK_MODULES(TENSORFLOW_LITE REQUIRED tensorflow-lite)
+  ADD_LIBRARY(tensor_filter_tflitecore SHARED
+    tensor_filter_tensorflow_lite_core.cc
+    )
+
+  TARGET_LINK_LIBRARIES(tensor_filter_tflitecore ${pkgs_LIBRARIES} tensorflow-lite)
+  TARGET_INCLUDE_DIRECTORIES(tensor_filter_tflitecore PUBLIC ${pkgs_INCLUDE_DIRS} ${TENSORFLOW_LITE_INCLUDE_DIRS})
+  TARGET_COMPILE_OPTIONS(tensor_filter_tflitecore PUBLIC ${pkgs_CFLAGS_OTHER})
+  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${EXTRA_CFLAGS} -Wno-sign-compare")
+  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CXXFLAGS} -Wno-sign-compare")
+ENDIF(NOT DISABLE_TENSORFLOW_LITE) # AVAILABLE
+
+IF(NOT DISABLE_TENSORFLOW) # AVAILABLE
+  ADD_LIBRARY(tensor_filter_tfcore SHARED
+    tensor_filter_tensorflow_core.cc
+    )
+
+  TARGET_LINK_LIBRARIES(tensor_filter_tfcore ${pkgs_LIBRARIES} ${TENSORFLOW_LIBRARIES})
+  TARGET_INCLUDE_DIRECTORIES(tensor_filter_tfcore PUBLIC ${pkgs_INCLUDE_DIRS} ${TENSORFLOW_INCLUDE_DIRS})
+  TARGET_COMPILE_OPTIONS(tensor_filter_tfcore PUBLIC ${pkgs_CFLAGS_OTHER})
+ENDIF(NOT DISABLE_TENSORFLOW) # AVAILABLE
+
+INSTALL(TARGETS ${FILTER_TARGET}
+  RUNTIME DESTINATION ${EXEC_PREFIX}
+  LIBRARY DESTINATION ${GST_INSTALL_DIR}
+  ARCHIVE DESTINATION ${LIB_INSTALL_DIR}
+  )
index fa2fcc2..ab00b14 100644 (file)
@@ -67,7 +67,11 @@ GstTensor_Filter_Framework *tensor_filter_supported[] = {
 #else
   [_T_F_TENSORFLOW_LITE] = &NNS_support_tensorflow_lite,
 #endif
+#ifdef DISABLE_TENSORFLOW
   [_T_F_TENSORFLOW] = NULL,
+#else
+  [_T_F_TENSORFLOW] = &NNS_support_tensorflow,
+#endif
   [_T_F_CAFFE2] = NULL,
 
   0,
index 29fbd46..4f57432 100644 (file)
@@ -176,6 +176,7 @@ struct _GstTensor_Filter_Framework
 };
 
 extern GstTensor_Filter_Framework NNS_support_tensorflow_lite;
+extern GstTensor_Filter_Framework NNS_support_tensorflow;
 extern GstTensor_Filter_Framework NNS_support_custom;
 
 extern GstTensor_Filter_Framework *tensor_filter_supported[];
diff --git a/gst/tensor_filter/tensor_filter_tensorflow.c b/gst/tensor_filter/tensor_filter_tensorflow.c
new file mode 100644 (file)
index 0000000..5ff9caa
--- /dev/null
@@ -0,0 +1,176 @@
+/**
+ * GStreamer Tensor_Filter, Tensorflow Module
+ * Copyright (C) 2018 Jijoong Moon <jijoong.moon@samsung.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ */
+/**
+ * @file       tensor_filter_tensorflow.c
+ * @date       02 Aug 2018
+ * @brief      Tensorflow module for tensor_filter gstreamer plugin
+ * @see                http://github.com/TO-BE-DETERMINED-SOON
+ * @see                https://github.sec.samsung.net/STAR/nnstreamer
+ * @author     Jijoong Moon <jijoong.moon@samsung.com>
+ * @bug                No known bugs except for NYI items
+ *
+ * This is the per-NN-framework plugin (tensorflow) for tensor_filter.
+ * Fill in "GstTensor_Filter_Framework" for tensor_filter.h/c
+ *
+ */
+
+#include "tensor_filter.h"
+#include "tensor_filter_tensorflow_core.h"
+#include <glib.h>
+
+/**
+ * @brief internal data of tensorflow
+ */
+struct _Tf_data
+{
+  void *tf_private_data;
+};
+typedef struct _Tf_data tf_data;
+
+/**
+ * @brief Load tensorflow modelfile
+ * @param filter : tensor_filter instance
+ * @param private_data : tensorflow plugin's private data
+ * @return 0 if successfully loaded. 1 if skipped (already loaded). -1 if error
+ */
+static int
+tf_loadModelFile (const GstTensor_Filter * filter, void **private_data)
+{
+  tf_data *tf;
+  if (filter->privateData != NULL) {
+    /** @todo : Check the integrity of filter->data and filter->modelFilename, nnfw */
+    return 1;
+  }
+  tf = g_new0 (tf_data, 1); /** initialize tf Fill Zero! */
+  *private_data = tf;
+  tf->tf_private_data = tf_core_new (filter->prop.modelFilename);
+  if (tf->tf_private_data) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+/**
+ * @brief The open callback for GstTensor_Filter_Framework. Called before anything else
+ * @param filter : tensor_filter instance
+ * @param private_data : tensorflow plugin's private data
+ */
+static void
+tf_open (const GstTensor_Filter * filter, void **private_data)
+{
+  int retval = tf_loadModelFile (filter, private_data);
+  g_assert (retval == 0);       /** This must be called only once */
+}
+
+/**
+ * @brief The mandatory callback for GstTensor_Filter_Framework
+ * @param[in] inptr The input tensor
+ * @param[out] outptr The output tensor
+ */
+static uint8_t *
+tf_invoke (const GstTensor_Filter * filter, void **private_data,
+    const uint8_t * inptr, uint8_t * outptr)
+{
+  int retval;
+  uint8_t *allocated_outptr;
+  tf_data *tf;
+  tf = *private_data;
+  g_assert (filter->privateData && *private_data == filter->privateData);
+  retval =
+      tf_core_invoke (tf->tf_private_data, (uint8_t *) inptr,
+      &allocated_outptr);
+  g_assert (retval == 0);
+  return allocated_outptr;
+}
+
+/**
+ * @brief The optional callback for GstTensor_Filter_Framework
+ */
+static int
+tf_getInputDim (const GstTensor_Filter * filter, void **private_data,
+    tensor_dim inputDimension, tensor_type * type)
+{
+  int temp_idx = 0;
+  tf_data *tf;
+  tf = *private_data;
+  temp_idx = tf_core_getInputSize (tf->tf_private_data);
+  if (temp_idx > 0)
+    temp_idx--;
+  else
+    temp_idx = 0;
+  g_assert (filter->privateData && *private_data == filter->privateData);
+  return tf_core_getInputDim (tf->tf_private_data, temp_idx,
+      inputDimension, type);
+}
+
+/**
+ * @brief The optional callback for GstTensor_Filter_Framework
+ */
+static int
+tf_getOutputDim (const GstTensor_Filter * filter, void **private_data,
+    tensor_dim outputDimension, tensor_type * type)
+{
+  int temp_idx = 0;
+  tf_data *tf;
+  tf = *private_data;
+  temp_idx = tf_core_getOutputSize (tf->tf_private_data);
+  if (temp_idx > 0)
+    temp_idx--;
+  else
+    temp_idx = 0;
+  g_assert (filter->privateData && *private_data == filter->privateData);
+  return tf_core_getOutputDim (tf->tf_private_data, temp_idx,
+      outputDimension, type);
+}
+
+/**
+ * @brief The set-input-dim callback for GstTensor_Filter_Framework
+ */
+static int
+tf_setInputDim (const GstTensor_Filter * filter, void **private_data,
+    const tensor_dim iDimension, const tensor_type iType,
+    tensor_dim oDimension, tensor_type * oType)
+{
+  /** @todo call tf core apis */
+  return 0;                     /** NYI */
+}
+
+/**
+ * @brief Free privateData and move on.
+ */
+static void
+tf_close (const GstTensor_Filter * filter, void **private_data)
+{
+  tf_data *tf;
+  tf = *private_data;
+  tf_core_delete (tf->tf_private_data);
+  g_free (tf);
+  *private_data = NULL;
+  g_assert (filter->privateData == NULL);
+}
+
+GstTensor_Filter_Framework NNS_support_tensorflow = {
+  .name = "tensorflow",
+  .allow_in_place = FALSE,      /** @todo: support this to optimize performance later. */
+  .allocate_in_invoke = TRUE,
+  .invoke_NN = tf_invoke,
+  .getInputDimension = tf_getInputDim,
+  .getOutputDimension = tf_getOutputDim,
+  .setInputDimension = tf_setInputDim,
+  .open = tf_open,
+  .close = tf_close,
+};
diff --git a/gst/tensor_filter/tensor_filter_tensorflow_core.cc b/gst/tensor_filter/tensor_filter_tensorflow_core.cc
new file mode 100644 (file)
index 0000000..6ca8d8b
--- /dev/null
@@ -0,0 +1,283 @@
+/**
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ * Copyright (C) 2018 HyoungJoo Ahn <hello.ahn@samsung.com>
+ * Copyright (C) 2018 Jijoong Moon <jijoong.moon@samsung.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ */
+/**
+ * @file   tensor_filter_tensorflow_core.cc
+ * @author HyoungJoo Ahn <hello.ahn@samsung.com>
+ * @author Jijoong Moon <jijoong.moon@samsung.com>
+ * @date   08/02/2018
+ * @brief  connection with tensorflow libraries.
+ *
+ * @bug     No known bugs.
+ */
+
+#include <sys/time.h>
+#include <unistd.h>
+#include <algorithm>
+
+#include "tensor_filter_tensorflow_core.h"
+
+/**
+ * @brief Macro for debug mode.
+ */
+#ifndef DBG
+#define DBG FALSE
+#endif
+
+/**
+ * @brief Macro for debug message.
+ */
+#define _print_log(...) if (DBG) g_message (__VA_ARGS__)
+
+/**
+ * @brief      TFCore creator
+ * @param      _model_path     : the logical path to '{model_name}.tffile' file
+ * @note       the model of _model_path will be loaded simultaneously
+ * @return     Nothing
+ */
+TFCore::TFCore (const char *_model_path)
+{
+  model_path = _model_path;
+  input_idx_list_len = 0;
+  output_idx_list_len = 0;
+
+  loadModel ();
+}
+
+/**
+ * @brief      TFCore Destructor
+ * @return     Nothing
+ */
+TFCore::~TFCore ()
+{
+  delete[]input_idx_list;
+  delete[]output_idx_list;
+}
+
+/**
+ * @brief      get millisecond for time profiling.
+ * @note       it returns the millisecond.
+ * @param t    : the time struct.
+ * @return the millisecond of t.
+ */
+double
+TFCore::get_ms (struct timeval t)
+{
+  return (t.tv_sec * 1000000 + t.tv_usec);
+}
+
+/**
+ * @brief      load the tf model
+ * @note       the model will be loaded
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFCore::loadModel ()
+{
+
+  return 0;
+}
+
+/**
+ * @brief      return the data type of the tensor
+ * @param tensor_idx   : the index of the tensor
+ * @param[out] type    : the data type of the input tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFCore::getTensorType (int tensor_idx, tensor_type * type)
+{
+
+  return 0;
+}
+
+/**
+ * @brief      return the Dimension of Input Tensor.
+ * @param idx  : the index of the input tensor
+ * @param[out] dim     : the array of the input tensor
+ * @param[out] type    : the data type of the input tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFCore::getInputTensorDim (int idx, tensor_dim dim, tensor_type * type)
+{
+  if (idx >= input_size) {
+    return -1;
+  }
+  int ret = getTensorDim (input_idx_list[idx], dim, type);
+  return ret;
+}
+
+/**
+ * @brief      return the Dimension of Output Tensor.
+ * @param idx  : the index of the output tensor
+ * @param[out] dim     : the array of the output tensor
+ * @param[out] type    : the data type of the output tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFCore::getOutputTensorDim (int idx, tensor_dim dim, tensor_type * type)
+{
+  if (idx >= output_size) {
+    return -1;
+  }
+  int ret = getTensorDim (output_idx_list[idx], dim, type);
+  return ret;
+}
+
+/**
+ * @brief      return the Dimension of Tensor.
+ * @param tensor_idx   : the real index of model of the tensor
+ * @param[out] dim     : the array of the tensor
+ * @param[out] type    : the data type of the tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFCore::getTensorDim (int tensor_idx, tensor_dim dim, tensor_type * type)
+{
+
+  return 0;
+}
+
+/**
+ * @brief      return the number of Input Tensors.
+ * @return     the number of Input Tensors.
+ */
+int
+TFCore::getInputTensorSize ()
+{
+  return input_size;
+}
+
+/**
+ * @brief      return the number of Output Tensors.
+ * @return     the number of Output Tensors
+ */
+int
+TFCore::getOutputTensorSize ()
+{
+  return output_size;
+}
+
+/**
+ * @brief      run the model with the input.
+ * @param[in] inptr : The input tensor
+ * @param[out]  outptr : The output tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+TFCore::invoke (uint8_t * inptr, uint8_t ** outptr)
+{
+  return 0;
+}
+
+extern void *
+tf_core_new (const char *_model_path)
+{
+  return new TFCore (_model_path);
+}
+
+/**
+ * @brief      delete the TFCore class.
+ * @param      tf      : the class object
+ * @return     Nothing
+ */
+extern void
+tf_core_delete (void *tf)
+{
+  TFCore *c = (TFCore *) tf;
+  delete c;
+}
+
+/**
+ * @brief      get model path
+ * @param      tf      : the class object
+ * @return     model path
+ */
+extern const char *
+tf_core_getModelPath (void *tf)
+{
+  TFCore *c = (TFCore *) tf;
+  return c->getModelPath ();
+}
+
+/**
+ * @brief      get the Dimension of Input Tensor of model
+ * @param      tf      : the class object
+ * @param idx  : the index of the input tensor
+ * @param[out] dim     : the array of the input tensor
+ * @param[out] type    : the data type of the input tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+tf_core_getInputDim (void *tf, int idx, tensor_dim dim, tensor_type * type)
+{
+  TFCore *c = (TFCore *) tf;
+  return c->getInputTensorDim (idx, dim, type);
+}
+
+/**
+ * @brief      get the Dimension of Output Tensor of model
+ * @param      tf      : the class object
+ * @param idx  : the index of the output tensor
+ * @param[out] dim     : the array of the output tensor
+ * @param[out] type    : the data type of the output tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+tf_core_getOutputDim (void *tf, int idx, tensor_dim dim, tensor_type * type)
+{
+  TFCore *c = (TFCore *) tf;
+  return c->getOutputTensorDim (idx, dim, type);
+}
+
+/**
+ * @brief      get the size of Input Tensor of model
+ * @param      tf      : the class object
+ * @return     the number of Input Tensors.
+ */
+int
+tf_core_getInputSize (void *tf)
+{
+  TFCore *c = (TFCore *) tf;
+  return c->getInputTensorSize ();
+}
+
+/**
+ * @brief      get the size of Output Tensor of model
+ * @param      tf      : the class object
+ * @return     the number of Output Tensors.
+ */
+int
+tf_core_getOutputSize (void *tf)
+{
+  TFCore *c = (TFCore *) tf;
+  return c->getOutputTensorSize ();
+}
+
+/**
+ * @brief      invoke the model
+ * @param      tf      : the class object
+ * @param[in] inptr : The input tensor
+ * @param[out]  outptr : The output tensor
+ * @return 0 if OK. non-zero if error.
+ */
+int
+tf_core_invoke (void *tf, uint8_t * inptr, uint8_t ** outptr)
+{
+  TFCore *c = (TFCore *) tf;
+  return c->invoke (inptr, outptr);
+}
diff --git a/include/tensor_filter_tensorflow_core.h b/include/tensor_filter_tensorflow_core.h
new file mode 100644 (file)
index 0000000..3cc1332
--- /dev/null
@@ -0,0 +1,112 @@
+/**
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd. All rights reserved.
+ * Copyright (C) 2018 HyoungJoo Ahn <hello.ahn@samsung.com>
+ * Copyright (C) 2018 Jijoong Moon <jijoong.moon@samsung.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ */
+/**
+ * @file   tensor_filter_tensorflow_core.h
+ * @author HyoungJoo Ahn <hello.ahn@samsung.com>
+ * @author Jijoong Moon <jijoong.moon@samsung.com>
+ * @date   08/02/2018
+ * @brief  connection with tensorflow libraries.
+ *
+ * @bug     No known bugs.
+ */
+#ifndef TENSOR_FILTER_TENSORFLOW_H
+#define TENSOR_FILTER_TENSORFLOW_H
+
+#ifdef __cplusplus
+#include <iostream>
+#include <stdint.h>
+#include <glib.h>
+
+#include <tensorflow/core/platform/init_main.h>
+#include <tensorflow/core/public/session.h>
+#include <tensorflow/core/framework/tensor_shape.h>
+
+#include "tensor_typedef.h"
+
+/**
+ * @brief      ring cache structure
+ */
+class TFCore
+{
+public:
+  /**
+   * member functions.
+   */
+  TFCore (const char *_model_path);
+   ~TFCore ();
+
+  /**
+   * @brief    get the model path.
+   * @return   saved model path.
+   */
+  const char *getModelPath ()
+  {
+    return model_path;
+  }
+  int loadModel ();
+  const char *getInputTensorName ();
+  const char *getOutputTensorName ();
+
+  double get_ms (struct timeval t);
+  int getInputTensorSize ();
+  int getOutputTensorSize ();
+  int getInputTensorDim (int idx, tensor_dim dim, tensor_type * type);
+  int getOutputTensorDim (int idx, tensor_dim dim, tensor_type * type);
+  int getInputTensorDimSize ();
+  int getOutputTensorDimSize ();
+  int invoke (uint8_t * inptr, uint8_t ** outptr);
+
+private:
+  /**
+   * member variables.
+   */
+  const char *model_path;
+  int tensor_size;
+  int node_size;
+  int input_size;
+  int output_size;
+  int *input_idx_list;
+  int *output_idx_list;
+  int input_idx_list_len;
+  int output_idx_list_len;
+  int getTensorType (int tensor_idx, tensor_type * type);
+  int getTensorDim (int tensor_idx, tensor_dim dim, tensor_type * type);
+};
+
+/**
+ * @brief      the definition of functions to be used at C files.
+ */
+extern "C"
+{
+#endif
+
+  extern void *tf_core_new (const char *_model_path);
+  extern void tf_core_delete (void *tf);
+  extern const char *tf_core_getModelPath (void *tf);
+  extern int tf_core_getInputDim (void *tf, int idx, tensor_dim dim,
+      tensor_type * type);
+  extern int tf_core_getOutputDim (void *tf, int idx, tensor_dim dim,
+      tensor_type * type);
+  extern int tf_core_getInputSize (void *tf);
+  extern int tf_core_getOutputSize (void *tf);
+  extern int tf_core_invoke (void *tf, uint8_t * inptr, uint8_t ** outptr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif