[Example] remove unnecessary examples
author    Jaeyun <jy1210.jung@samsung.com>
Wed, 16 Jan 2019 09:25:20 +0000 (18:25 +0900)
committer MyungJoo Ham <myungjoo.ham@gmail.com>
Thu, 17 Jan 2019 12:11:41 +0000 (21:11 +0900)
Remove unnecessary examples and their dependencies.

All executable examples have been moved to the nnstreamer-example repository.
The remaining examples are kept because they are required for the unit tests and app tests.

Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
20 files changed:
debian/control
meson.build
nnstreamer_example/CMakeLists.txt
nnstreamer_example/example_decoder_image_labelling/CMakeLists.txt [deleted file]
nnstreamer_example/example_decoder_image_labelling/meson.build [deleted file]
nnstreamer_example/example_decoder_image_labelling/nnstreamer_example_decoder_image_labelling.c [deleted file]
nnstreamer_example/example_filter_performance_profile/CMakeLists.txt [deleted file]
nnstreamer_example/example_filter_performance_profile/meson.build [deleted file]
nnstreamer_example/example_filter_performance_profile/nnstreamer_example_filter_performance_profile.c [deleted file]
nnstreamer_example/example_object_detection/CMakeLists.txt [deleted file]
nnstreamer_example/example_object_detection/get_model.sh [deleted file]
nnstreamer_example/example_object_detection/gst-launch-object-detection.sh [deleted file]
nnstreamer_example/example_object_detection/meson.build [deleted file]
nnstreamer_example/example_object_detection/nnstreamer_example_object_detection.cc [deleted file]
nnstreamer_example/example_object_detection_tensorflow/CMakeLists.txt [deleted file]
nnstreamer_example/example_object_detection_tensorflow/get_model.sh [deleted file]
nnstreamer_example/example_object_detection_tensorflow/meson.build [deleted file]
nnstreamer_example/example_object_detection_tensorflow/nnstreamer_example_object_detection_tensorflow.cc [deleted file]
nnstreamer_example/meson.build
packaging/nnstreamer.spec

diff --git a/debian/control b/debian/control
index b44eec9..01b8773 100644 (file)
@@ -6,7 +6,7 @@ Build-Depends: gcc, cmake, libgstreamer1.0-dev, libgstreamer-plugins-base1.0-dev
  libgtest-dev,
  debhelper (>=9),
  gstreamer1.0-tools, gstreamer1.0-plugins-base, gstreamer1.0-plugins-good,
- libpng-dev, tensorflow-lite-dev, tensorflow-dev [amd64 arm64], libcairo2-dev, libopencv-dev,
+ libpng-dev, tensorflow-lite-dev, tensorflow-dev [amd64 arm64], libopencv-dev,
  liborc-0.4-dev, ssat, python, python-numpy, libprotobuf-dev [amd64 arm64]
 Standards-Version: 3.9.6
 Homepage: https://github.com/nnsuite/nnstreamer
diff --git a/meson.build b/meson.build
index fae2ea6..ed5a002 100644 (file)
@@ -101,7 +101,9 @@ endif
 subdir('gst')
 
 # Build nnstreamer examples
-subdir('nnstreamer_example')
+if get_option('enable-test') or get_option('install-example')
+  subdir('nnstreamer_example')
+endif
 
 # Build unittests
 if get_option('enable-test')
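Note: with the change above, the nnstreamer_example subtree is configured only when at least one of the two meson options is enabled. A minimal configuration sketch, assuming 'enable-test' and 'install-example' are plain boolean options declared in meson_options.txt (the option names are taken from the hunk above; defaults are not shown here):

$ meson build -Denable-test=true -Dinstall-example=true
$ ninja -C build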
diff --git a/nnstreamer_example/CMakeLists.txt b/nnstreamer_example/CMakeLists.txt
index 3327e7f..d681c02 100644 (file)
@@ -7,7 +7,3 @@ ADD_SUBDIRECTORY(custom_example_LSTM)
 ADD_SUBDIRECTORY(example_cam)
 ADD_SUBDIRECTORY(example_sink)
 ADD_SUBDIRECTORY(example_filter)
-ADD_SUBDIRECTORY(example_object_detection)
-ADD_SUBDIRECTORY(example_object_detection_tensorflow)
-ADD_SUBDIRECTORY(example_decoder_image_labelling)
-ADD_SUBDIRECTORY(example_filter_performance_profile)
diff --git a/nnstreamer_example/example_decoder_image_labelling/CMakeLists.txt b/nnstreamer_example/example_decoder_image_labelling/CMakeLists.txt
deleted file mode 100644 (file)
index 41ba40a..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-pkg_check_modules(app_ex_imglabel_pkgs gstreamer-1.0 glib-2.0 REQUIRED)
-
-ADD_EXECUTABLE(nnstreamer_example_decoder_image_labelling nnstreamer_example_decoder_image_labelling.c)
-
-TARGET_LINK_LIBRARIES(nnstreamer_example_decoder_image_labelling ${app_ex_imglabel_pkgs_LIBRARIES})
-TARGET_INCLUDE_DIRECTORIES(nnstreamer_example_decoder_image_labelling PUBLIC ${app_ex_imglabel_pkgs_INCLUDE_DIRS})
-TARGET_COMPILE_OPTIONS(nnstreamer_example_decoder_image_labelling PUBLIC ${app_ex_imglabel_pkgs_CFLAGS_OTHER})
-
-IF (INSTALL_EXAMPLE_APP)
-       INSTALL(TARGETS nnstreamer_example_decoder_image_labelling RUNTIME DESTINATION ${EXAMPLE_EXEC_PREFIX})
-ENDIF (INSTALL_EXAMPLE_APP)
diff --git a/nnstreamer_example/example_decoder_image_labelling/meson.build b/nnstreamer_example/example_decoder_image_labelling/meson.build
deleted file mode 100644 (file)
index b93deb5..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-nnstreamer_example_decoder_image_labelling = executable('nnstreamer_example_decoder_image_labelling',
-  'nnstreamer_example_decoder_image_labelling.c',
-  dependencies: [glib_dep, gst_dep],
-  install: get_option('install-example'),
-  install_dir: examples_install_dir
-)
diff --git a/nnstreamer_example/example_decoder_image_labelling/nnstreamer_example_decoder_image_labelling.c b/nnstreamer_example/example_decoder_image_labelling/nnstreamer_example_decoder_image_labelling.c
deleted file mode 100644 (file)
index 0afc191..0000000
+++ /dev/null
@@ -1,347 +0,0 @@
-/**
- * @file       nnstreamer_example_decoder.c
- * @date       4 Oct 2018
- * @brief      Tensor stream example with tensor decoder
- * @see        https://github.com/nnsuite/nnstreamer
- * @author     Jinhyuck Park <jinhyuck83.park@samsung.com>
- * @bug                No known bugs.
- *
- * NNStreamer example for image recognition using only GStreamer plug-ins, including the tensor decoder.
- *
- * Pipeline :
- * v4l2src -- tee --  -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- textoverlay -- videoconvert -- ximagesink
- *                  |                                                                    |
- *                  --- videoscale -- tensor_converter -- tensor_filter -- tensor_decoder
- *
- *
- * 'tensor_filter' for image recognition.
- * Download the tflite model 'Mobilenet_1.0_224_quant' from the link below,
- * https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/g3doc/models.md#image-classification-quantized-models
- *
- * 'tensor_decoder' feeds the recognition result to the text_sink of textoverlay.
- *
- * Run example :
- * Before running this example, GST_PLUGIN_PATH should be updated for nnstreamer plug-in.
- * $ export GST_PLUGIN_PATH=$GST_PLUGIN_PATH:<nnstreamer plugin path>
- * $ ./nnstreamer_example_decoder
- */
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <glib.h>
-#include <gst/gst.h>
-
-/**
- * @brief Macro for debug mode.
- */
-#ifndef DBG
-#define DBG FALSE
-#endif
-
-/**
- * @brief Macro for debug message.
- */
-#define _print_log(...) if (DBG) g_message (__VA_ARGS__)
-
-/**
- * @brief Macro to check error case.
- */
-#define _check_cond_err(cond) \
-  if (!(cond)) { \
-    _print_log ("app failed! [line : %d]", __LINE__); \
-    goto error; \
-  }
-
-/**
- * @brief Data structure for tflite model info.
- */
-typedef struct
-{
-  gchar *model_path; /**< tflite model file path */
-} tflite_info_s;
-
-/**
- * @brief Data structure for app.
- */
-typedef struct
-{
-  GMainLoop *loop; /**< main event loop */
-  GstElement *pipeline; /**< gst pipeline for data stream */
-  GstBus *bus; /**< gst bus for data pipeline */
-
-  gboolean running; /**< true when app is running */
-  guint received; /**< received buffer count */
-  tflite_info_s tflite_info; /**< tflite model info */
-} AppData;
-
-/**
- * @brief Data for pipeline and result.
- */
-static AppData g_app;
-
-/**
- * @brief Free data in tflite info structure.
- */
-static void
-_tflite_free_info (tflite_info_s * tflite_info)
-{
-  g_return_if_fail (tflite_info != NULL);
-
-  if (tflite_info->model_path) {
-    g_free (tflite_info->model_path);
-    tflite_info->model_path = NULL;
-  }
-
-}
-
-/**
- * @brief Check tflite model and load labels.
- *
- * This example uses 'Mobilenet_1.0_224_quant' for image classification.
- */
-static gboolean
-_tflite_init_info (tflite_info_s * tflite_info, const gchar * path)
-{
-  const gchar tflite_model[] = "mobilenet_v1_1.0_224_quant.tflite";
-
-  g_return_val_if_fail (tflite_info != NULL, FALSE);
-
-  tflite_info->model_path = NULL;
-
-  /** check model file exists */
-  tflite_info->model_path = g_strdup_printf ("%s/%s", path, tflite_model);
-
-  if (access (tflite_info->model_path, F_OK) != 0) {
-    _print_log ("cannot find tflite model [%s]", tflite_info->model_path);
-    return FALSE;
-  }
-
-  return TRUE;
-}
-
-
-/**
- * @brief Free resources in app data.
- */
-static void
-_free_app_data (void)
-{
-  if (g_app.loop) {
-    g_main_loop_unref (g_app.loop);
-    g_app.loop = NULL;
-  }
-
-  if (g_app.bus) {
-    gst_bus_remove_signal_watch (g_app.bus);
-    gst_object_unref (g_app.bus);
-    g_app.bus = NULL;
-  }
-
-  if (g_app.pipeline) {
-    gst_object_unref (g_app.pipeline);
-    g_app.pipeline = NULL;
-  }
-
-  _tflite_free_info (&g_app.tflite_info);
-}
-
-/**
- * @brief Function to print error message.
- */
-static void
-_parse_err_message (GstMessage * message)
-{
-  gchar *debug;
-  GError *error;
-
-  g_return_if_fail (message != NULL);
-
-  switch (GST_MESSAGE_TYPE (message)) {
-    case GST_MESSAGE_ERROR:
-      gst_message_parse_error (message, &error, &debug);
-      break;
-
-    case GST_MESSAGE_WARNING:
-      gst_message_parse_warning (message, &error, &debug);
-      break;
-
-    default:
-      return;
-  }
-
-  gst_object_default_error (GST_MESSAGE_SRC (message), error, debug);
-  g_error_free (error);
-  g_free (debug);
-}
-
-/**
- * @brief Function to print qos message.
- */
-static void
-_parse_qos_message (GstMessage * message)
-{
-  GstFormat format;
-  guint64 processed;
-  guint64 dropped;
-
-  gst_message_parse_qos_stats (message, &format, &processed, &dropped);
-  _print_log ("format[%d] processed[%" G_GUINT64_FORMAT "] dropped[%"
-      G_GUINT64_FORMAT "]", format, processed, dropped);
-}
-
-/**
- * @brief Callback for message.
- */
-static void
-_message_cb (GstBus * bus, GstMessage * message, gpointer user_data)
-{
-  switch (GST_MESSAGE_TYPE (message)) {
-    case GST_MESSAGE_EOS:
-      _print_log ("received eos message");
-      g_main_loop_quit (g_app.loop);
-      break;
-
-    case GST_MESSAGE_ERROR:
-      _print_log ("received error message");
-      _parse_err_message (message);
-      g_main_loop_quit (g_app.loop);
-      break;
-
-    case GST_MESSAGE_WARNING:
-      _print_log ("received warning message");
-      _parse_err_message (message);
-      break;
-
-    case GST_MESSAGE_STREAM_START:
-      _print_log ("received start message");
-      break;
-
-    case GST_MESSAGE_QOS:
-      _parse_qos_message (message);
-      break;
-
-    default:
-      break;
-  }
-}
-
-
-/**
- * @brief Set window title.
- * @param name GstXImageSink element name
- * @param title window title
- */
-static void
-_set_window_title (const gchar * name, const gchar * title)
-{
-  GstTagList *tags;
-  GstPad *sink_pad;
-  GstElement *element;
-
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), name);
-
-  g_return_if_fail (element != NULL);
-
-  sink_pad = gst_element_get_static_pad (element, "sink");
-
-  if (sink_pad) {
-    tags = gst_tag_list_new (GST_TAG_TITLE, title, NULL);
-    gst_pad_send_event (sink_pad, gst_event_new_tag (tags));
-    gst_object_unref (sink_pad);
-  }
-
-  gst_object_unref (element);
-}
-
-
-/**
- * @brief Main function.
- */
-int
-main (int argc, char **argv)
-{
-  const gchar tflite_model_path[] = "./tflite_model";
-  const gchar tflite_label[] = "./tflite_model/labels.txt";
-  /** 224x224 for tflite model */
-  const guint width = 224;
-  const guint height = 224;
-
-  gchar *str_pipeline;
-  gulong handle_id;
-  GstElement *element;
-
-  _print_log ("start app..");
-
-  /** init app variable */
-  g_app.running = FALSE;
-  g_app.received = 0;
-
-  _check_cond_err (_tflite_init_info (&g_app.tflite_info, tflite_model_path));
-
-  /** init gstreamer */
-  gst_init (&argc, &argv);
-
-  /** main loop */
-  g_app.loop = g_main_loop_new (NULL, FALSE);
-  _check_cond_err (g_app.loop != NULL);
-
-  /** init pipeline */
-  str_pipeline =
-      g_strdup_printf
-      ("textoverlay name=overlay font-desc=Sans,24 ! videoconvert ! ximagesink name=img_test "
-      "v4l2src name=cam_src ! videoscale ! video/x-raw,width=640,height=480,format=RGB ! tee name=t_raw "
-      "t_raw. ! queue ! overlay.video_sink "
-      "t_raw. ! queue ! videoscale ! video/x-raw,width=%d,height=%d ! tensor_converter !"
-      "tensor_filter framework=tensorflow-lite model=%s ! "
-      "tensor_decoder mode=image_labeling option1=%s ! overlay.text_sink",
-      width, height, g_app.tflite_info.model_path, tflite_label);
-  g_app.pipeline = gst_parse_launch (str_pipeline, NULL);
-  g_free (str_pipeline);
-  _check_cond_err (g_app.pipeline != NULL);
-
-  /** bus and message callback */
-  g_app.bus = gst_element_get_bus (g_app.pipeline);
-  _check_cond_err (g_app.bus != NULL);
-
-  gst_bus_add_signal_watch (g_app.bus);
-  handle_id = g_signal_connect (g_app.bus, "message",
-      (GCallback) _message_cb, NULL);
-  _check_cond_err (handle_id > 0);
-
-  /** start pipeline */
-  gst_element_set_state (g_app.pipeline, GST_STATE_PLAYING);
-
-  g_app.running = TRUE;
-
-  /** set window title */
-  _set_window_title ("img_test", "NNStreamer Example");
-
-  /** run main loop */
-  g_main_loop_run (g_app.loop);
-  /** quit when received eos or error message */
-  g_app.running = FALSE;
-  /** cam source element */
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), "cam_src");
-
-  gst_element_set_state (element, GST_STATE_READY);
-  gst_element_set_state (g_app.pipeline, GST_STATE_READY);
-
-  g_usleep (200 * 1000);
-
-  gst_element_set_state (element, GST_STATE_NULL);
-  gst_element_set_state (g_app.pipeline, GST_STATE_NULL);
-
-  g_usleep (200 * 1000);
-  gst_object_unref (element);
-
-error:
-  _print_log ("close app..");
-
-  _free_app_data ();
-  return 0;
-}
diff --git a/nnstreamer_example/example_filter_performance_profile/CMakeLists.txt b/nnstreamer_example/example_filter_performance_profile/CMakeLists.txt
deleted file mode 100644 (file)
index 4392648..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-pkg_check_modules(app_ex_performance_pkgs gstreamer-1.0 glib-2.0 REQUIRED)
-
-SET(EXAMPLE_APP_NAME "nnstreamer_example_filter_performance_profile")
-ADD_EXECUTABLE(${EXAMPLE_APP_NAME} ${EXAMPLE_APP_NAME}.c)
-
-TARGET_LINK_LIBRARIES(${EXAMPLE_APP_NAME} ${app_ex_performance_pkgs_LIBRARIES})
-TARGET_INCLUDE_DIRECTORIES(${EXAMPLE_APP_NAME} PUBLIC ${app_ex_performance_pkgs_INCLUDE_DIRS})
-TARGET_COMPILE_OPTIONS(${EXAMPLE_APP_NAME} PUBLIC ${app_ex_performance_pkgs_CFLAGS_OTHER})
-
-IF (INSTALL_EXAMPLE_APP)
-       INSTALL(TARGETS ${EXAMPLE_APP_NAME} RUNTIME DESTINATION ${EXAMPLE_EXEC_PREFIX})
-ENDIF (INSTALL_EXAMPLE_APP)
diff --git a/nnstreamer_example/example_filter_performance_profile/meson.build b/nnstreamer_example/example_filter_performance_profile/meson.build
deleted file mode 100644 (file)
index 95173ff..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-nnstreamer_example_filter_performance_profile = executable('nnstreamer_example_filter_performance_profile',
-  'nnstreamer_example_filter_performance_profile.c',
-  dependencies: [glib_dep, gst_dep],
-  install: get_option('install-example'),
-  install_dir: examples_install_dir
-)
diff --git a/nnstreamer_example/example_filter_performance_profile/nnstreamer_example_filter_performance_profile.c b/nnstreamer_example/example_filter_performance_profile/nnstreamer_example_filter_performance_profile.c
deleted file mode 100644 (file)
index 63b98d3..0000000
+++ /dev/null
@@ -1,1055 +0,0 @@
-/**
- * @file       nnstreamer_example_filter_performance_profile.c
- * @date       27 August 2018
- * @brief      A NNStreamer Example of tensor_filter using TensorFlow Lite:
- *             Performance Profiling (i.e., FPS)
- * @see                https://github.com/nnsuite/nnstreamer
- * @author     Wook Song <wook16.song@samsung.com>
- * @bug                No known bugs.
- *
- * A NNStreamer example application (with tensor_filter using TensorFlow Lite)
- * for performance profiling.
- *
- * Pipeline :
- * v4l2src -- videoconvert -- tee (optional) -- queue -- textoverlay -- fpsdisplaysink
- *                             |
- *                              -- queue -- videoscale -- videoconvert -- tensor_converter -- tensor_filter -- tensor_sink
- *
- * This example application currently only supports MOBINET for Tensorflow Lite via 'tensor_filter'.
- * Download the tflite model 'Mobilenet_1.0_224_quant' from the link below,
- * https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/g3doc/models.md#image-classification-quantized-models
- * By using the 'new-data' signal of tensor_sink, the frames per second (FPS) are measured
- * and the classification result is fed to 'textoverlay'.
- *
- * How to run this application: Before running this example,
- * GST_PLUGIN_PATH should be updated for the path where the nnstreamer plug-ins are placed.
- * $ export GST_PLUGIN_PATH=/where/NNSTreamer/plugins/located:$GST_PLUGIN_PATH
- * The model file and its related miscellaneous files (e.g., *.tflite, labels.txt, etc.)
- * are also required to be placed in the ./model directory.
- *
- * $ ./nnstreamer_example_filter_performance_profile --help
- * Usage:
- * nnstreamer_example_filter_performance_profile [OPTION...]
-
- * Help Options:
- * -h, --help                                                             Show help options
-
- * Application Options:
- * -c, --capture=/dev/videoX                                              A device node of the video capture device you wish to use
- * -f, --file=/where/your/video/file/located                              A video file location to play
- * --width= (Defaults: 1920)                                              Width of input source
- * --height= (Defaults: 1080)                                             Height of input source
- * --framerates= (Defaults: 5/1)                                          Frame rates of input source
- * --tensor-filter-desc=mobinet-tflite|... (Defaults: mobinet-tflite)     NN model and framework description for tensor_filter
- * --nnline-only                                                          Do not play audio/video input source
- *
- * For example, in order to run the Mobinet Tensorflow Lite model using the NNStreamer pipeline for the input source,
- * from the video capture device (/dev/video0), whose resolution is 1920x1080 and frame rate is 5,
- *
- * $ ./nnstreamer_example_filter_performance_profile -c /dev/video0 --tensor-filter-desc=mobinet-tflite
- */
-
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <gst/gst.h>
-
-/**
- * @brief A data type definition for the command line option, -c/--capture
- */
-typedef enum _input_src_t
-{
-  CAM_SRC = 0,
-  FILE_SRC,
-} input_src_t;
-
-/**
- * @brief A data type definition for the command line option, --tensor-filter-desc
- */
-typedef enum _nn_tensor_filter_desc_t
-{
-  DESC_NOT_SUPPORTED = -1,
-  /* default */
-  TF_LITE_MOBINET = 0,
-} nn_tensor_filter_desc_t;
-
-/**
- * @brief Constant values
- */
-enum constant_values
-{
-  MAX_NUM_OF_SIGNALS = 128,
-  DEFAULT_WIDTH_INPUT_SRC = 1920,
-  DEFAULT_HEIGHT_INPUT_SRC = 1080,
-  DEFAULT_WIDTH_TFLITE_MOBINET = 224,
-  DEFAULT_HEIGHT_TFLITE_MOBINET = 224,
-};
-static const char DEFAULT_FRAME_RATES_INPUT_SRC[] = "5/1";
-static const char DEFAULT_FORMAT_TENSOR_CONVERTER[] = "RGB";
-static const char DEFAULT_PATH_MODEL_TENSOR_FILTER[] = "./model/";
-static const char NAME_APP_PIPELINE[] = "NNStreamer Pipeline";
-static const char NAME_PROP_DEVICE_V4L2SRC[] = "device";
-static const char NAME_V4L2_PIPELINE_INPUT_SRC[] = "usbcam";
-static const char NAME_V4L2_PIPELINE_INPUT_VIDEOCONVERT[] =
-    "Colorspace converter for input source";
-static const char NAME_V4L2_PIPELINE_INPUT_CAPSFILTER[] =
-    "CAPS filter for input source";
-static const char NAME_V4L2_PIPELINE_TEE[] = "TEE";
-static const char NAME_V4L2_PIPELINE_OUTPUT_QUEUE[] = "Queue for image sink";
-static const char NAME_V4L2_PIPELINE_OUTPUT_SINK[] = "Xv-based image sink";
-static const char NAME_V4L2_PIPELINE_OUTPUT_TEXTOVERLAY[] =
-    "Textoverlay to display the inference result";
-static const char NAME_NN_TFLITE_PIPELINE_QUEUE[] = "Queue for NN-TFlite";
-static const char NAME_NN_TFLITE_PIPELINE_VIDEOSCALE[] =
-    "Video scaler for NN-TFlite";
-static const char NAME_NN_TFLITE_PIPELINE_VIDEOCONVERT[] =
-    "Colorspace converter  for NN-TFlite";
-static const char NAME_NN_TFLITE_PIPELINE_INPUT_CAPSFILTER[] =
-    "CAPS filter for input source of NN-TFlite";
-static const char NAME_NN_TFLITE_PIPELINE_TENSOR_CONVERTER[] =
-    "Tensor converter for NN-TFlite";
-static const char NAME_NN_TFLITE_PIPELINE_TENSOR_FILTER[] =
-    "Tensor filter for NN-TFlite";
-static const char NAME_NN_TFLITE_PIPELINE_TENSOR_SINK[] =
-    "Tensor sink for NN-TFlite";
-
-static const char *DESC_LIST_TENSOR_FILTER[] = {
-  [TF_LITE_MOBINET] = "mobinet-tflite",
-  /* sentinel */
-  NULL,
-};
-
-static const char *FRAMEWORK_LIST_TENSOR_FILTER[] = {
-  [TF_LITE_MOBINET] = "tensorflow-lite",
-  /* sentinel */
-  NULL,
-};
-
-static const char *NAME_LIST_OF_MODEL_FILE_TENSOR_FILTER[] = {
-  [TF_LITE_MOBINET] = "mobilenet_v1_1.0_224_quant.tflite",
-  /* sentinel */
-  NULL,
-};
-
-/**
- * TODO: Currently, only one misc file per each model is supported.
- */
-static const char *NAME_LIST_OF_MISC_FILE_TENSOR_FILTER[] = {
-  [TF_LITE_MOBINET] = "labels.txt",
-  /* sentinel */
-  NULL,
-};
-
-/**
- * @brief A data type definition for the information needed to set up the GstElements corresponding to the input source: v4l2src
- */
-typedef struct _v4l2src_property_info_t
-{
-  gchar *device;
-} v4l2src_property_info_t;
-
-/**
- * @brief A data type definition for the information needed to set up the GstElements corresponding to the input source
- */
-typedef union _src_property_info_t
-{
-  v4l2src_property_info_t v4l2src_property_info;
-} src_property_info_t;
-
-/**
- * @brief A data type definition for the input and output pipeline
- *
- * GstElements required to construct the input and output pipeline are here.
- */
-typedef struct _v4l2src_pipeline_container_t
-{
-  GstElement *input_source;
-  GstElement *input_videoconvert;
-  GstElement *input_capsfilter;
-  GstElement *tee;
-  GstElement *output_queue;
-  GstElement *output_textoverlay;
-  GstElement *output_sink; /**< fpsdisplaysink */
-} v4l2src_pipeline_container_t;
-
-/**
- * @brief A data type definition for the NNStreamer pipeline
- *
- * GstElements required to construct the NNStreamer pipeline are here.
- */
-typedef struct _nn_tflite_pipeline_container_t
-{
-  GstElement *nn_tflite_queue;
-  GstElement *nn_tflite_videoscale;
-  GstElement *nn_tflite_videoconvert;
-  GstElement *nn_tflite_capsfilter;
-  GstElement *nn_tflite_tensor_converter;
-  GstElement *nn_tflite_tensor_filter;
-  GstElement *nn_tflite_tensor_sink;
-} nn_tflite_pipeline_container_t;
-
-/**
- * @brief A data type definition for the pipelines
- */
-typedef struct _pipeline_container_t
-{
-  v4l2src_pipeline_container_t v4l2src_pipeline_container;
-  nn_tflite_pipeline_container_t nn_tflite_pipeline_container;
-} pipeline_container_t;
-
-/**
- * @brief A data type definition for the model specific information: the Mobinet Tensorflow-lite model
- */
-typedef struct _tflite_mobinet_info_t
-{
-  GList *labels;
-} tflite_mobinet_info_t;
-
-/**
- * @brief A data type definition for the NNStreamer application context data
- */
-typedef struct _nnstrmr_app_context_t
-{
-  /* Variables for the command line options */
-  input_src_t input_src; /**< -c/--capture */
-  gint input_src_width; /**< --width */
-  gint input_src_height; /**< --height */
-  gchar *input_src_framerates; /**< --framerates */
-  nn_tensor_filter_desc_t nn_tensorfilter_desc; /**< --tensor-filter-desc */
-  gboolean flag_nnline_only; /**< --nnline-only */
-  /* Variables for the information need to initialize this application */
-  gchar *nn_tensor_filter_model_path; /**< the path where the NN model files located */
-  tflite_mobinet_info_t tflite_mobinet_info; /**< model specific information for mobinet+tf-lite */
-  src_property_info_t src_property_info; /**< information required to set up the input source */
-  GMainLoop *mainloop;
-  GstElement *pipeline;
-  pipeline_container_t pipeline_container; /**< pipeline container, which indirectly includes the GstElements */
-  GstPad *tee_output_line_pad; /**< a static src pad of tee for the output pipeline */
-  GstPad *tee_nn_line_pad; /**< a static src pad of tee for the nnstreamer pipeline */
-  /* Variables for the GSignal maintenance */
-  GMutex signals_mutex;
-  guint signals_connected[MAX_NUM_OF_SIGNALS];
-  gint signal_idx;
-  /* Variables for the performance profiling */
-  GstClockTime time_pipeline_start;
-  GstClockTime time_last_profile;
-} nnstrmr_app_context_t;
-
-/**
- * @brief callback function for watching bus of the pipeline
- */
-static gboolean
-_cb_bus_watch (GstBus * bus, GstMessage * msg, gpointer app_ctx_gptr)
-{
-  nnstrmr_app_context_t *app_ctx = (nnstrmr_app_context_t *) app_ctx_gptr;
-  gchar *debug;
-  GError *error;
-
-  switch (GST_MESSAGE_TYPE (msg)) {
-    case GST_MESSAGE_STREAM_STATUS:
-    {
-      GstStreamStatusType streamstatus;
-      gst_message_parse_stream_status (msg, &streamstatus, NULL);
-      g_print ("gstreamer stream status %d ==> %s\n",
-          streamstatus, GST_OBJECT_NAME (msg->src));
-      break;
-    }
-    case GST_MESSAGE_STATE_CHANGED:
-    {
-      if (GST_MESSAGE_SRC (msg) == GST_OBJECT (app_ctx->pipeline)) {
-        GstState old_state, new_state, pending_state;
-        gst_message_parse_state_changed (msg, &old_state, &new_state,
-            &pending_state);
-        if ((old_state == GST_STATE_PAUSED) && (new_state == GST_STATE_PLAYING)) {
-          GstClock *clock;
-          clock = gst_element_get_clock (app_ctx->pipeline);
-          app_ctx->time_pipeline_start = gst_clock_get_time (clock);
-          app_ctx->time_last_profile = gst_clock_get_time (clock);
-        }
-      }
-      break;
-    }
-    case GST_MESSAGE_EOS:
-    {
-      g_print ("INFO: End of stream!\n");
-      g_main_loop_quit (app_ctx->mainloop);
-      break;
-    }
-    case GST_MESSAGE_ERROR:
-    {
-      gst_message_parse_error (msg, &error, &debug);
-      g_free (debug);
-
-      g_printerr ("ERR: %s\n", error->message);
-      g_error_free (error);
-
-      g_main_loop_quit (app_ctx->mainloop);
-      break;
-    }
-    default:
-      break;
-  }
-  return TRUE;
-}
-
-/**
- * @brief Set up the v4l2src GstElement
- * @param v4l2src a pointer of the v4l2src GstElement
- * @param ctx the application context data
- * @return TRUE, if it is succeeded
- */
-static gboolean
-_set_properties_of_v4l2src (GstElement * v4l2src,
-    const nnstrmr_app_context_t ctx)
-{
-  gchar *dev = ctx.src_property_info.v4l2src_property_info.device;
-  gboolean ret = TRUE;
-
-  if (!g_file_test (dev, G_FILE_TEST_EXISTS)) {
-    g_printerr ("ERR: the device node %s does not exist\n", dev);
-    ret = FALSE;
-  }
-
-  g_object_set (G_OBJECT (v4l2src), NAME_PROP_DEVICE_V4L2SRC, dev, NULL);
-
-  g_free (dev);
-
-  return ret;
-}
-
-/**
- * @brief Parse the command line option arguments
- * @param argc
- * @param argv
- * @param ctx a pointer of the application context data
- * @return none
- */
-static void
-_set_and_parse_option_info (int argc, char *argv[], nnstrmr_app_context_t * ctx)
-{
-  gchar *cap_dev_node = NULL;
-  gchar *file_path = NULL;
-  gchar *framerates = NULL;
-  gchar *tensorfilter_desc = NULL;
-  gchar *width_arg_desc =
-      g_strdup_printf (" (Defaults: %d)", DEFAULT_WIDTH_INPUT_SRC);
-  gchar *height_arg_desc =
-      g_strdup_printf (" (Defaults: %d)", DEFAULT_HEIGHT_INPUT_SRC);
-  gchar *framerates_arg_desc =
-      g_strdup_printf (" (Defaults: %s)", DEFAULT_FRAME_RATES_INPUT_SRC);
-  gchar *tf_desc_arg_desc =
-      g_strdup_printf ("%s|... (Defaults: %s)", DESC_LIST_TENSOR_FILTER[0],
-      DESC_LIST_TENSOR_FILTER[0]);
-  gint width = -1;
-  gint height = -1;
-  gint ret = 0;
-  gboolean flag_nnline_only = FALSE;
-  GError *error = NULL;
-  GOptionContext *optionctx;
-
-  const GOptionEntry main_entries[] = {
-    {"capture", 'c', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &cap_dev_node,
-          "A device node of the video capture device you wish to use",
-        "/dev/videoX"},
-    {"file", 'f', G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &file_path,
-        "A video file location to play", "/where/your/video/file/located"},
-    {"width", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_INT, &width,
-        "Width of input source", width_arg_desc},
-    {"height", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_INT, &height,
-        "Height of input source", height_arg_desc},
-    {"framerates", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING, &framerates,
-        "Frame rates of input source", framerates_arg_desc},
-    {"tensor-filter-desc", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_STRING,
-          &tensorfilter_desc,
-          "NN model and framework description for tensor_filter",
-        tf_desc_arg_desc},
-    {"nnline-only", 0, G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE,
-          &flag_nnline_only, "Do not play audio/video input source",
-        NULL},
-    {NULL}
-  };
-
-  optionctx = g_option_context_new (NULL);
-  g_option_context_add_main_entries (optionctx, main_entries, NULL);
-
-  if (!g_option_context_parse (optionctx, &argc, &argv, &error)) {
-    g_print ("option parsing failed: %s\n", error->message);
-    ret = -1;
-    goto common_cleanup;
-  }
-
-  if ((cap_dev_node != NULL) && (file_path != NULL)) {
-    g_printerr ("ERR: \'capture\' and \'file\' options "
-        "cannot be used simultaneously\n");
-    g_free (cap_dev_node);
-    g_free (file_path);
-    ret = -1;
-    goto common_cleanup;
-  } else if ((cap_dev_node == NULL) && (file_path == NULL)) {
-    g_printerr ("ERR: one of the application options should be provided; "
-        "-c, --capture=/dev/videoX or "
-        "-f, --file=/where/your/video/file/located\n");
-    ret = -1;
-    goto common_cleanup;
-  }
-
-  if (cap_dev_node != NULL) {
-    ctx->input_src = CAM_SRC;
-    ctx->src_property_info.v4l2src_property_info.device = cap_dev_node;
-  } else {
-    /* TODO */
-    ctx->input_src = FILE_SRC;
-  }
-
-  if (width == -1) {
-    width = DEFAULT_WIDTH_INPUT_SRC;
-  }
-  ctx->input_src_width = width;
-
-  if (height == -1) {
-    height = DEFAULT_HEIGHT_INPUT_SRC;
-  }
-  ctx->input_src_height = height;
-
-  if (framerates == NULL) {
-    framerates = g_strndup (DEFAULT_FRAME_RATES_INPUT_SRC,
-        strlen (DEFAULT_FRAME_RATES_INPUT_SRC));
-  }
-  ctx->input_src_framerates = framerates;
-
-  if (tensorfilter_desc == NULL) {
-    ctx->nn_tensorfilter_desc = TF_LITE_MOBINET;
-  } else {
-    int i = 0;
-    const char *desc;
-
-    ctx->nn_tensorfilter_desc = DESC_NOT_SUPPORTED;
-    while ((desc = DESC_LIST_TENSOR_FILTER[i]) != NULL) {
-      if (!strncmp (desc, tensorfilter_desc, strlen (tensorfilter_desc))) {
-        ctx->nn_tensorfilter_desc = i;
-      }
-      i++;
-    }
-
-    if (ctx->nn_tensorfilter_desc == DESC_NOT_SUPPORTED) {
-      g_printerr
-          ("ERR: tensor_filter does not support the pair of the framework and model: %s\n",
-          tensorfilter_desc);
-      g_free (tensorfilter_desc);
-      ret = -1;
-      goto common_cleanup;
-    }
-  }
-
-  ctx->nn_tensor_filter_model_path =
-      g_strconcat (DEFAULT_PATH_MODEL_TENSOR_FILTER,
-      NAME_LIST_OF_MODEL_FILE_TENSOR_FILTER[ctx->nn_tensorfilter_desc], NULL);
-  if (!g_file_test (ctx->nn_tensor_filter_model_path, G_FILE_TEST_EXISTS
-          || G_FILE_TEST_IS_REGULAR)) {
-    g_printerr ("ERR: the model file %s corresponding to %s does not exist\n",
-        ctx->nn_tensor_filter_model_path,
-        DESC_LIST_TENSOR_FILTER[ctx->nn_tensorfilter_desc]);
-    g_free (ctx->nn_tensor_filter_model_path);
-    ret = -1;
-    goto common_cleanup;
-  }
-
-  ctx->flag_nnline_only = flag_nnline_only;
-
-common_cleanup:
-  g_free (width_arg_desc);
-  g_free (height_arg_desc);
-  g_free (framerates_arg_desc);
-  g_free (tf_desc_arg_desc);
-  g_option_context_free (optionctx);
-  if (ret != 0) {
-    g_main_loop_unref (ctx->mainloop);
-    exit (ret);
-  }
-}
-
-/**
- * @brief Construct the v4l2src input and output pipeline
- *
- * In this function, the GstElements included in the pipelines are made, added,
- * and linked. Setting the properties for those GstElements are also done.
- *
- * @param ctx a pointer of the application context data
- * @return TRUE, if it is succeeded
- */
-static gboolean
-_construct_v4l2src_pipeline (nnstrmr_app_context_t * ctx)
-{
-  GstElement *pipeline = ctx->pipeline;
-  v4l2src_pipeline_container_t *pipeline_cntnr =
-      &((ctx->pipeline_container).v4l2src_pipeline_container);
-  gboolean ret;
-  GstCaps *caps;
-  gchar *str_caps;
-
-  pipeline_cntnr->input_source =
-      gst_element_factory_make ("v4l2src", NAME_V4L2_PIPELINE_INPUT_SRC);
-  pipeline_cntnr->input_videoconvert =
-      gst_element_factory_make ("videoconvert",
-      NAME_V4L2_PIPELINE_INPUT_VIDEOCONVERT);
-  pipeline_cntnr->input_capsfilter =
-      gst_element_factory_make ("capsfilter",
-      NAME_V4L2_PIPELINE_INPUT_CAPSFILTER);
-  pipeline_cntnr->tee =
-      gst_element_factory_make ("tee", NAME_V4L2_PIPELINE_TEE);
-  pipeline_cntnr->output_queue =
-      gst_element_factory_make ("queue", NAME_V4L2_PIPELINE_OUTPUT_QUEUE);
-  pipeline_cntnr->output_textoverlay =
-      gst_element_factory_make ("textoverlay",
-      NAME_V4L2_PIPELINE_OUTPUT_TEXTOVERLAY);
-  pipeline_cntnr->output_sink =
-      gst_element_factory_make ("fpsdisplaysink",
-      NAME_V4L2_PIPELINE_OUTPUT_SINK);
-
-  if (!pipeline_cntnr->input_source || !pipeline_cntnr->input_videoconvert
-      || !pipeline_cntnr->input_capsfilter || !pipeline_cntnr->tee
-      || !pipeline_cntnr->output_queue || !pipeline_cntnr->output_textoverlay
-      || !pipeline_cntnr->output_sink) {
-    g_printerr ("ERR: cannot create one (or more) of the elements "
-        "which the application pipeline consists of\n");
-    g_free (ctx->input_src_framerates);
-    return FALSE;
-  }
-
-  str_caps =
-      g_strdup_printf ("video/x-raw,width=%d,height=%d,framerate=%s",
-      ctx->input_src_width, ctx->input_src_height, ctx->input_src_framerates);
-  caps = gst_caps_from_string (str_caps);
-  g_object_set (G_OBJECT (pipeline_cntnr->input_capsfilter),
-      "caps", caps, NULL);
-  g_free (ctx->input_src_framerates);
-  g_free (str_caps);
-  gst_caps_unref (caps);
-
-  g_object_set (G_OBJECT (pipeline_cntnr->output_textoverlay), "valignment",
-      /** top */ 2, NULL);
-  g_object_set (G_OBJECT (pipeline_cntnr->output_textoverlay), "font-desc",
-      "Sans, 24", NULL);
-
-  ret = _set_properties_of_v4l2src (pipeline_cntnr->input_source, *ctx);
-  if (ret == FALSE) {
-    return ret;
-  }
-
-  gst_bin_add_many (GST_BIN (pipeline), pipeline_cntnr->input_source,
-      pipeline_cntnr->input_videoconvert, pipeline_cntnr->input_capsfilter,
-      pipeline_cntnr->tee, pipeline_cntnr->output_queue,
-      pipeline_cntnr->output_textoverlay, pipeline_cntnr->output_sink, NULL);
-
-  ret = gst_element_link_many (pipeline_cntnr->input_source,
-      pipeline_cntnr->input_videoconvert, pipeline_cntnr->input_capsfilter,
-      pipeline_cntnr->tee, pipeline_cntnr->output_queue,
-      pipeline_cntnr->output_textoverlay, pipeline_cntnr->output_sink, NULL);
-  if (ret == FALSE) {
-    g_printerr ("ERR: cannot link one (or more) of the elements "
-        "which the application pipeline consists of\n");
-    return FALSE;
-  }
-
-  ctx->tee_output_line_pad =
-      gst_element_get_static_pad (pipeline_cntnr->tee, "src_0");
-  ctx->tee_nn_line_pad =
-      gst_element_get_request_pad (pipeline_cntnr->tee, "src_%u");
-
-
-  return TRUE;
-}
-
-/**
- * @brief A signal handler for 'new-data' emitted by 'tensor-sink'
- *
- * The suffix _nn means that this handler is registered at the NNStreamer pipeline.
- * The performance profiling is done by this function.
- *
- * @param object a pointer of the 'tensor-sink' GstElement
- * @param buffer a pointer of the buffer in the 'tensor-sink' GstElement
- * @param user_data a pointer of the application context data
- * @return none
- */
-static void
-_handle_tensor_sink_new_data_nn (GstElement * object, GstBuffer * buffer,
-    gpointer user_data)
-{
-  /* Performance profiling */
-  nnstrmr_app_context_t *ctx = (nnstrmr_app_context_t *) user_data;
-  GstClock *clock;
-  GstClockTime now;
-  static guint total_passed = 0;
-  gint64 msecs_elapsed;
-  gint64 msecs_interval;
-
-  if (!GST_CLOCK_TIME_IS_VALID (ctx->time_pipeline_start)
-      || !GST_CLOCK_TIME_IS_VALID (ctx->time_last_profile)) {
-    return;
-  }
-
-  total_passed++;
-
-  clock = gst_element_get_clock (ctx->pipeline);
-  now = gst_clock_get_time (clock);
-  msecs_elapsed =
-      GST_TIME_AS_MSECONDS (GST_CLOCK_DIFF (ctx->time_pipeline_start, now));
-  msecs_interval =
-      GST_TIME_AS_MSECONDS (GST_CLOCK_DIFF (ctx->time_last_profile, now));
-  ctx->time_last_profile = now;
-
-  g_print ("Avg. FPS = %lf (processed: %u, elapsed time (ms): %" G_GINT64_FORMAT
-      "), ", (gdouble) total_passed * G_GINT64_CONSTANT (1000) / msecs_elapsed,
-      total_passed, msecs_elapsed);
-  g_print ("Cur. FPS = %lf\n",
-      (gdouble) 1 * G_GINT64_CONSTANT (1000) / msecs_interval);
-}
-
-/**
- * @brief Construct the filesrc input and output pipeline (TODO)
- *
- * @param ctx a pointer of the application context data
- * @return TRUE, if it is succeeded
- */
-static gboolean
-_construct_filesrc_pipeline (nnstrmr_app_context_t * ctx)
-{
-  return TRUE;
-}
-
-/**
- * @brief Construct the NNStreamer pipeline
- *
- * In this function, the GstElements included in the pipelines are made, added,
- * and linked. The sink pad of the queue is also linked to the src pad of tee in
- * the input source pipeline. Setting the properties, 'framework' and 'model',
- * for the 'tensor_filter' GstElement are done here.
- *
- * @param ctx a pointer of the application context data
- * @return TRUE, if it is succeeded
- */
-static gboolean
-_construct_nn_tflite_pipeline (nnstrmr_app_context_t * ctx)
-{
-  GstElement *pipeline = ctx->pipeline;
-  nn_tflite_pipeline_container_t *pipeline_cntnr =
-      &((ctx->pipeline_container).nn_tflite_pipeline_container);
-  GstCaps *caps;
-  gchar *str_caps;
-  gboolean ret;
-  GstPad *pad;
-
-  pipeline_cntnr->nn_tflite_queue =
-      gst_element_factory_make ("queue", NAME_NN_TFLITE_PIPELINE_QUEUE);
-  pipeline_cntnr->nn_tflite_videoscale =
-      gst_element_factory_make ("videoscale",
-      NAME_NN_TFLITE_PIPELINE_VIDEOSCALE);
-  pipeline_cntnr->nn_tflite_videoconvert =
-      gst_element_factory_make ("videoconvert",
-      NAME_NN_TFLITE_PIPELINE_VIDEOCONVERT);
-  pipeline_cntnr->nn_tflite_capsfilter =
-      gst_element_factory_make ("capsfilter",
-      NAME_NN_TFLITE_PIPELINE_INPUT_CAPSFILTER);
-  pipeline_cntnr->nn_tflite_tensor_converter =
-      gst_element_factory_make ("tensor_converter",
-      NAME_NN_TFLITE_PIPELINE_TENSOR_CONVERTER);
-  pipeline_cntnr->nn_tflite_tensor_filter =
-      gst_element_factory_make ("tensor_filter",
-      NAME_NN_TFLITE_PIPELINE_TENSOR_FILTER);
-  pipeline_cntnr->nn_tflite_tensor_sink =
-      gst_element_factory_make ("tensor_sink",
-      NAME_NN_TFLITE_PIPELINE_TENSOR_SINK);
-
-  g_object_set (G_OBJECT (pipeline_cntnr->nn_tflite_tensor_sink),
-      "max-lateness", (gint64) - 1, NULL);
-  g_object_set (G_OBJECT (pipeline_cntnr->nn_tflite_tensor_filter), "framework",
-      FRAMEWORK_LIST_TENSOR_FILTER[ctx->nn_tensorfilter_desc], NULL);
-  g_object_set (G_OBJECT (pipeline_cntnr->nn_tflite_tensor_filter), "model",
-      ctx->nn_tensor_filter_model_path, NULL);
-  g_free (ctx->nn_tensor_filter_model_path);
-
-  str_caps =
-      g_strdup_printf ("video/x-raw,width=%d,height=%d,format=%s",
-      DEFAULT_WIDTH_TFLITE_MOBINET, DEFAULT_HEIGHT_TFLITE_MOBINET,
-      DEFAULT_FORMAT_TENSOR_CONVERTER);
-  caps = gst_caps_from_string (str_caps);
-  g_object_set (G_OBJECT (pipeline_cntnr->nn_tflite_capsfilter), "caps", caps,
-      NULL);
-  g_free (str_caps);
-  gst_caps_unref (caps);
-
-  gst_bin_add_many (GST_BIN (pipeline), pipeline_cntnr->nn_tflite_queue,
-      pipeline_cntnr->nn_tflite_videoscale,
-      pipeline_cntnr->nn_tflite_videoconvert,
-      pipeline_cntnr->nn_tflite_capsfilter,
-      pipeline_cntnr->nn_tflite_tensor_converter,
-      pipeline_cntnr->nn_tflite_tensor_filter,
-      pipeline_cntnr->nn_tflite_tensor_sink, NULL);
-
-  ret = gst_element_link_many (pipeline_cntnr->nn_tflite_queue,
-      pipeline_cntnr->nn_tflite_videoscale,
-      pipeline_cntnr->nn_tflite_videoconvert,
-      pipeline_cntnr->nn_tflite_capsfilter,
-      pipeline_cntnr->nn_tflite_tensor_converter,
-      pipeline_cntnr->nn_tflite_tensor_filter,
-      pipeline_cntnr->nn_tflite_tensor_sink, NULL);
-  if (ret == FALSE) {
-    g_printerr ("ERR: cannot link one (or more) of the elements "
-        "which the application pipeline consists of\n");
-    return ret;
-  }
-
-  pad = gst_element_get_static_pad (pipeline_cntnr->nn_tflite_queue, "sink");
-  if (gst_pad_link (ctx->tee_nn_line_pad, pad) != GST_PAD_LINK_OK) {
-    g_printerr ("ERR: cannot link the pad %s of %s to the pad %s of %s\n: ",
-        gst_pad_get_name (ctx->tee_nn_line_pad), NAME_V4L2_PIPELINE_TEE,
-        gst_pad_get_name (pad), NAME_NN_TFLITE_PIPELINE_QUEUE);
-  }
-  gst_object_unref (pad);
-
-  return TRUE;
-}
-
-/**
- * @brief Load model-specific files (or information) and initialize the application context using it
- *
- * @param ctx a pointer of the application context data
- * @return none
- */
-static void
-_load_model_specific (nnstrmr_app_context_t * ctx)
-{
-  switch (ctx->nn_tensorfilter_desc) {
-    case TF_LITE_MOBINET:
-    {
-      FILE *fp;
-      char *eachline;
-      size_t readcnt, len;
-      gchar *path_label = g_strconcat (DEFAULT_PATH_MODEL_TENSOR_FILTER,
-          NAME_LIST_OF_MISC_FILE_TENSOR_FILTER[ctx->nn_tensorfilter_desc],
-          NULL);
-
-      ctx->tflite_mobinet_info.labels = NULL;
-
-      fp = fopen (path_label, "r");
-      g_free (path_label);
-      len = 0;
-      if (fp != NULL) {
-        while ((readcnt = getline (&eachline, &len, fp)) != -1) {
-          ctx->tflite_mobinet_info.labels =
-              g_list_append (ctx->tflite_mobinet_info.labels,
-              g_strndup (eachline, readcnt));
-        }
-        fclose (fp);
-      } else {
-        g_printerr
-            ("ERR: failed to load the model specific files for MOBINET with Tensorflow-lite: %s\n",
-            NAME_LIST_OF_MISC_FILE_TENSOR_FILTER[TF_LITE_MOBINET]);
-        return;
-      }
-      break;
-    }
-    default:
-    {
-      g_printerr ("ERR: undefined tensor_filter model description\n");
-    }
-  }
-}
-
-/**
- * @brief Finalize the model specific information in the application context data
- *
- * @param ctx a pointer of the application context data
- * @return none
- */
-static void
-_cleanup_model_specific (nnstrmr_app_context_t * ctx)
-{
-  switch (ctx->nn_tensorfilter_desc) {
-    case TF_LITE_MOBINET:
-    {
-      if (ctx->tflite_mobinet_info.labels != NULL) {
-        g_list_free_full (ctx->tflite_mobinet_info.labels, free);
-      }
-      break;
-    }
-    default:
-    {
-      g_printerr ("ERR: undefined tensor_filter model description\n");
-    }
-  }
-
-}
-
-/**
- * @brief A callback function to block the src pad of tee in the input and output pipeline
- *
- * In order to handle the command line option, --nnline-only, this probe function for the src pad
- * of tee blocks the input and output pipeline and dynamically unlinks the output pipeline from the whole pipeline.
- *
- * @param ctx a pointer of the application context data
- * @return GST_PAD_PROBE_REMOVE
- */
-static GstPadProbeReturn
-_cb_probe_tee_output_line_pad (GstPad * pad, GstPadProbeInfo * info,
-    gpointer user_data)
-{
-  nnstrmr_app_context_t *ctx = (nnstrmr_app_context_t *) user_data;
-
-  gst_element_set_state (ctx->pipeline, GST_STATE_PAUSED);
-
-  switch (ctx->input_src) {
-    case CAM_SRC:
-    {
-      GstElement *output_queue =
-          ctx->pipeline_container.v4l2src_pipeline_container.output_queue;
-      GstElement *output_textoverlay =
-          ctx->pipeline_container.v4l2src_pipeline_container.output_textoverlay;
-      GstElement *output_sink =
-          ctx->pipeline_container.v4l2src_pipeline_container.output_sink;
-      GstPad *sinkpad_output_queue =
-          gst_element_get_static_pad (output_queue, "sink");
-
-      /* Unlink sinkpad of queue on output line from tee on input source line */
-      gst_pad_unlink (ctx->tee_output_line_pad, sinkpad_output_queue);
-      gst_object_unref (sinkpad_output_queue);
-
-      gst_element_set_state (output_queue, GST_STATE_NULL);
-      gst_element_set_state (output_textoverlay, GST_STATE_NULL);
-      gst_element_set_state (output_sink, GST_STATE_NULL);
-      gst_element_unlink_many (output_queue, output_textoverlay, output_sink,
-          NULL);
-      gst_bin_remove_many (GST_BIN (ctx->pipeline), output_queue,
-          output_textoverlay, output_sink, NULL);
-      break;
-    }
-    case FILE_SRC:
-    {
-      /* TODO */
-      break;
-    }
-    default:
-    {
-      g_printerr ("ERR: undefined input source\n");
-    }
-  }
-
-  gst_element_set_state (ctx->pipeline, GST_STATE_PLAYING);
-
-  return GST_PAD_PROBE_REMOVE;
-}
-
-/**
- * @brief A signal handler for 'new-data' emitted by 'tensor-sink'
- *
- * The suffix _output means that this handler is registered at the output pipeline.
- * Extracting the classification result made by the mobinet tensorflow-lite model and
- * feeding it to the 'textoverlay' GstElement are done here.
- *
- * @param object a pointer of the 'tensor-sink' GstElement
- * @param buffer a pointer of the buffer in the 'tensor-sink' GstElement
- * @param user_data a pointer of the application context data
- * @return none
- */
-static void
-_handle_tensor_sink_new_data_output (GstElement * object, GstBuffer * buffer,
-    gpointer user_data)
-{
-  nnstrmr_app_context_t *ctx = (nnstrmr_app_context_t *) user_data;
-  switch (ctx->input_src) {
-    case CAM_SRC:
-    {
-      v4l2src_pipeline_container_t *v4l2src_pipeline_cntnr =
-          &((ctx->pipeline_container).v4l2src_pipeline_container);
-      GstMemory *mem = gst_buffer_get_all_memory (buffer);
-      GstMapInfo map_info;
-
-      if (gst_memory_map (mem, &map_info, GST_MAP_READ)) {
-        int max_score_idx = -1;
-        guint8 max_score = 0;
-        int i;
-        gchar *class_result;
-
-        for (i = 0; i < map_info.size; i++) {
-          if ((guint8) map_info.data[i] > max_score) {
-            max_score = (guint8) map_info.data[i];
-            max_score_idx = i;
-          }
-        }
-        class_result = "UNKNOWN";
-        if (max_score_idx != -1) {
-          class_result =
-              (gchar *) g_list_nth_data (ctx->tflite_mobinet_info.labels,
-              max_score_idx);
-        }
-        g_object_set (G_OBJECT (v4l2src_pipeline_cntnr->output_textoverlay),
-            "text", class_result, NULL);
-        gst_memory_unmap (mem, &map_info);
-      }
-
-    }
-    default:
-    {
-      /* Do nothing */
-    }
-  }
-}
-
-/**
- * @brief A helper function for registering signal handlers at the output pipeline side
- *
- * @param ctx a pointer of the application context data
- * @return none
- */
-static void
-_register_signals_output (nnstrmr_app_context_t * ctx)
-{
-  guint signal_id;
-  nn_tflite_pipeline_container_t *nn_pipeline_cntnr =
-      &((ctx->pipeline_container).nn_tflite_pipeline_container);
-
-  signal_id =
-      g_signal_connect (G_OBJECT (nn_pipeline_cntnr->nn_tflite_tensor_sink),
-      "new-data", G_CALLBACK (_handle_tensor_sink_new_data_output), ctx);
-  g_mutex_lock (&ctx->signals_mutex);
-  ctx->signals_connected[ctx->signal_idx++] = signal_id;
-  g_mutex_unlock (&ctx->signals_mutex);
-}
-
-/**
- * @brief A helper function for registering signal handlers at the nnstreamer pipeline side
- *
- * @param ctx a pointer of the application context data
- * @return none
- */
-static void
-_register_signals_nn (nnstrmr_app_context_t * ctx)
-{
-  guint signal_id;
-  nn_tflite_pipeline_container_t *nn_pipeline_cntnr =
-      &((ctx->pipeline_container).nn_tflite_pipeline_container);
-
-  signal_id =
-      g_signal_connect (G_OBJECT (nn_pipeline_cntnr->nn_tflite_tensor_sink),
-      "new-data", G_CALLBACK (_handle_tensor_sink_new_data_nn), ctx);
-  g_mutex_lock (&ctx->signals_mutex);
-  ctx->signals_connected[ctx->signal_idx++] = signal_id;
-  g_mutex_unlock (&ctx->signals_mutex);
-}
-
-/**
- * @brief A helper function for unregistering all signal handlers and finalizing them
- *
- * @param ctx a pointer of the application context data
- * @return none
- */
-static void
-_unregister_signals (nnstrmr_app_context_t * ctx)
-{
-  int i;
-  nn_tflite_pipeline_container_t *nn_pipeline_cntnr =
-      &((ctx->pipeline_container).nn_tflite_pipeline_container);
-
-  for (i = 0; i < ctx->signal_idx; i++) {
-    g_signal_handler_disconnect (G_OBJECT
-        (nn_pipeline_cntnr->nn_tflite_tensor_sink), ctx->signals_connected[i]);
-  }
-}
-
-/**
- * @brief Main function.
- */
-int
-main (int argc, char *argv[])
-{
-  nnstrmr_app_context_t app_ctx = { };
-  gboolean ret;
-  GstBus *bus;
-  guint bus_watch_id;
-
-  /* Initialization */
-  gst_init (&argc, &argv);
-  app_ctx.mainloop = g_main_loop_new (NULL, FALSE);
-  _set_and_parse_option_info (argc, argv, &app_ctx);
-
-  app_ctx.signal_idx = 0;
-  /* This is not a mandatory procedure */
-  g_mutex_init (&app_ctx.signals_mutex);
-
-  app_ctx.time_last_profile = GST_CLOCK_TIME_NONE;
-  app_ctx.time_pipeline_start = GST_CLOCK_TIME_NONE;
-
-  /* Create gstreamer elements */
-  app_ctx.pipeline = gst_pipeline_new (NAME_APP_PIPELINE);
-
-  if (!app_ctx.pipeline) {
-    g_printerr ("ERR: cannot create the application pipeline, %s\n",
-        NAME_APP_PIPELINE);
-    g_main_loop_unref (app_ctx.mainloop);
-    return -1;
-  }
-
-  _load_model_specific (&app_ctx);
-
-  switch (app_ctx.input_src) {
-    case CAM_SRC:
-    {
-      /* Set up the pipeline */
-      ret = _construct_v4l2src_pipeline (&app_ctx);
-      if (ret == FALSE) {
-        goto common_cleanup;
-      }
-      break;
-    }
-    case FILE_SRC:
-    {
-      /* TODO */
-      _construct_filesrc_pipeline (&app_ctx);
-      break;
-    }
-    default:
-    {
-      g_printerr ("ERR: undefined input source\n");
-    }
-  }
-
-  _construct_nn_tflite_pipeline (&app_ctx);
-
-  /**
-   * When the --nnline-only command line option is provided, the output pipeline
-   * is dynamically unlinked from the whole pipeline.
-   */
-  if (app_ctx.flag_nnline_only) {
-    gst_pad_add_probe (app_ctx.tee_output_line_pad, GST_PAD_PROBE_TYPE_BLOCK,
-        _cb_probe_tee_output_line_pad, &app_ctx, NULL);
-  } else {
-    _register_signals_output (&app_ctx);
-  }
-  _register_signals_nn (&app_ctx);
-
-  /* Add a bus watcher */
-  bus = gst_pipeline_get_bus (GST_PIPELINE (app_ctx.pipeline));
-  bus_watch_id = gst_bus_add_watch (bus, _cb_bus_watch, &app_ctx);
-  gst_object_unref (bus);
-
-  /* Set the pipeline to "playing" state */
-  gst_element_set_state (app_ctx.pipeline, GST_STATE_PLAYING);
-
-  /* Run the main loop */
-  g_main_loop_run (app_ctx.mainloop);
-
-  /* Out of the main loop, clean up */
-  g_source_remove (bus_watch_id);
-  gst_object_unref (app_ctx.tee_output_line_pad);
-  gst_object_unref (app_ctx.tee_nn_line_pad);
-
-common_cleanup:
-  _unregister_signals (&app_ctx);
-  _cleanup_model_specific (&app_ctx);
-  gst_element_set_state (app_ctx.pipeline, GST_STATE_NULL);
-  gst_object_unref (GST_OBJECT (app_ctx.pipeline));
-  g_main_loop_unref (app_ctx.mainloop);
-
-  return 0;
-}
diff --git a/nnstreamer_example/example_object_detection/CMakeLists.txt b/nnstreamer_example/example_object_detection/CMakeLists.txt
deleted file mode 100644 (file)
index 30cfbeb..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-pkg_check_modules(app_ex_obj_pkgs gstreamer-1.0 gstreamer-video-1.0 glib-2.0 cairo REQUIRED)
-
-ADD_EXECUTABLE(nnstreamer_example_object_detection nnstreamer_example_object_detection.cc)
-
-TARGET_LINK_LIBRARIES(nnstreamer_example_object_detection ${app_ex_obj_pkgs_LIBRARIES} m)
-TARGET_INCLUDE_DIRECTORIES(nnstreamer_example_object_detection PUBLIC ${app_ex_obj_pkgs_INCLUDE_DIRS})
-TARGET_COMPILE_OPTIONS(nnstreamer_example_object_detection PUBLIC ${app_ex_obj_pkgs_CFLAGS_OTHER})
-
-IF (INSTALL_EXAMPLE_APP)
-       INSTALL(TARGETS nnstreamer_example_object_detection RUNTIME DESTINATION ${EXAMPLE_EXEC_PREFIX})
-       INSTALL(FILES get_model.sh gst-launch-object-detection.sh DESTINATION ${EXAMPLE_EXEC_PREFIX})
-ENDIF (INSTALL_EXAMPLE_APP)
diff --git a/nnstreamer_example/example_object_detection/get_model.sh b/nnstreamer_example/example_object_detection/get_model.sh
deleted file mode 100755 (executable)
index 4d5447e..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/env bash
-wget https://github.com/nnsuite/testcases/raw/master/DeepLearningModels/tensorflow-lite/ssd_mobilenet_v2_coco/ssd_mobilenet_v2_coco.tflite
-wget https://github.com/nnsuite/testcases/raw/master/DeepLearningModels/tensorflow-lite/ssd_mobilenet_v2_coco/coco_labels_list.txt
-wget https://github.com/nnsuite/testcases/raw/master/DeepLearningModels/tensorflow-lite/ssd_mobilenet_v2_coco/box_priors.txt
diff --git a/nnstreamer_example/example_object_detection/gst-launch-object-detection.sh b/nnstreamer_example/example_object_detection/gst-launch-object-detection.sh
deleted file mode 100755 (executable)
index 23431a2..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-gst-launch-1.0 \
-       v4l2src name=cam_src ! videoscale ! videoconvert ! video/x-raw,width=640,height=480,format=RGB,framerate=30/1 ! tee name=t \
-       t. ! queue leaky=2 max-size-buffers=2 ! videoscale ! video/x-raw,width=300,height=300,format=RGB ! tensor_converter ! \
-               tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! \
-               tensor_filter framework=tensorflow-lite model=ssd_mobilenet_v2_coco.tflite ! \
-               tensor_decoder mode=bounding_boxes option1=ssd option2=coco_labels_list.txt option3=box_priors.txt option4=640:480 option5=300:300 ! \
-               compositor name=mix sink_0::zorder=2 sink_1::zorder=1 ! videoconvert ! ximagesink \
-       t. ! queue leaky=2 max-size-buffers=10 ! mix.
diff --git a/nnstreamer_example/example_object_detection/meson.build b/nnstreamer_example/example_object_detection/meson.build
deleted file mode 100644 (file)
index eb31949..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-cairo_dep = dependency('cairo')
-
-nnstreamer_example_object_detection = executable('nnstreamer_example_object_detection',
-  'nnstreamer_example_object_detection.cc',
-  dependencies: [glib_dep, gst_dep, gst_video_dep, cairo_dep, libm_dep],
-  install: get_option('install-example'),
-  install_dir: examples_install_dir
-)
-
-if get_option('install-example')
-  install_data(['get_model.sh', 'gst-launch-object-detection.sh'],
-    install_dir: examples_install_dir
-  )
-endif
diff --git a/nnstreamer_example/example_object_detection/nnstreamer_example_object_detection.cc b/nnstreamer_example/example_object_detection/nnstreamer_example_object_detection.cc
deleted file mode 100644 (file)
index d51e7a6..0000000
+++ /dev/null
@@ -1,737 +0,0 @@
-/**
- * @file       nnstreamer_example_object_detection.cc
- * @date       22 October 2018
- * @brief      Tensor stream example with TF-Lite model for object detection
- * @author     HyoungJoo Ahn <hello.ahn@samsung.com>
- * @bug                No known bugs.
- *
- * Run example :
- * Before running this example, GST_PLUGIN_PATH should be updated for nnstreamer plug-in.
- * $ export GST_PLUGIN_PATH=$GST_PLUGIN_PATH:<nnstreamer plugin path>
- * $ ./nnstreamer_example_object_detection
- *
- * The required model and resources are stored at the link below:
- * https://github.com/nnsuite/testcases/tree/master/DeepLearningModels/tensorflow-lite/ssd_mobilenet_v2_coco
- */
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <glib.h>
-#include <gst/gst.h>
-#include <gst/video/video.h>
-
-#include <cstring>
-#include <vector>
-#include <iostream>
-#include <fstream>
-#include <algorithm>
-
-#include <math.h>
-#include <cairo.h>
-#include <cairo-gobject.h>
-
-/**
- * @brief Macro for debug mode.
- */
-#ifndef DBG
-#define DBG FALSE
-#endif
-
-/**
- * @brief Macro for debug message.
- */
-#define _print_log(...) if (DBG) g_message (__VA_ARGS__)
-
-/**
- * @brief Macro to check error case.
- */
-#define _check_cond_err(cond) \
-  if (!(cond)) { \
-    _print_log ("app failed! [line : %d]", __LINE__); \
-    goto error; \
-  }
-
-#define Y_SCALE         10.0f
-#define X_SCALE         10.0f
-#define H_SCALE         5.0f
-#define W_SCALE         5.0f
-
-#define VIDEO_WIDTH     640
-#define VIDEO_HEIGHT    480
-#define MODEL_WIDTH     300
-#define MODEL_HEIGHT    300
-
-#define BOX_SIZE        4
-#define LABEL_SIZE      91
-#define DETECTION_MAX   1917
-
-/**
- * @brief Max objects in display.
- */
-#define MAX_OBJECT_DETECTION 5
-
-typedef struct
-{
-  gint x;
-  gint y;
-  gint width;
-  gint height;
-  gint class_id;
-  gfloat prob;
-} DetectedObject;
-
-typedef struct
-{
-  gboolean valid;
-  GstVideoInfo vinfo;
-} CairoOverlayState;
-
-/**
- * @brief Data structure for tflite model info.
- */
-typedef struct
-{
-  gchar *model_path; /**< tflite model file path */
-  gchar *label_path; /**< label file path */
-  gchar *box_prior_path; /**< box prior file path */
-  gfloat box_priors[BOX_SIZE][DETECTION_MAX]; /**< box prior */
-  GList *labels; /**< list of loaded labels */
-} TFLiteModelInfo;
-
-/**
- * @brief Data structure for app.
- */
-typedef struct
-{
-  GMainLoop *loop; /**< main event loop */
-  GstElement *pipeline; /**< gst pipeline for data stream */
-  GstBus *bus; /**< gst bus for data pipeline */
-  gboolean running; /**< true when app is running */
-  GMutex mutex; /**< mutex for processing */
-  TFLiteModelInfo tflite_info; /**< tflite model info */
-  CairoOverlayState overlay_state;
-  std::vector < DetectedObject > detected_objects;
-} AppData;
-
-/**
- * @brief Data for pipeline and result.
- */
-static AppData g_app;
-
-/**
- * @brief Read strings from file.
- */
-static gboolean
-read_lines (const gchar * file_name, GList ** lines)
-{
-  std::ifstream file (file_name);
-  if (!file) {
-    _print_log ("Failed to open file %s", file_name);
-    return FALSE;
-  }
-
-  std::string str;
-  while (std::getline (file, str)) {
-    *lines = g_list_append (*lines, g_strdup (str.c_str ()));
-  }
-
-  return TRUE;
-}
-
-/**
- * @brief Load box priors.
- */
-static gboolean
-tflite_load_box_priors (TFLiteModelInfo * tflite_info)
-{
-  GList *box_priors = NULL;
-  gchar *box_row;
-
-  g_return_val_if_fail (tflite_info != NULL, FALSE);
-  g_return_val_if_fail (read_lines (tflite_info->box_prior_path, &box_priors),
-      FALSE);
-
-  for (int row = 0; row < BOX_SIZE; row++) {
-    int column = 0;
-    int i = 0, j = 0;
-    char buff[11];
-
-    memset (buff, 0, 11);
-    box_row = (gchar *) g_list_nth_data (box_priors, row);
-
-    while ((box_row[i] != '\n') && (box_row[i] != '\0')) {
-      if (box_row[i] != ' ') {
-        buff[j] = box_row[i];
-        j++;
-      } else {
-        if (j != 0) {
-          tflite_info->box_priors[row][column++] = atof (buff);
-          memset (buff, 0, 11);
-        }
-        j = 0;
-      }
-      i++;
-    }
-
-    tflite_info->box_priors[row][column++] = atof (buff);
-  }
-
-  g_list_free_full (box_priors, g_free);
-  return TRUE;
-}
-
-/**
- * @brief Load labels.
- */
-static gboolean
-tflite_load_labels (TFLiteModelInfo * tflite_info)
-{
-  g_return_val_if_fail (tflite_info != NULL, FALSE);
-
-  return read_lines (tflite_info->label_path, &tflite_info->labels);
-}
-
-/**
- * @brief Check tflite model and load labels.
- */
-static gboolean
-tflite_init_info (TFLiteModelInfo * tflite_info, const gchar * path)
-{
-  const gchar tflite_model[] = "ssd_mobilenet_v2_coco.tflite";
-  const gchar tflite_label[] = "coco_labels_list.txt";
-  const gchar tflite_box_priors[] = "box_priors.txt";
-
-  g_return_val_if_fail (tflite_info != NULL, FALSE);
-
-  tflite_info->model_path = g_strdup_printf ("%s/%s", path, tflite_model);
-  tflite_info->label_path = g_strdup_printf ("%s/%s", path, tflite_label);
-  tflite_info->box_prior_path =
-      g_strdup_printf ("%s/%s", path, tflite_box_priors);
-
-  tflite_info->labels = NULL;
-
-  g_return_val_if_fail (tflite_load_box_priors (tflite_info), FALSE);
-  g_return_val_if_fail (tflite_load_labels (tflite_info), FALSE);
-
-  return TRUE;
-}
-
-/**
- * @brief Free data in tflite info structure.
- */
-static void
-tflite_free_info (TFLiteModelInfo * tflite_info)
-{
-  g_return_if_fail (tflite_info != NULL);
-
-  if (tflite_info->model_path) {
-    g_free (tflite_info->model_path);
-    tflite_info->model_path = NULL;
-  }
-
-  if (tflite_info->label_path) {
-    g_free (tflite_info->label_path);
-    tflite_info->label_path = NULL;
-  }
-
-  if (tflite_info->box_prior_path) {
-    g_free (tflite_info->box_prior_path);
-    tflite_info->box_prior_path = NULL;
-  }
-
-  if (tflite_info->labels) {
-    g_list_free_full (tflite_info->labels, g_free);
-    tflite_info->labels = NULL;
-  }
-}
-
-/**
- * @brief Free resources in app data.
- */
-static void
-free_app_data (void)
-{
-  if (g_app.loop) {
-    g_main_loop_unref (g_app.loop);
-    g_app.loop = NULL;
-  }
-
-  if (g_app.bus) {
-    gst_bus_remove_signal_watch (g_app.bus);
-    gst_object_unref (g_app.bus);
-    g_app.bus = NULL;
-  }
-
-  if (g_app.pipeline) {
-    gst_object_unref (g_app.pipeline);
-    g_app.pipeline = NULL;
-  }
-
-  g_app.detected_objects.clear ();
-
-  tflite_free_info (&g_app.tflite_info);
-  g_mutex_clear (&g_app.mutex);
-}
-
-/**
- * @brief Function to print error message.
- */
-static void
-parse_err_message (GstMessage * message)
-{
-  gchar *debug;
-  GError *error;
-
-  g_return_if_fail (message != NULL);
-
-  switch (GST_MESSAGE_TYPE (message)) {
-    case GST_MESSAGE_ERROR:
-      gst_message_parse_error (message, &error, &debug);
-      break;
-
-    case GST_MESSAGE_WARNING:
-      gst_message_parse_warning (message, &error, &debug);
-      break;
-
-    default:
-      return;
-  }
-
-  gst_object_default_error (GST_MESSAGE_SRC (message), error, debug);
-  g_error_free (error);
-  g_free (debug);
-}
-
-/**
- * @brief Function to print qos message.
- */
-static void
-parse_qos_message (GstMessage * message)
-{
-  GstFormat format;
-  guint64 processed;
-  guint64 dropped;
-
-  gst_message_parse_qos_stats (message, &format, &processed, &dropped);
-  _print_log ("format[%d] processed[%" G_GUINT64_FORMAT "] dropped[%"
-      G_GUINT64_FORMAT "]", format, processed, dropped);
-}
-
-/**
- * @brief Callback for message.
- */
-static void
-bus_message_cb (GstBus * bus, GstMessage * message, gpointer user_data)
-{
-  switch (GST_MESSAGE_TYPE (message)) {
-    case GST_MESSAGE_EOS:
-      _print_log ("received eos message");
-      g_main_loop_quit (g_app.loop);
-      break;
-
-    case GST_MESSAGE_ERROR:
-      _print_log ("received error message");
-      parse_err_message (message);
-      g_main_loop_quit (g_app.loop);
-      break;
-
-    case GST_MESSAGE_WARNING:
-      _print_log ("received warning message");
-      parse_err_message (message);
-      break;
-
-    case GST_MESSAGE_STREAM_START:
-      _print_log ("received start message");
-      break;
-
-    case GST_MESSAGE_QOS:
-      parse_qos_message (message);
-      break;
-
-    default:
-      break;
-  }
-}
-
-/**
- * @brief Compare score of detected objects.
- */
-static bool
-compare_objs (DetectedObject & a, DetectedObject & b)
-{
-  return a.prob > b.prob;
-}
-
-/**
- * @brief Intersection of union
- */
-static gfloat
-iou (DetectedObject & A, DetectedObject & B)
-{
-  int x1 = std::max (A.x, B.x);
-  int y1 = std::max (A.y, B.y);
-  int x2 = std::min (A.x + A.width, B.x + B.width);
-  int y2 = std::min (A.y + A.height, B.y + B.height);
-  int w = std::max (0, (x2 - x1 + 1));
-  int h = std::max (0, (y2 - y1 + 1));
-  float inter = w * h;
-  float areaA = A.width * A.height;
-  float areaB = B.width * B.height;
-  float o = inter / (areaA + areaB - inter);
-  return (o >= 0) ? o : 0;
-}
-
-/**
- * @brief NMS (non-maximum suppression)
- */
-static void
-nms (std::vector < DetectedObject > &detected)
-{
-  const float threshold_iou = .5f;
-  guint boxes_size;
-  guint i, j;
-
-  std::sort (detected.begin (), detected.end (), compare_objs);
-  boxes_size = detected.size ();
-
-  std::vector < bool > del (boxes_size, false);
-  for (i = 0; i < boxes_size; i++) {
-    if (!del[i]) {
-      for (j = i + 1; j < boxes_size; j++) {
-        if (iou (detected.at (i), detected.at (j)) > threshold_iou) {
-          del[j] = true;
-        }
-      }
-    }
-  }
-
-  /* update result */
-  g_mutex_lock (&g_app.mutex);
-
-  g_app.detected_objects.clear ();
-  for (i = 0; i < boxes_size; i++) {
-    if (!del[i]) {
-      g_app.detected_objects.push_back (detected[i]);
-
-      if (DBG) {
-        _print_log ("==============================");
-        _print_log ("Label           : %s",
-            (gchar *) g_list_nth_data (g_app.tflite_info.labels,
-                detected[i].class_id));
-        _print_log ("x               : %d", detected[i].x);
-        _print_log ("y               : %d", detected[i].y);
-        _print_log ("width           : %d", detected[i].width);
-        _print_log ("height          : %d", detected[i].height);
-        _print_log ("Confidence Score: %f", detected[i].prob);
-      }
-    }
-  }
-
-  g_mutex_unlock (&g_app.mutex);
-}
-
-#define _expit(x) \
-    (1.f / (1.f + expf (-x)))
-
-/**
- * @brief Get detected objects.
- */
-static void
-get_detected_objects (gfloat * detections, gfloat * boxes)
-{
-  const float threshold_score = .5f;
-  std::vector < DetectedObject > detected;
-
-  for (int d = 0; d < DETECTION_MAX; d++) {
-    float ycenter =
-        boxes[0] / Y_SCALE * g_app.tflite_info.box_priors[2][d] +
-        g_app.tflite_info.box_priors[0][d];
-    float xcenter =
-        boxes[1] / X_SCALE * g_app.tflite_info.box_priors[3][d] +
-        g_app.tflite_info.box_priors[1][d];
-    float h =
-        (float) expf (boxes[2] / H_SCALE) * g_app.tflite_info.box_priors[2][d];
-    float w =
-        (float) expf (boxes[3] / W_SCALE) * g_app.tflite_info.box_priors[3][d];
-
-    float ymin = ycenter - h / 2.f;
-    float xmin = xcenter - w / 2.f;
-    float ymax = ycenter + h / 2.f;
-    float xmax = xcenter + w / 2.f;
-
-    int x = xmin * MODEL_WIDTH;
-    int y = ymin * MODEL_HEIGHT;
-    int width = (xmax - xmin) * MODEL_WIDTH;
-    int height = (ymax - ymin) * MODEL_HEIGHT;
-
-    for (int c = 1; c < LABEL_SIZE; c++) {
-      gfloat score = _expit (detections[c]);
-      /**
-       * This score cutoff is taken from Tensorflow's demo app.
-       * There are quite a lot of nodes to be run to convert it to the useful probability
-       * scores. As a result, this cutoff can lose good detections in some scenarios and
-       * generate too much noise in others.
-       */
-      if (score < threshold_score)
-        continue;
-
-      DetectedObject object;
-
-      object.class_id = c;
-      object.x = x;
-      object.y = y;
-      object.width = width;
-      object.height = height;
-      object.prob = score;
-
-      detected.push_back (object);
-    }
-
-    detections += LABEL_SIZE;
-    boxes += BOX_SIZE;
-  }
-
-  nms (detected);
-}
-
-/**
- * @brief Callback for tensor sink signal.
- */
-static void
-new_data_cb (GstElement * element, GstBuffer * buffer, gpointer user_data)
-{
-  GstMemory *mem_boxes, *mem_detections;
-  GstMapInfo info_boxes, info_detections;
-  gfloat *boxes, *detections;
-
-  g_return_if_fail (g_app.running);
-
-  /**
-   * tensor type is float32.
-   * [0] dim of boxes > BOX_SIZE : 1 : DETECTION_MAX : 1
-   * [1] dim of labels > LABEL_SIZE : DETECTION_MAX : 1 : 1
-   */
-  g_assert (gst_buffer_n_memory (buffer) == 2);
-
-  /* boxes */
-  mem_boxes = gst_buffer_get_memory (buffer, 0);
-  g_assert (gst_memory_map (mem_boxes, &info_boxes, GST_MAP_READ));
-  g_assert (info_boxes.size == BOX_SIZE * DETECTION_MAX * 4);
-  boxes = (gfloat *) info_boxes.data;
-
-  /* detections */
-  mem_detections = gst_buffer_get_memory (buffer, 1);
-  g_assert (gst_memory_map (mem_detections, &info_detections, GST_MAP_READ));
-  g_assert (info_detections.size == LABEL_SIZE * DETECTION_MAX * 4);
-  detections = (gfloat *) info_detections.data;
-
-  get_detected_objects (detections, boxes);
-
-  gst_memory_unmap (mem_boxes, &info_boxes);
-  gst_memory_unmap (mem_detections, &info_detections);
-
-  gst_memory_unref (mem_boxes);
-  gst_memory_unref (mem_detections);
-}
-
-/**
- * @brief Set window title.
- * @param name GstXImageSink element name
- * @param title window title
- */
-static void
-set_window_title (const gchar * name, const gchar * title)
-{
-  GstTagList *tags;
-  GstPad *sink_pad;
-  GstElement *element;
-
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), name);
-
-  g_return_if_fail (element != NULL);
-
-  sink_pad = gst_element_get_static_pad (element, "sink");
-
-  if (sink_pad) {
-    tags = gst_tag_list_new (GST_TAG_TITLE, title, NULL);
-    gst_pad_send_event (sink_pad, gst_event_new_tag (tags));
-    gst_object_unref (sink_pad);
-  }
-
-  gst_object_unref (element);
-}
-
-/**
- * @brief Store the information from the caps that we are interested in.
- */
-static void
-prepare_overlay_cb (GstElement * overlay, GstCaps * caps, gpointer user_data)
-{
-  CairoOverlayState *state = &g_app.overlay_state;
-
-  state->valid = gst_video_info_from_caps (&state->vinfo, caps);
-}
-
-/**
- * @brief Callback to draw the overlay.
- */
-static void
-draw_overlay_cb (GstElement * overlay, cairo_t * cr, guint64 timestamp,
-    guint64 duration, gpointer user_data)
-{
-  CairoOverlayState *state = &g_app.overlay_state;
-  gfloat x, y, width, height;
-  gchar *label;
-  guint drawed = 0;
-
-  g_return_if_fail (state->valid);
-  g_return_if_fail (g_app.running);
-
-  std::vector < DetectedObject > detected;
-  std::vector < DetectedObject >::iterator iter;
-
-  g_mutex_lock (&g_app.mutex);
-  detected = g_app.detected_objects;
-  g_mutex_unlock (&g_app.mutex);
-
-  /* set font props */
-  cairo_select_font_face (cr, "Sans", CAIRO_FONT_SLANT_NORMAL,
-      CAIRO_FONT_WEIGHT_BOLD);
-  cairo_set_font_size (cr, 20.0);
-
-  for (iter = detected.begin (); iter != detected.end (); ++iter) {
-    label =
-        (gchar *) g_list_nth_data (g_app.tflite_info.labels, iter->class_id);
-
-    x = iter->x * VIDEO_WIDTH / MODEL_WIDTH;
-    y = iter->y * VIDEO_HEIGHT / MODEL_HEIGHT;
-    width = iter->width * VIDEO_WIDTH / MODEL_WIDTH;
-    height = iter->height * VIDEO_HEIGHT / MODEL_HEIGHT;
-
-    /* draw rectangle */
-    cairo_rectangle (cr, x, y, width, height);
-    cairo_set_source_rgb (cr, 1, 0, 0);
-    cairo_set_line_width (cr, 1.5);
-    cairo_stroke (cr);
-    cairo_fill_preserve (cr);
-
-    /* draw title */
-    cairo_move_to (cr, x + 5, y + 25);
-    cairo_text_path (cr, label);
-    cairo_set_source_rgb (cr, 1, 0, 0);
-    cairo_fill_preserve (cr);
-    cairo_set_source_rgb (cr, 1, 1, 1);
-    cairo_set_line_width (cr, .3);
-    cairo_stroke (cr);
-    cairo_fill_preserve (cr);
-
-    if (++drawed >= MAX_OBJECT_DETECTION) {
-      /* max objects drawn */
-      break;
-    }
-  }
-}
-
-/**
- * @brief Main function.
- */
-int
-main (int argc, char ** argv)
-{
-  const gchar tflite_model_path[] = "./tflite_model_ssd";
-
-  gchar *str_pipeline;
-  GstElement *element;
-
-  _print_log ("start app..");
-
-  /* init app variable */
-  g_app.running = FALSE;
-  g_app.loop = NULL;
-  g_app.bus = NULL;
-  g_app.pipeline = NULL;
-  g_app.detected_objects.clear ();
-  g_mutex_init (&g_app.mutex);
-
-  _check_cond_err (tflite_init_info (&g_app.tflite_info, tflite_model_path));
-
-  /* init gstreamer */
-  gst_init (&argc, &argv);
-
-  /* main loop */
-  g_app.loop = g_main_loop_new (NULL, FALSE);
-  _check_cond_err (g_app.loop != NULL);
-
-  /* init pipeline */
-  str_pipeline =
-      g_strdup_printf
-      ("v4l2src name=src ! videoscale ! "
-      "video/x-raw,width=%d,height=%d,format=RGB ! tee name=t_raw "
-      "t_raw. ! queue ! videoconvert ! cairooverlay name=tensor_res ! ximagesink name=img_tensor "
-      "t_raw. ! queue leaky=2 max-size-buffers=2 ! videoscale ! video/x-raw,width=%d,height=%d ! tensor_converter ! "
-      "tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! "
-      "tensor_filter framework=tensorflow-lite model=%s ! "
-      "tensor_sink name=tensor_sink",
-      VIDEO_WIDTH, VIDEO_HEIGHT, MODEL_WIDTH, MODEL_HEIGHT,
-      g_app.tflite_info.model_path);
-  g_app.pipeline = gst_parse_launch (str_pipeline, NULL);
-  g_free (str_pipeline);
-  _check_cond_err (g_app.pipeline != NULL);
-
-  /* bus and message callback */
-  g_app.bus = gst_element_get_bus (g_app.pipeline);
-  _check_cond_err (g_app.bus != NULL);
-
-  gst_bus_add_signal_watch (g_app.bus);
-  g_signal_connect (g_app.bus, "message", G_CALLBACK (bus_message_cb), NULL);
-
-  /* tensor sink signal : new data callback */
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), "tensor_sink");
-  g_signal_connect (element, "new-data", G_CALLBACK (new_data_cb), NULL);
-  gst_object_unref (element);
-
-  /* cairo overlay */
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), "tensor_res");
-  g_signal_connect (element, "draw", G_CALLBACK (draw_overlay_cb), NULL);
-  g_signal_connect (element, "caps-changed", G_CALLBACK (prepare_overlay_cb),
-      NULL);
-  gst_object_unref (element);
-
-  /* start pipeline */
-  gst_element_set_state (g_app.pipeline, GST_STATE_PLAYING);
-  g_app.running = TRUE;
-
-  /* set window title */
-  set_window_title ("img_tensor", "NNStreamer Example");
-
-  /* run main loop */
-  g_main_loop_run (g_app.loop);
-
-  /* quit when received eos or error message */
-  g_app.running = FALSE;
-
-  /* cam source element */
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), "src");
-
-  gst_element_set_state (element, GST_STATE_READY);
-  gst_element_set_state (g_app.pipeline, GST_STATE_READY);
-
-  g_usleep (200 * 1000);
-
-  gst_element_set_state (element, GST_STATE_NULL);
-  gst_element_set_state (g_app.pipeline, GST_STATE_NULL);
-
-  g_usleep (200 * 1000);
-  gst_object_unref (element);
-
-error:
-  _print_log ("close app..");
-
-  free_app_data ();
-  return 0;
-}
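As a reading aid for get_detected_objects() above: it implements the standard SSD prior-box decode. Writing the raw box outputs as (t_y, t_x, t_h, t_w) and assuming the box_priors rows are ordered (center-y, center-x, height, width), which is what the arithmetic implies, the code computes

    y_c = (t_y / Y_SCALE) * p_h + p_y
    x_c = (t_x / X_SCALE) * p_w + p_x
    h   = exp(t_h / H_SCALE) * p_h
    w   = exp(t_w / W_SCALE) * p_w

then maps (x_c - w/2, y_c - h/2, w, h) to pixel coordinates via MODEL_WIDTH and MODEL_HEIGHT, squashes each class logit with the sigmoid 1 / (1 + exp(-z)) (the _expit macro), keeps detections with score >= 0.5, and finally runs NMS, dropping any box whose IoU with a higher-scoring box exceeds 0.5.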
diff --git a/nnstreamer_example/example_object_detection_tensorflow/CMakeLists.txt b/nnstreamer_example/example_object_detection_tensorflow/CMakeLists.txt
deleted file mode 100644 (file)
index 05fcd58..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-pkg_check_modules(app_ex_obj_pkgs gstreamer-1.0 gstreamer-video-1.0 glib-2.0 cairo REQUIRED)
-
-ADD_EXECUTABLE(nnstreamer_example_object_detection_tensorflow nnstreamer_example_object_detection_tensorflow.cc)
-
-TARGET_LINK_LIBRARIES(nnstreamer_example_object_detection_tensorflow ${app_ex_obj_pkgs_LIBRARIES} m)
-TARGET_INCLUDE_DIRECTORIES(nnstreamer_example_object_detection_tensorflow PUBLIC ${app_ex_obj_pkgs_INCLUDE_DIRS})
-TARGET_COMPILE_OPTIONS(nnstreamer_example_object_detection_tensorflow PUBLIC ${app_ex_obj_pkgs_CFLAGS_OTHER})
-
-IF (INSTALL_EXAMPLE_APP)
-       INSTALL(TARGETS nnstreamer_example_object_detection_tensorflow RUNTIME DESTINATION ${EXAMPLE_EXEC_PREFIX})
-	INSTALL(FILES get_model.sh DESTINATION ${EXAMPLE_EXEC_PREFIX}) # A shell script example will be added once a tensor_decoder for the tf-ssd model is available.
-ENDIF (INSTALL_EXAMPLE_APP)
diff --git a/nnstreamer_example/example_object_detection_tensorflow/get_model.sh b/nnstreamer_example/example_object_detection_tensorflow/get_model.sh
deleted file mode 100644 (file)
index 44b3a04..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env bash
-wget https://github.com/nnsuite/testcases/raw/master/DeepLearningModels/tensorflow/ssdlite_mobilenet_v2/ssdlite_mobilenet_v2.pb
-wget https://github.com/nnsuite/testcases/raw/master/DeepLearningModels/tensorflow/ssdlite_mobilenet_v2/coco_labels_list.txt
diff --git a/nnstreamer_example/example_object_detection_tensorflow/meson.build b/nnstreamer_example/example_object_detection_tensorflow/meson.build
deleted file mode 100644 (file)
index 12195ba..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-cairo_dep = dependency('cairo')
-
-nnstreamer_example_object_detection_tensorflow = executable('nnstreamer_example_object_detection_tensorflow',
-  'nnstreamer_example_object_detection_tensorflow.cc',
-  dependencies: [glib_dep, gst_dep, gst_video_dep, cairo_dep, libm_dep],
-  include_directories: nnstreamer_inc,
-  install: get_option('install-example'),
-  install_dir: examples_install_dir
-)
-
-if get_option('install-example')
-  install_data(['get_model.sh'],
-    install_dir: examples_install_dir
-  )
-endif
diff --git a/nnstreamer_example/example_object_detection_tensorflow/nnstreamer_example_object_detection_tensorflow.cc b/nnstreamer_example/example_object_detection_tensorflow/nnstreamer_example_object_detection_tensorflow.cc
deleted file mode 100644 (file)
index d01d854..0000000
+++ /dev/null
@@ -1,595 +0,0 @@
-/**
- * @file       nnstreamer_example_object_detection_tensorflow.cc
- * @date       8 Jan 2019
- * @brief      Tensor stream example with Tensorflow model for object detection
- * @author     HyoungJoo Ahn <hello.ahn@samsung.com>
- * @bug                No known bugs.
- *
- * Run example :
- * Before running this example, GST_PLUGIN_PATH should be updated for nnstreamer plug-in.
- * $ export GST_PLUGIN_PATH=$GST_PLUGIN_PATH:<nnstreamer plugin path>
- * $ ./nnstreamer_example_object_detection_tensorflow
- *
- */
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <glib.h>
-#include <gst/gst.h>
-#include <gst/video/video.h>
-
-#include <cstring>
-#include <vector>
-#include <iostream>
-#include <fstream>
-#include <algorithm>
-
-#include <math.h>
-#include <cairo.h>
-#include <cairo-gobject.h>
-
-/**
- * @brief Macro for debug mode.
- */
-#ifndef DBG
-#define DBG FALSE
-#endif
-
-/**
- * @brief Macro for debug message.
- */
-#define _print_log(...) if (DBG) g_message (__VA_ARGS__)
-
-/**
- * @brief Macro to check error case.
- */
-#define _check_cond_err(cond) \
-  if (!(cond)) { \
-    _print_log ("app failed! [line : %d]", __LINE__); \
-    goto error; \
-  }
-
-#define VIDEO_WIDTH     640
-#define VIDEO_HEIGHT    480
-
-#define BOX_SIZE        4
-#define LABEL_SIZE      91
-#define DETECTION_MAX   100
-
-/**
- * @brief Max objects in display.
- */
-#define MAX_OBJECT_DETECTION 5
-
-typedef struct
-{
-  gint x;
-  gint y;
-  gint width;
-  gint height;
-  gint class_id;
-  gfloat prob;
-} DetectedObject;
-
-typedef struct
-{
-  gboolean valid;
-  GstVideoInfo vinfo;
-} CairoOverlayState;
-
-/**
- * @brief Data structure for tf model info.
- */
-typedef struct
-{
-  gchar *model_path; /**< tf model file path */
-  gchar *label_path; /**< label file path */
-  GList *labels; /**< list of loaded labels */
-} TFModelInfo;
-
-/**
- * @brief Data structure for app.
- */
-typedef struct
-{
-  GMainLoop *loop; /**< main event loop */
-  GstElement *pipeline; /**< gst pipeline for data stream */
-  GstBus *bus; /**< gst bus for data pipeline */
-  gboolean running; /**< true when app is running */
-  GMutex mutex; /**< mutex for processing */
-  TFModelInfo tf_info; /**< tf model info */
-  CairoOverlayState overlay_state;
-  std::vector < DetectedObject > detected_objects;
-} AppData;
-
-/**
- * @brief Data for pipeline and result.
- */
-static AppData g_app;
-
-/**
- * @brief Read strings from file.
- */
-static gboolean
-read_lines (const gchar * file_name, GList ** lines)
-{
-  std::ifstream file (file_name);
-  if (!file) {
-    _print_log ("Failed to open file %s", file_name);
-    return FALSE;
-  }
-
-  std::string str;
-  while (std::getline (file, str)) {
-    *lines = g_list_append (*lines, g_strdup (str.c_str ()));
-  }
-
-  return TRUE;
-}
-
-/**
- * @brief Load labels.
- */
-static gboolean
-tf_load_labels (TFModelInfo * tf_info)
-{
-  g_return_val_if_fail (tf_info != NULL, FALSE);
-
-  return read_lines (tf_info->label_path, &tf_info->labels);
-}
-
-/**
- * @brief Check tf model and load labels.
- */
-static gboolean
-tf_init_info (TFModelInfo * tf_info, const gchar * path)
-{
-  const gchar tf_model[] = "ssdlite_mobilenet_v2.pb";
-  const gchar tf_label[] = "coco_labels_list.txt";
-
-  g_return_val_if_fail (tf_info != NULL, FALSE);
-
-  tf_info->model_path = g_strdup_printf ("%s/%s", path, tf_model);
-  tf_info->label_path = g_strdup_printf ("%s/%s", path, tf_label);
-
-  tf_info->labels = NULL;
-
-  g_return_val_if_fail (tf_load_labels (tf_info), FALSE);
-
-  return TRUE;
-}
-
-/**
- * @brief Free data in tf info structure.
- */
-static void
-tf_free_info (TFModelInfo * tf_info)
-{
-  g_return_if_fail (tf_info != NULL);
-
-  if (tf_info->model_path) {
-    g_free (tf_info->model_path);
-    tf_info->model_path = NULL;
-  }
-
-  if (tf_info->label_path) {
-    g_free (tf_info->label_path);
-    tf_info->label_path = NULL;
-  }
-
-  if (tf_info->labels) {
-    g_list_free_full (tf_info->labels, g_free);
-    tf_info->labels = NULL;
-  }
-}
-
-/**
- * @brief Free resources in app data.
- */
-static void
-free_app_data (void)
-{
-  if (g_app.loop) {
-    g_main_loop_unref (g_app.loop);
-    g_app.loop = NULL;
-  }
-
-  if (g_app.bus) {
-    gst_bus_remove_signal_watch (g_app.bus);
-    gst_object_unref (g_app.bus);
-    g_app.bus = NULL;
-  }
-
-  if (g_app.pipeline) {
-    gst_object_unref (g_app.pipeline);
-    g_app.pipeline = NULL;
-  }
-
-  g_app.detected_objects.clear ();
-
-  tf_free_info (&g_app.tf_info);
-  g_mutex_clear (&g_app.mutex);
-}
-
-/**
- * @brief Function to print error message.
- */
-static void
-parse_err_message (GstMessage * message)
-{
-  gchar *debug;
-  GError *error;
-
-  g_return_if_fail (message != NULL);
-
-  switch (GST_MESSAGE_TYPE (message)) {
-    case GST_MESSAGE_ERROR:
-      gst_message_parse_error (message, &error, &debug);
-      break;
-
-    case GST_MESSAGE_WARNING:
-      gst_message_parse_warning (message, &error, &debug);
-      break;
-
-    default:
-      return;
-  }
-
-  gst_object_default_error (GST_MESSAGE_SRC (message), error, debug);
-  g_error_free (error);
-  g_free (debug);
-}
-
-/**
- * @brief Function to print qos message.
- */
-static void
-parse_qos_message (GstMessage * message)
-{
-  GstFormat format;
-  guint64 processed;
-  guint64 dropped;
-
-  gst_message_parse_qos_stats (message, &format, &processed, &dropped);
-  _print_log ("format[%d] processed[%" G_GUINT64_FORMAT "] dropped[%"
-      G_GUINT64_FORMAT "]", format, processed, dropped);
-}
-
-/**
- * @brief Callback for message.
- */
-static void
-bus_message_cb (GstBus * bus, GstMessage * message, gpointer user_data)
-{
-  switch (GST_MESSAGE_TYPE (message)) {
-    case GST_MESSAGE_EOS:
-      _print_log ("received eos message");
-      g_main_loop_quit (g_app.loop);
-      break;
-
-    case GST_MESSAGE_ERROR:
-      _print_log ("received error message");
-      parse_err_message (message);
-      g_main_loop_quit (g_app.loop);
-      break;
-
-    case GST_MESSAGE_WARNING:
-      _print_log ("received warning message");
-      parse_err_message (message);
-      break;
-
-    case GST_MESSAGE_STREAM_START:
-      _print_log ("received start message");
-      break;
-
-    case GST_MESSAGE_QOS:
-      parse_qos_message (message);
-      break;
-
-    default:
-      break;
-  }
-}
-
-/**
- * @brief Get detected objects.
- */
-static void
-get_detected_objects (
-  gfloat * num_detections,
-  gfloat * detection_classes,
-  gfloat * detection_scores,
-  gfloat * detection_boxes)
-{
-
-  g_mutex_lock (&g_app.mutex);
-
-  g_app.detected_objects.clear ();
-
-  _print_log("========================================================");
-  _print_log("                 Number Of Objects: %2d", (int) num_detections[0]);
-  _print_log("========================================================");
-  for (int i = 0; i < (int) num_detections[0]; i++){
-    DetectedObject object;
-
-    object.class_id = (int) detection_classes[i];
-    object.x = (int) (detection_boxes[i * BOX_SIZE + 1] * VIDEO_WIDTH);
-    object.y = (int) (detection_boxes[i * BOX_SIZE] * VIDEO_HEIGHT);
-    object.width = (int) ((detection_boxes[i * BOX_SIZE + 3]
-                 - detection_boxes[i * BOX_SIZE + 1]) * VIDEO_WIDTH);
-    object.height = (int) ((detection_boxes[i * BOX_SIZE + 2]
-                  - detection_boxes[i * BOX_SIZE]) * VIDEO_HEIGHT);
-    object.prob = detection_scores[i];
-
-    _print_log("%10s: x:%3d, y:%3d, w:%3d, h:%3d, prob:%.2f",
-      (gchar *) g_list_nth_data (g_app.tf_info.labels, object.class_id),
-      object.x, object.y, object.width, object.height, object.prob);
-
-    g_app.detected_objects.push_back (object);
-  }
-  _print_log("========================================================");
-  
-  g_mutex_unlock (&g_app.mutex);
-}
-
-/**
- * @brief Callback for tensor sink signal.
- */
-static void
-new_data_cb (GstElement * element, GstBuffer * buffer, gpointer user_data)
-{
-  GstMemory *mem_num, *mem_classes, *mem_scores, *mem_boxes;
-  GstMapInfo info_num, info_classes, info_scores, info_boxes;
-  gfloat *num_detections, *detection_classes, *detection_scores, *detection_boxes;
-
-  g_return_if_fail (g_app.running);
-
-  /**
-   * tensor type is float32.
-   * [0] dim of num_detections    > 1
-   * [1] dim of detection_classes > 1: 100
-   * [2] dim of detection_scores  > 1: 100
-   * [3] dim of detection_boxes   > 1: 100: 4 (top, left, bottom, right)
-   */
-  g_assert (gst_buffer_n_memory (buffer) == 4);
-
-  /* num_detections */
-  mem_num = gst_buffer_get_memory (buffer, 0);
-  g_assert (gst_memory_map (mem_num, &info_num, GST_MAP_READ));
-  g_assert (info_num.size == 4);
-  num_detections = (gfloat *) info_num.data;
-
-  /* detection_classes */
-  mem_classes = gst_buffer_get_memory (buffer, 1);
-  g_assert (gst_memory_map (mem_classes, &info_classes, GST_MAP_READ));
-  g_assert (info_classes.size == DETECTION_MAX * 4);
-  detection_classes = (gfloat *) info_classes.data;
-
-  /* detection_scores */
-  mem_scores = gst_buffer_get_memory (buffer, 2);
-  g_assert (gst_memory_map (mem_scores, &info_scores, GST_MAP_READ));
-  g_assert (info_scores.size == DETECTION_MAX * 4);
-  detection_scores = (gfloat *) info_scores.data;
-
-  /* detection_boxes */
-  mem_boxes = gst_buffer_get_memory (buffer, 3);
-  g_assert (gst_memory_map (mem_boxes, &info_boxes, GST_MAP_READ));
-  g_assert (info_boxes.size == DETECTION_MAX * BOX_SIZE * 4);
-  detection_boxes = (gfloat *) info_boxes.data;
-
-  get_detected_objects (
-    num_detections, detection_classes, detection_scores, detection_boxes);
-
-  gst_memory_unmap (mem_num, &info_num);
-  gst_memory_unmap (mem_classes, &info_classes);
-  gst_memory_unmap (mem_scores, &info_scores);
-  gst_memory_unmap (mem_boxes, &info_boxes);
-
-  gst_memory_unref (mem_num);
-  gst_memory_unref (mem_classes);
-  gst_memory_unref (mem_scores);
-  gst_memory_unref (mem_boxes);
-}
-
-/**
- * @brief Set window title.
- * @param name GstXImageSink element name
- * @param title window title
- */
-static void
-set_window_title (const gchar * name, const gchar * title)
-{
-  GstTagList *tags;
-  GstPad *sink_pad;
-  GstElement *element;
-
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), name);
-
-  g_return_if_fail (element != NULL);
-
-  sink_pad = gst_element_get_static_pad (element, "sink");
-
-  if (sink_pad) {
-    tags = gst_tag_list_new (GST_TAG_TITLE, title, NULL);
-    gst_pad_send_event (sink_pad, gst_event_new_tag (tags));
-    gst_object_unref (sink_pad);
-  }
-
-  gst_object_unref (element);
-}
-
-/**
- * @brief Store the information from the caps that we are interested in.
- */
-static void
-prepare_overlay_cb (GstElement * overlay, GstCaps * caps, gpointer user_data)
-{
-  CairoOverlayState *state = &g_app.overlay_state;
-
-  state->valid = gst_video_info_from_caps (&state->vinfo, caps);
-}
-
-/**
- * @brief Callback to draw the overlay.
- */
-static void
-draw_overlay_cb (GstElement * overlay, cairo_t * cr, guint64 timestamp,
-    guint64 duration, gpointer user_data)
-{
-  CairoOverlayState *state = &g_app.overlay_state;
-  gfloat x, y, width, height;
-  gchar *label;
-  guint drawed = 0;
-
-  g_return_if_fail (state->valid);
-  g_return_if_fail (g_app.running);
-
-  std::vector < DetectedObject > detected;
-  std::vector < DetectedObject >::iterator iter;
-
-  g_mutex_lock (&g_app.mutex);
-  detected = g_app.detected_objects;
-  g_mutex_unlock (&g_app.mutex);
-
-  /* set font props */
-  cairo_select_font_face (cr, "Sans", CAIRO_FONT_SLANT_NORMAL,
-      CAIRO_FONT_WEIGHT_BOLD);
-  cairo_set_font_size (cr, 20.0);
-
-  for (iter = detected.begin (); iter != detected.end (); ++iter) {
-    label =
-        (gchar *) g_list_nth_data (g_app.tf_info.labels, iter->class_id);
-
-    x = iter->x;
-    y = iter->y;
-    width = iter->width;
-    height = iter->height;
-
-    /* draw rectangle */
-    cairo_rectangle (cr, x, y, width, height);
-    cairo_set_source_rgb (cr, 1, 0, 0);
-    cairo_set_line_width (cr, 1.5);
-    cairo_stroke (cr);
-    cairo_fill_preserve (cr);
-
-    /* draw title */
-    cairo_move_to (cr, x + 5, y + 25);
-    cairo_text_path (cr, label);
-    cairo_set_source_rgb (cr, 1, 0, 0);
-    cairo_fill_preserve (cr);
-    cairo_set_source_rgb (cr, 1, 1, 1);
-    cairo_set_line_width (cr, .3);
-    cairo_stroke (cr);
-    cairo_fill_preserve (cr);
-
-    if (++drawed >= MAX_OBJECT_DETECTION) {
-      /* max objects drawn */
-      break;
-    }
-  }
-}
-
-/**
- * @brief Main function.
- */
-int
-main (int argc, char ** argv)
-{
-  const gchar tf_model_path[] = "./tf_model";
-
-  gchar *str_pipeline;
-  GstElement *element;
-
-  _print_log ("start app..");
-
-  /* init app variable */
-  g_app.running = FALSE;
-  g_app.loop = NULL;
-  g_app.bus = NULL;
-  g_app.pipeline = NULL;
-  g_app.detected_objects.clear ();
-  g_mutex_init (&g_app.mutex);
-
-  _check_cond_err (tf_init_info (&g_app.tf_info, tf_model_path));
-
-  /* init gstreamer */
-  gst_init (&argc, &argv);
-
-  /* main loop */
-  g_app.loop = g_main_loop_new (NULL, FALSE);
-  _check_cond_err (g_app.loop != NULL);
-
-  /* init pipeline */
-  str_pipeline =
-      g_strdup_printf
-      ("v4l2src name=src ! videoscale ! video/x-raw,width=%d,height=%d,format=RGB ! tee name=t_raw "
-      "t_raw. ! queue ! videoconvert ! cairooverlay name=tensor_res ! ximagesink name=img_tensor "
-      "t_raw. ! queue leaky=2 max-size-buffers=2 ! videoscale ! tensor_converter ! "
-      "tensor_filter framework=tensorflow model=%s "
-      "input=3:640:480:1 inputname=image_tensor inputtype=uint8 "
-      "output=1:1:1:1,100:1:1:1,100:1:1:1,4:100:1:1 "
-      "outputname=num_detections,detection_classes,detection_scores,detection_boxes "
-      "outputtype=float32,float32,float32,float32 ! "
-      "tensor_sink name=tensor_sink ",
-      VIDEO_WIDTH, VIDEO_HEIGHT, g_app.tf_info.model_path);
-  g_app.pipeline = gst_parse_launch (str_pipeline, NULL);
-  g_free (str_pipeline);
-  _check_cond_err (g_app.pipeline != NULL);
-
-  /* bus and message callback */
-  g_app.bus = gst_element_get_bus (g_app.pipeline);
-  _check_cond_err (g_app.bus != NULL);
-
-  gst_bus_add_signal_watch (g_app.bus);
-  g_signal_connect (g_app.bus, "message", G_CALLBACK (bus_message_cb), NULL);
-
-  /* tensor sink signal : new data callback */
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), "tensor_sink");
-  g_signal_connect (element, "new-data", G_CALLBACK (new_data_cb), NULL);
-  gst_object_unref (element);
-
-  /* cairo overlay */
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), "tensor_res");
-  g_signal_connect (element, "draw", G_CALLBACK (draw_overlay_cb), NULL);
-  g_signal_connect (element, "caps-changed", G_CALLBACK (prepare_overlay_cb),
-      NULL);
-  gst_object_unref (element);
-
-  /* start pipeline */
-  gst_element_set_state (g_app.pipeline, GST_STATE_PLAYING);
-  g_app.running = TRUE;
-
-  /* set window title */
-  set_window_title ("img_tensor", "NNStreamer Example");
-
-  /* run main loop */
-  g_main_loop_run (g_app.loop);
-
-  /* quit when received eos or error message */
-  g_app.running = FALSE;
-
-  /* cam source element */
-  element = gst_bin_get_by_name (GST_BIN (g_app.pipeline), "src");
-
-  gst_element_set_state (element, GST_STATE_READY);
-  gst_element_set_state (g_app.pipeline, GST_STATE_READY);
-
-  g_usleep (200 * 1000);
-
-  gst_element_set_state (element, GST_STATE_NULL);
-  gst_element_set_state (g_app.pipeline, GST_STATE_NULL);
-
-  g_usleep (200 * 1000);
-  gst_object_unref (element);
-
-error:
-  _print_log ("close app..");
-
-  free_app_data ();
-  return 0;
-}
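To check only that the TensorFlow model loads with the filter properties used in main() above, a launch-line smoke test is possible; this is a sketch with videotestsrc and fakesink substituted for the camera and the application-side tensor_sink:

gst-launch-1.0 videotestsrc num-buffers=10 ! videoconvert ! videoscale ! \
    video/x-raw,width=640,height=480,format=RGB ! tensor_converter ! \
    tensor_filter framework=tensorflow model=tf_model/ssdlite_mobilenet_v2.pb \
        input=3:640:480:1 inputname=image_tensor inputtype=uint8 \
        output=1:1:1:1,100:1:1:1,100:1:1:1,4:100:1:1 \
        outputname=num_detections,detection_classes,detection_scores,detection_boxes \
        outputtype=float32,float32,float32,float32 ! fakesink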
index 1783694..01f9a03 100644 (file)
@@ -4,11 +4,6 @@ subdir('custom_example_average')
 subdir('custom_example_opencv')
 subdir('custom_example_RNN')
 subdir('custom_example_LSTM')
-
 subdir('example_cam')
 subdir('example_sink')
 subdir('example_filter')
-subdir('example_object_detection')
-subdir('example_object_detection_tensorflow')
-subdir('example_decoder_image_labelling')
-subdir('example_filter_performance_profile')
index 12eaae2..5d4164f 100644 (file)
@@ -34,9 +34,6 @@ BuildRequires:        python-numpy
 BuildRequires:  pkgconfig(libpng)
 # for tensorflow-lite
 BuildRequires: tensorflow-lite-devel
-# for cairo (nnstreamer_example_object_detection)
-BuildRequires: coregl-devel
-BuildRequires: cairo-devel
 # custom_example_opencv filter requires opencv-devel
 BuildRequires: opencv-devel
 # For './testAll.sh' time limit.