[Tensor Converter] Add tensor converter subplugin flatbuffer
author      gichan-jang <gichan2.jang@samsung.com>
Thu, 14 May 2020 02:25:12 +0000 (11:25 +0900)
committer   MyungJoo Ham <myungjoo.ham@samsung.com>
Mon, 15 Jun 2020 08:26:49 +0000 (17:26 +0900)
Add a flatbuffer tensor-converter subplugin, which converts a flatbuffers
byte stream into NNStreamer tensors.

Signed-off-by: gichan-jang <gichan2.jang@samsung.com>
debian/rules
ext/nnstreamer/include/nnstreamer.fbs [new file with mode: 0644]
ext/nnstreamer/meson.build
ext/nnstreamer/tensor_converter/meson.build [new file with mode: 0644]
ext/nnstreamer/tensor_converter/tensor_converter_flatbuf.cc [new file with mode: 0644]
meson.build
meson_options.txt
packaging/nnstreamer.spec
packaging/run_unittests_binaries.sh

diff --git a/debian/rules b/debian/rules
index 660cb6f..846e30f 100755 (executable)
@@ -20,6 +20,7 @@ export GST_PLUGIN_PATH=${ROOT_DIR}/build/gst/nnstreamer
 export NNSTREAMER_CONF=${ROOT_DIR}/build/nnstreamer-test.ini
 export NNSTREAMER_FILTERS=${ROOT_DIR}/build/ext/nnstreamer/tensor_filter
 export NNSTREAMER_DECODERS=${ROOT_DIR}/build/ext/nnstreamer/tensor_decoder
+export NNSTREAMER_CONVERTERS=${ROOT_DIR}/build/ext/nnstreamer/tensor_converter
 export PYTHONIOENCODING=utf-8
 
 ifneq ($(filter $(DEB_HOST_ARCH),amd64),)
diff --git a/ext/nnstreamer/include/nnstreamer.fbs b/ext/nnstreamer/include/nnstreamer.fbs
new file mode 100644 (file)
index 0000000..bd4ed6e
--- /dev/null
@@ -0,0 +1,45 @@
+/**
+ * @file  nnstreamer.fbs
+ * @date  14 May 2020
+ * @brief The schema that defines the tensor(s) serialized to and deserialized from flatbuffers. It is fed to flatc to generate the flatbuffers header file.
+ * @see https://github.com/nnstreamer/nnstreamer
+ * @author  Gichan Jang <gichan2.jang@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+namespace nnstreamer.flatbuf;
+
+enum Tensor_type : int { 
+  NNS_INT32 = 0,
+  NNS_UINT32,
+  NNS_INT16,
+  NNS_UINT16,
+  NNS_INT8,
+  NNS_UINT8,
+  NNS_FLOAT64,
+  NNS_FLOAT32,
+  NNS_INT64,
+  NNS_UINT64,
+
+  NNS_END
+  }
+
+struct frame_rate {
+  rate_n : int;
+  rate_d : int;
+}
+
+table Tensor {
+  name  : string;
+  type : Tensor_type = NNS_END;
+  dimension : [uint32]; // rank is limited to 4 (NNS_TENSOR_RANK_LIMIT)
+  data : [ubyte]; 
+}
+
+table Tensors {
+  num_tensor : int;
+  fr : frame_rate;
+  tensor : [Tensor]; // the number of tensors is limited to 16 (NNS_TENSOR_SIZE_LIMIT)
+}
+
+root_type Tensors;
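For reference, a minimal sketch of how an application could serialize one tensor according to this schema, using the header flatc generates from it. The helper names used below (CreateTensorDirect, CreateTensorsDirect, Tensor_type_NNS_UINT8, the frame_rate constructor) follow flatc's usual generated-API conventions and are assumptions; check the generated nnstreamer_generated.h for the exact signatures.

#include <flatbuffers/flatbuffers.h>
#include <vector>
#include "nnstreamer_generated.h" /* generated by flatc --cpp from nnstreamer.fbs */

int
main ()
{
  using namespace nnstreamer::flatbuf;
  flatbuffers::FlatBufferBuilder builder;

  /* A single 3:4:4:1 uint8 tensor filled with zeros. */
  std::vector<uint32_t> dim = { 3, 4, 4, 1 };
  std::vector<uint8_t> raw (3 * 4 * 4, 0);

  auto one = CreateTensorDirect (builder, "input", Tensor_type_NNS_UINT8, &dim, &raw);
  std::vector<flatbuffers::Offset<Tensor>> vec = { one };

  frame_rate fr (30, 1); /* 30/1 fps */
  builder.Finish (CreateTensorsDirect (builder, 1 /* num_tensor */, &fr, &vec));

  /* builder.GetBufferPointer () / builder.GetSize () now hold the byte stream
   * that the "flatbuf" converter subplugin added below deserializes. */
  return 0;
}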
diff --git a/ext/nnstreamer/meson.build b/ext/nnstreamer/meson.build
index b3e9467..362cccd 100644 (file)
@@ -1,3 +1,10 @@
+if flatbuf_support_is_available
+  # Compile flatbuffers schema file
+  fb_gen = generator(fb_comp, output : '@BASENAME@_generated.h',
+                 arguments : ['--cpp', '-o', '@BUILD_DIR@', '@INPUT@'])
+  fb_gen_src = fb_gen.process('./include/nnstreamer.fbs')
+endif
+
 if protobuf_support_is_available
   pb_gen = generator(pb_comp,
       output: ['@BASENAME@.pb.h', '@BASENAME@.pb.cc'],
@@ -13,3 +20,4 @@ endif
 subdir('tensor_decoder')
 subdir('tensor_filter')
 subdir('tensor_source')
+subdir('tensor_converter')
diff --git a/ext/nnstreamer/tensor_converter/meson.build b/ext/nnstreamer/tensor_converter/meson.build
new file mode 100644 (file)
index 0000000..96802a9
--- /dev/null
@@ -0,0 +1,19 @@
+# flatbuffer
+if flatbuf_support_is_available
+
+  converter_sub_flatbuf_sources = [
+    'tensor_converter_flatbuf.cc'
+  ]
+
+  nnstreamer_converter_flatbuf_sources = [fb_gen_src]
+  foreach s : converter_sub_flatbuf_sources
+    nnstreamer_converter_flatbuf_sources += join_paths(meson.current_source_dir(), s)
+  endforeach
+
+  shared_library('nnstreamer_converter_flatbuf',
+    nnstreamer_converter_flatbuf_sources,
+    dependencies: [nnstreamer_dep, glib_dep, gst_dep, flatbuf_dep],
+    install: true,
+    install_dir: converter_subplugin_install_dir
+  )
+endif
diff --git a/ext/nnstreamer/tensor_converter/tensor_converter_flatbuf.cc b/ext/nnstreamer/tensor_converter/tensor_converter_flatbuf.cc
new file mode 100644 (file)
index 0000000..342b144
--- /dev/null
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: LGPL-2.1-only */
+/**
+ * GStreamer / NNStreamer tensor_converter subplugin, "Flatbuffer"
+ * Copyright (C) 2020 Gichan Jang <gichan2.jang@samsung.com>
+ */
+/**
+ * @file        tensor_converter_flatbuf.cc
+ * @date        14 May 2020
+ * @brief       NNStreamer tensor-converter subplugin, "flatbuffer",
+ *              which converts a flatbuffers byte stream into tensors.
+ * @see         https://github.com/nnsuite/nnstreamer
+ * @author      Gichan Jang <gichan2.jang@samsung.com>
+ * @bug         No known bugs except for NYI items
+ */
+
+/**
+ * Install flatbuffers
+ * We assume an Ubuntu Linux distribution.
+ * You can simply install the binary packages from the PPA:
+ *
+ * $ sudo apt-add-repository ppa:nnstreamer
+ * $ sudo apt update
+ * $ sudo apt install libflatbuffers libflatbuffers-dev flatbuffers-compiler
+ */
+
+#include <iostream>
+#include <fstream>
+#include <typeinfo>
+#include <glib.h>
+#include <gst/gstinfo.h>
+#include <nnstreamer_plugin_api.h>
+#include "nnstreamer_plugin_api_converter.h"
+#include <nnstreamer_log.h>
+#include <nnstreamer_generated.h> /* Generated by `flatc`. */
+
+namespace nnstreamer {
+namespace flatbuf {
+
+void init_fbc (void) __attribute__ ((constructor));
+void fini_fbc (void) __attribute__ ((destructor));
+
+/** @brief tensor converter plugin's NNStreamerExternalConverter callback */
+static GstCaps *
+fbc_query_caps (const GstTensorsConfig * config)
+{
+  return gst_caps_from_string (GST_FLATBUF_TENSOR_CAP_DEFAULT);
+}
+
+/** @brief tensor converter plugin's NNStreamerExternalConverter callback */
+static gboolean
+fbc_get_out_config (const GstCaps * in_cap, GstTensorsConfig * config)
+{
+  GstStructure *structure;
+  g_return_val_if_fail (config != NULL, FALSE);
+  gst_tensors_config_init (config);
+  g_return_val_if_fail (in_cap != NULL, FALSE);
+
+  structure = gst_caps_get_structure (in_cap, 0);
+  g_return_val_if_fail (structure != NULL, FALSE);
+
+  /* All tensor info should be updated later in chain function. */
+  config->info.info[0].type = _NNS_UINT8;
+  config->info.num_tensors = 1;
+  if (gst_tensor_parse_dimension ("1:1:1:1",
+          config->info.info[0].dimension) == 0) {
+    ml_loge ("Failed to set initial dimension for subplugin");
+    return FALSE;
+  }
+
+  if (gst_structure_has_field (structure, "framerate")) {
+    gst_structure_get_fraction (structure, "framerate", &config->rate_n,
+        &config->rate_d);
+  } else {
+    /* cannot get the framerate */
+    config->rate_n = 0;
+    config->rate_d = 1;
+  }
+  return TRUE;
+}
+
+/** @brief tensor converter plugin's NNStreamerExternalConverter callback
+ *  @todo Consider multiple frames: return a GstBufferList and
+ *        remove frame_size and the number of frames.
+ */
+static GstBuffer *
+fbc_convert (GstBuffer * in_buf, gsize * frame_size, guint * frames_in,
+    GstTensorsConfig * config)
+{
+  const Tensors *tensors;
+  const flatbuffers::Vector <flatbuffers::Offset <Tensor>> *tensor;
+  const flatbuffers::Vector <unsigned char> *tensor_data;
+  frame_rate fr;
+  GstBuffer *out_buf = NULL;
+  GstMemory *in_mem, *out_mem;
+  GstMapInfo in_info;
+  guint mem_size;
+  gpointer mem_data;
+
+  in_mem = gst_buffer_peek_memory (in_buf, 0);
+  g_assert (gst_memory_map (in_mem, &in_info, GST_MAP_READ));
+
+  tensors = GetTensors (in_info.data);
+  g_assert (tensors);
+
+  config->info.num_tensors = tensors->num_tensor ();
+  if (tensors->num_tensor () > NNS_TENSOR_SIZE_LIMIT) {
+    nns_loge ("The number of tensors is limited to %d", NNS_TENSOR_SIZE_LIMIT);
+    goto done;
+  }
+  config->rate_n = tensors->fr ()->rate_n ();
+  config->rate_d = tensors->fr ()->rate_d ();
+
+  tensor = tensors->tensor ();
+  out_buf = gst_buffer_new ();
+  *frame_size = 0;
+  *frames_in = 1;
+
+  for (guint i = 0; i < config->info.num_tensors; i++) {
+    config->info.info[i].name =
+        g_strdup (tensor->Get (i)->name ()->str ().c_str ());
+    config->info.info[i].type = (tensor_type) tensor->Get (i)->type ();
+    tensor_data = tensor->Get (i)->data ();
+
+    for (guint j = 0; j < NNS_TENSOR_RANK_LIMIT; j++) {
+      config->info.info[i].dimension[j] =
+          tensor->Get (i)->dimension ()->Get (j);
+    }
+    mem_size = VectorLength (tensor_data);
+    *frame_size += mem_size;
+
+    mem_data = g_memdup (tensor_data->data (), mem_size);
+
+    out_mem = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
+        mem_data, mem_size, 0, mem_size, NULL, NULL);
+
+    gst_buffer_append_memory (out_buf, out_mem);
+  }
+
+  /** copy timestamps */
+  gst_buffer_copy_into (out_buf, in_buf,
+      (GstBufferCopyFlags) GST_BUFFER_COPY_METADATA, 0, -1);
+done:
+  gst_memory_unmap (in_mem, &in_info);
+
+  return out_buf;
+}
+
+static const gchar converter_subplugin_flatbuf[] = "libnnstreamer_converter_flatbuf";
+
+/** @brief flatbuffer tensor converter sub-plugin NNStreamerExternalConverter instance */
+static NNStreamerExternalConverter flatBuf = {
+  .name = converter_subplugin_flatbuf,
+  .convert = fbc_convert,
+  .get_out_config = fbc_get_out_config,
+  .query_caps = fbc_query_caps
+};
+
+/** @brief Initialize this object for tensor converter sub-plugin */
+void
+init_fbc (void)
+{
+  registerExternalConverter (&flatBuf);
+}
+
+/** @brief Destruct this object for tensor converter sub-plugin */
+void
+fini_fbc (void)
+{
+  unregisterExternalConverter (flatBuf.name);
+}
+
+}; /* Namespace flatbuf */
+}; /* Namespace nnstreamer */
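For context, a hedged sketch of how the installed subplugin might be exercised from an application: an appsrc pushes the serialized Tensors buffer (for example, the one built in the schema sketch above) and tensor_converter hands it to fbc_convert through caps negotiation. The media type "other/flatbuf-tensor" is an assumption here; the authoritative string is whatever GST_FLATBUF_TENSOR_CAP_DEFAULT expands to in the NNStreamer plugin API headers.

#include <gst/gst.h>
#include <gst/app/gstappsrc.h>

int
main (int argc, char **argv)
{
  GstElement *pipeline, *src;

  gst_init (&argc, &argv);

  /* "other/flatbuf-tensor" is assumed; see GST_FLATBUF_TENSOR_CAP_DEFAULT. */
  pipeline = gst_parse_launch (
      "appsrc name=src caps=other/flatbuf-tensor ! tensor_converter ! tensor_sink",
      NULL);
  src = gst_bin_get_by_name (GST_BIN (pipeline), "src");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wrap the finished FlatBufferBuilder contents in a GstBuffer and push it, e.g.:
   *   gst_app_src_push_buffer (GST_APP_SRC (src),
   *       gst_buffer_new_wrapped (g_memdup (builder.GetBufferPointer (),
   *           builder.GetSize ()), builder.GetSize ()));
   */

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (src);
  gst_object_unref (pipeline);
  return 0;
}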
diff --git a/meson.build b/meson.build
index da84b96..1de1da8 100644 (file)
@@ -130,6 +130,10 @@ thread_dep = dependency('threads') # pthread for tensorflow-lite
 # Protobuf
 protobuf_dep = dependency('protobuf', version: '>= 3.6.1', required: false)
 
+# Flatbuffers library and schema compiler
+flatbuf_dep = cc.find_library('flatbuffers', required : get_option('flatbuf-support'))
+fb_comp = find_program('flatc', required : get_option('flatbuf-support'))
+
 # Protobuf compiler
 pb_comp = find_program('protoc', required: get_option('protobuf-support'))
 
@@ -228,6 +232,10 @@ features = {
     'project_args': { 'ENABLE_SNPE' : 1 },
     'extra_args': { 'SNPE_ROOT': SNPE_ROOT }
   },
+  'flatbuf-support': {
+    'extra_deps': [ fb_comp ],
+    'project_args': { 'ENABLE_FLATBUF': 1 }
+  },
   'protobuf-support': {
     'extra_deps': [ pb_comp ]
   }
diff --git a/meson_options.txt b/meson_options.txt
index 6b3a4df..63e48dd 100644 (file)
@@ -16,6 +16,7 @@ option('armnn-support', type: 'feature', value: 'auto')
 option('orcc-support', type: 'feature', value: 'auto')
 option('snpe-support', type: 'feature', value: 'auto')
 option('protobuf-support', type: 'feature', value: 'auto')
+option('flatbuf-support', type: 'feature', value: 'auto')
 
 # booleans & other options
 option('enable-test', type: 'boolean', value: true)
diff --git a/packaging/nnstreamer.spec b/packaging/nnstreamer.spec
index 083940b..84fdacd 100644 (file)
@@ -6,6 +6,7 @@
 %define                tensorflow_lite_support 1
 %define                armnn_support 0
 %define                vivante_support 0
+%define                flatbuf_support 1
 
 %if 0%{tizen_version_major} >= 5
 %define                python_support 1
@@ -94,9 +95,12 @@ BuildRequires:       python-numpy-devel
 %endif
 # Testcase requires bmp2png, which requires libpng
 BuildRequires:  pkgconfig(libpng)
+%if 0%{?flatbuf_support}
+# for flatbuffers
+BuildRequires: flatbuffers-devel
+%endif
 %if 0%{?tensorflow_lite_support}
 # for tensorflow-lite
-BuildRequires: flatbuffers-devel
 BuildRequires: tensorflow-lite-devel
 %endif
 # custom_example_opencv filter requires opencv-devel
@@ -411,6 +415,13 @@ You may enable this package to use Google Edge TPU with NNStreamer and Tizen ML
 %define enable_edgetpu -Denable-edgetpu=false
 %endif
 
+# Support flatbuffers
+%if 0%{?flatbuf_support}
+%define enable_flatbuf -Dflatbuf-support=enabled
+%else
+%define enable_flatbuf -Dflatbuf-support=disabled
+%endif
+
 %prep
 %setup -q
 cp %{SOURCE1001} .
@@ -455,6 +466,7 @@ export GST_PLUGIN_PATH=$(pwd)/build/gst/nnstreamer
 export NNSTREAMER_CONF=$(pwd)/build/nnstreamer-test.ini
 export NNSTREAMER_FILTERS=$(pwd)/build/ext/nnstreamer/tensor_filter
 export NNSTREAMER_DECODERS=$(pwd)/build/ext/nnstreamer/tensor_decoder
+export NNSTREAMER_CONVERTERS=$(pwd)/build/ext/nnstreamer/tensor_converter
 
 %define test_script $(pwd)/packaging/run_unittests_binaries.sh
 
@@ -550,6 +562,7 @@ cp -r result %{buildroot}%{_datadir}/nnstreamer/unittest/
 %defattr(-,root,root,-)
 %license LICENSE
 %{_prefix}/lib/nnstreamer/decoders/libnnstreamer_decoder_*.so
+%{_prefix}/lib/nnstreamer/converters/libnnstreamer_converter_*.so
 %{_prefix}/lib/nnstreamer/filters/libnnstreamer_filter_cpp.so
 %{gstlibdir}/libnnstreamer.so
 %{_libdir}/libnnstreamer.so
diff --git a/packaging/run_unittests_binaries.sh b/packaging/run_unittests_binaries.sh
index 3c25eef..56b3c3e 100755 (executable)
@@ -12,6 +12,7 @@ pushd build
 export NNSTREAMER_CONF=$(pwd)/nnstreamer-test.ini
 export NNSTREAMER_FILTERS=$(pwd)/ext/nnstreamer/tensor_filter
 export NNSTREAMER_DECODERS=$(pwd)/ext/nnstreamer/tensor_decoder
+export NNSTREAMER_CONVERTERS=$(pwd)/ext/nnstreamer/tensor_converter
 export _PYTHONPATH=${PYTHONPATH}
 
 run_entry() {