[Decoder] Add tensor decoder subplugin flatbuffers
author    gichan-jang <gichan2.jang@samsung.com>
Wed, 26 Feb 2020 11:30:28 +0000 (20:30 +0900)
committer MyungJoo Ham <myungjoo.ham@samsung.com>
Wed, 24 Jun 2020 06:36:21 +0000 (15:36 +0900)
Add tensor decoder subplugin for flatbuffer

The flatbuffers package can be found here:
https://launchpad.net/~nnstreamer/+archive/ubuntu/ppa-build-test/+packages
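
A minimal usage sketch (illustrative only, not part of this patch): the decoder is
selected with mode=flatbuf, the mode name registered by this subplugin; the rest of
the pipeline (videotestsrc, tensor_converter) is the stock GStreamer/NNStreamer setup.

#include <gst/gst.h>

int
main (int argc, char **argv)
{
  GstElement *pipeline;
  GError *err = NULL;

  gst_init (&argc, &argv);

  /* Convert raw video frames to tensors, encode them as a flatbuffers
   * byte stream with the new decoder, and discard the result. */
  pipeline = gst_parse_launch (
      "videotestsrc num-buffers=10 ! video/x-raw,format=RGB,width=224,height=224 ! "
      "tensor_converter ! tensor_decoder mode=flatbuf ! fakesink", &err);
  if (pipeline == NULL) {
    g_printerr ("Failed to build pipeline: %s\n", err ? err->message : "unknown error");
    g_clear_error (&err);
    return -1;
  }

  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  /* ... run a main loop or wait for EOS here ... */
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}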

Signed-off-by: gichan-jang <gichan2.jang@samsung.com>
ext/nnstreamer/tensor_converter/tensor_converter_flatbuf.cc
ext/nnstreamer/tensor_decoder/meson.build
ext/nnstreamer/tensor_decoder/tensordec-flatbuf.cc [new file with mode: 0644]

index 342b144..2883406 100644
@@ -9,7 +9,7 @@
 * @date        14 May 2020
 * @brief       NNStreamer tensor-converter subplugin, "flatbuffer",
 *              which converts flatbuffers byte stream to tensors.
-* @see         https://github.com/nnsuite/nnstreamer
+* @see         https://github.com/nnstreamer/nnstreamer
 * @author      Gichan Jang <gichan2.jang@samsung.com>
 * @bug         No known bugs except for NYI items
 *
index c6ffbc1..c87b1de 100644
@@ -131,14 +131,27 @@ if protobuf_support_is_available
   
   shared_library('nnstreamer_decoder_protobuf',
     nnstreamer_decoder_protobuf_sources,
-    dependencies: [nnstreamer_dep, glib_dep, gst_dep, protobuf_dep, thread_dep],
+    dependencies: [nnstreamer_dep, glib_dep, gst_dep, protobuf_dep],
     install: true,
     install_dir: decoder_subplugin_install_dir
   )
-  static_library('nnstreamer_decoder_protobuf',
-    nnstreamer_decoder_protobuf_sources,
-    dependencies: [nnstreamer_dep, glib_dep, gst_dep, protobuf_dep, thread_dep],
+endif
+
+# flatbuffer
+if flatbuf_support_is_available
+  decoder_sub_flatbuf_sources = [
+    'tensordec-flatbuf.cc',
+  ]
+
+  nnstreamer_decoder_flatbuf_sources = [fb_gen_src]
+  foreach s : decoder_sub_flatbuf_sources
+    nnstreamer_decoder_flatbuf_sources += join_paths(meson.current_source_dir(), s)
+  endforeach  
+
+  shared_library('nnstreamer_decoder_flatbuf',
+    nnstreamer_decoder_flatbuf_sources,
+    dependencies: [nnstreamer_dep, glib_dep, gst_dep, flatbuf_dep],
     install: true,
-    install_dir: nnstreamer_libdir
+    install_dir: decoder_subplugin_install_dir,
   )
 endif
diff --git a/ext/nnstreamer/tensor_decoder/tensordec-flatbuf.cc b/ext/nnstreamer/tensor_decoder/tensordec-flatbuf.cc
new file mode 100644
index 0000000..75b2103
--- /dev/null
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: LGPL-2.1-only */
+/**
+ * GStreamer / NNStreamer tensor_decoder subplugin, "Flatbuffer"
+ * Copyright (C) 2020 Gichan Jang <gichan2.jang@samsung.com>
+ */
+/**
+ * @file        tensordec-flatbuf.cc
+ * @date        26 Feb 2020
+ * @brief       NNStreamer tensor-decoder subplugin, "flatbuffer",
+ *              which converts tensor or tensors to flatbuffer byte stream.
+ *
+ * @see         https://github.com/nnstreamer/nnstreamer
+ * @author      Gichan Jang <gichan2.jang@samsung.com>
+ * @bug         No known bugs except for NYI items
+ *
+ */
+
+#include <iostream>
+#include <typeinfo>
+#include <glib.h>
+#include <gst/gstinfo.h>
+#include <nnstreamer_plugin_api_decoder.h>
+#include <nnstreamer_plugin_api.h>
+#include <nnstreamer_log.h>
+#include <nnstreamer_generated.h>    // Generated by `flatc`.
+
+namespace nnstreamer {
+namespace flatbuf {
+
+void init_fb (void) __attribute__ ((constructor));
+void fini_fb (void) __attribute__ ((destructor));
+
+/** @brief tensordec-plugin's GstTensorDecoderDef callback */
+static int
+fb_init (void **pdata)
+{
+  *pdata = NULL;
+  return TRUE;
+}
+
+/** @brief tensordec-plugin's GstTensorDecoderDef callback */
+static void
+fb_exit (void **pdata)
+{
+  return;
+}
+
+/** @brief tensordec-plugin's GstTensorDecoderDef callback */
+static int
+fb_setOption (void **pdata, int opNum, const char *param)
+{
+  return TRUE;
+}
+
+/** @brief tensordec-plugin's GstTensorDecoderDef callback */
+static GstCaps *
+fb_getOutCaps (void **pdata, const GstTensorsConfig * config)
+{
+  return gst_caps_from_string (GST_FLATBUF_TENSOR_CAP_DEFAULT);
+}
+
+/** @brief tensordec-plugin's GstTensorDecoderDef callback */
+static GstFlowReturn
+fb_decode (void **pdata, const GstTensorsConfig * config,
+    const GstTensorMemory * input, GstBuffer * outbuf)
+{
+  char *name;
+  Tensor_type type;
+  GstMapInfo out_info;
+  GstMemory *out_mem;
+  unsigned int i, num_tensors = config->info.num_tensors;
+  flatbuffers::uoffset_t fb_size;
+  flatbuffers::FlatBufferBuilder builder;
+  std::vector<flatbuffers::Offset<Tensor>> tensor_vector;
+  flatbuffers::Offset<flatbuffers::Vector<uint32_t>> dim;
+  flatbuffers::Offset<flatbuffers::String> tensor_name;
+  flatbuffers::Offset<flatbuffers::Vector<unsigned char>> input_vector;
+  flatbuffers::Offset<Tensor> tensor;
+  flatbuffers::Offset<Tensors> tensors;
+  frame_rate fr = frame_rate (config->rate_n, config->rate_d);
+
+  /* Fill in the tensor info and push it to the tensor vector */
+  for (i = 0; i < num_tensors; i++) {
+    unsigned char *tmp_buf;
+
+    dim = builder.CreateVector (config->info.info[i].dimension, NNS_TENSOR_RANK_LIMIT);
+    name = config->info.info[i].name;
+
+    if (name == NULL)
+      tensor_name = builder.CreateString ("Anonymous");
+    else
+      tensor_name = builder.CreateString (name);
+
+    type = (Tensor_type) config->info.info[i].type;
+
+    /* Create the vector first, and fill in data later */
+    /** @todo Consider to remove memcpy */
+    input_vector = builder.CreateUninitializedVector<unsigned char> (input[i].size, &tmp_buf);
+    memcpy (tmp_buf, input[i].data, input[i].size);
+
+    tensor = CreateTensor (builder, tensor_name, type, dim, input_vector);
+    tensor_vector.push_back (tensor);
+  }
+
+  tensors = CreateTensors (builder, num_tensors, &fr, builder.CreateVector (tensor_vector));
+
+  /* Serialize the data. */
+  builder.Finish (tensors);
+  fb_size = builder.GetSize ();
+
+  g_assert (outbuf);
+
+  if (gst_buffer_get_size (outbuf) == 0) {
+    out_mem = gst_allocator_alloc (NULL, fb_size, NULL);
+  }
+  else {
+    if (gst_buffer_get_size (outbuf) < fb_size) {
+      gst_buffer_set_size (outbuf, fb_size);
+    }
+    out_mem = gst_buffer_get_all_memory (outbuf);
+  }
+
+  if (FALSE == gst_memory_map (out_mem, &out_info, GST_MAP_WRITE)) {
+    nns_loge ("Cannot map gst memory (tensor decoder flatbuf)\n");
+    return GST_FLOW_ERROR;
+  }
+
+  memcpy (out_info.data, builder.GetBufferPointer (), fb_size);
+
+  gst_memory_unmap (out_mem, &out_info);
+
+  if (gst_buffer_get_size (outbuf) == 0)
+    gst_buffer_append_memory (outbuf, out_mem);
+
+  return GST_FLOW_OK;
+}
+
+static gchar decoder_subplugin_flatbuf[] = "flatbuf";
+
+/** @brief flatbuffer tensordec-plugin GstTensorDecoderDef instance */
+static GstTensorDecoderDef flatBuf = {
+  .modename = decoder_subplugin_flatbuf,
+  .init = fb_init,
+  .exit = fb_exit,
+  .setOption = fb_setOption,
+  .getOutCaps = fb_getOutCaps,
+  .decode = fb_decode
+};
+
+/** @brief Initialize this object for tensordec-plugin */
+void
+init_fb (void)
+{
+  nnstreamer_decoder_probe (&flatBuf);
+}
+
+/** @brief Destruct this object for tensordec-plugin */
+void
+fini_fb (void)
+{
+  nnstreamer_decoder_exit (flatBuf.modename);
+}
+
+}; /* Namespace flatbuf */
+}; /* Namespace nnstreamer */
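
For readers following fb_decode above, a rough sketch of how a consumer might read the
serialized buffer back. The field accessors used here (num_tensor (), fr (), rate_n (),
rate_d (), tensor (), name (), data ()) are assumptions inferred from the CreateTensor /
CreateTensors calls in this patch, not the verified output of flatc; treat it as an
illustration of the layout only.

/**
 * Hypothetical reader sketch: accessor names are guesses derived from the builder
 * calls in fb_decode and may not match the generated header exactly.
 */
#include <glib.h>
#include <flatbuffers/flatbuffers.h>
#include <nnstreamer_generated.h>

static void
dump_tensors (const void *buf)
{
  using namespace nnstreamer::flatbuf;

  const Tensors *tensors = flatbuffers::GetRoot<Tensors> (buf);

  g_print ("num_tensor: %u, framerate: %d/%d\n", tensors->num_tensor (),
      tensors->fr ()->rate_n (), tensors->fr ()->rate_d ());

  for (const Tensor *t : *tensors->tensor ())
    g_print ("  name: %s, bytes: %u\n", t->name ()->c_str (), t->data ()->size ());
}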