/usr/lib/nnstreamer/filters/libnnstreamer_filter_python3.so
+/usr/lib/nnstreamer/converters/libnnstreamer_converter_python3.so
/usr/lib/nnstreamer/extra/nnstreamer_python3.so
/** @brief tensor converter plugin's NNStreamerExternalConverter callback */
GstBuffer *
-gst_tensor_converter_protobuf (GstBuffer *in_buf, GstTensorsConfig *config)
+gst_tensor_converter_protobuf (GstBuffer *in_buf, GstTensorsConfig *config, void *priv_data)
{
nnstreamer::protobuf::Tensors tensors;
nnstreamer::protobuf::Tensors::frame_rate *fr = NULL;
 * @retval Return a new GstBuffer if the data is to be modified.
*/
GstBuffer *
-gst_tensor_converter_protobuf (GstBuffer * in_buf, GstTensorsConfig * config);
+gst_tensor_converter_protobuf (GstBuffer * in_buf, GstTensorsConfig * config, void *priv_data);
#endif /* __NNS_PROTOBUF_UTIL_H__ */
*
*/
/**
- * @file tensor_filter_python_helper.c
+ * @file nnstreamer_python3_helper.c
* @date 10 Apr 2019
* @brief python helper structure for nnstreamer tensor_filter
* @see http://github.com/nnstreamer/nnstreamer
install_dir: converter_subplugin_install_dir
)
endif
+
+# python3
+if have_python3
+ converter_sub_python3_sources = ['tensor_converter_python3.cc']
+
+ nnstreamer_converter_python3_sources = []
+ foreach s : converter_sub_python3_sources
+ nnstreamer_converter_python3_sources += join_paths(meson.current_source_dir(), s)
+ endforeach
+ nnstreamer_converter_python3_deps = [python3_dep, libdl_dep, glib_dep, gst_dep, nnstreamer_dep]
+ shared_library('nnstreamer_converter_python3',
+ nnstreamer_converter_python3_sources,
+ dependencies: nnstreamer_converter_python3_deps,
+ install: true,
+ install_dir: converter_subplugin_install_dir
+ )
+
+ static_library('nnstreamer_converter_python3',
+ nnstreamer_converter_python3_sources,
+ dependencies: nnstreamer_converter_python3_deps,
+ install: true,
+ install_dir: nnstreamer_libdir
+ )
+endif
* remove frame size and the number of frames
*/
static GstBuffer *
-fbc_convert (GstBuffer *in_buf, GstTensorsConfig *config)
+fbc_convert (GstBuffer *in_buf, GstTensorsConfig *config, void *priv_data)
{
const Tensors *tensors;
const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensor;
/** @brief tensor converter plugin's NNStreamerExternalConverter callback
*/
static GstBuffer *
-flxc_convert (GstBuffer *in_buf, GstTensorsConfig *config)
+flxc_convert (GstBuffer *in_buf, GstTensorsConfig *config, void *priv_data)
{
GstBuffer *out_buf = NULL;
GstMemory *in_mem, *out_mem;
/** @brief tensor converter plugin's NNStreamerExternalConverter callback */
static GstBuffer *
-pbc_convert (GstBuffer *in_buf, GstTensorsConfig *config)
+pbc_convert (GstBuffer *in_buf, GstTensorsConfig *config, void *priv_data)
{
- return gst_tensor_converter_protobuf (in_buf, config);
+ return gst_tensor_converter_protobuf (in_buf, config, NULL);
}
static gchar converter_subplugin_protobuf[] = "protobuf";
--- /dev/null
+/* SPDX-License-Identifier: LGPL-2.1-only */
+/**
+ * GStreamer / NNStreamer tensor_converter subplugin, "python3"
+ * Copyright (C) 2021 Gichan Jang <gichan2.jang@samsung.com>
+ */
+/**
+ * @file tensor_converter_python3.c
+ * @date May 03 2021
+ * @brief NNStreamer tensor-converter subplugin, "python3",
+ *        which converts to tensors using python.
+ * @see https://github.com/nnstreamer/nnstreamer
+ * @author Gichan Jang <gichan2.jang@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include <stdexcept>
+
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#include <dlfcn.h>
+#include <numpy/arrayobject.h>
+#include <nnstreamer_log.h>
+#include <nnstreamer_plugin_api.h>
+#include <nnstreamer_plugin_api_converter.h>
+
+/**
+ * @brief Macro for debug mode.
+ */
+#ifndef DBG
+#define DBG FALSE
+#endif
+
+#define SO_EXT "so.1.0"
+
+#define Py_ERRMSG(...) \
+ do { \
+ PyErr_Print (); \
+ ml_loge (__VA_ARGS__); \
+ } while (0);
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+void init_converter_py (void) __attribute__((constructor));
+void fini_converter_py (void) __attribute__((destructor));
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+/**
+ * @brief Python embedding core structure
+ */
+class PYConverterCore
+{
+  public:
+  /**
+   * member functions.
+   */
+  PYConverterCore (const char *_script_path);
+  ~PYConverterCore ();
+
+  int init ();
+  int loadScript ();
+  const char *getScriptPath ();
+  GstBuffer *convert (GstBuffer *in_buf, GstTensorsConfig *config);
+  int parseTensorsInfo (PyObject *result, GstTensorsInfo *info);
+  tensor_type getTensorType (NPY_TYPES npyType);
+
+  /** @brief Lock python-related actions */
+  void Py_LOCK ()
+  {
+    g_mutex_lock (&py_mutex);
+  }
+  /** @brief Unlock python-related actions */
+  void Py_UNLOCK ()
+  {
+    g_mutex_unlock (&py_mutex);
+  }
+
+  private:
+  std::string module_name; /**< python module name derived from script_path */
+  const std::string script_path; /**< logical path to the user's .py script */
+  PyObject *shape_cls; /**< 'TensorShape' class from the nnstreamer_python helper */
+  PyObject *core_obj; /**< instance of the user's 'CustomConverter' class */
+  void *handle; /**< returned handle by dlopen() */
+  GMutex py_mutex; /**< serializes python-related actions (see Py_LOCK) */
+};
+
+/**
+ * @brief PYConverterCore creator
+ * @param _script_path : the logical path to '{script_name}.py' file
+ * @note the script of _script_path will be loaded simultaneously
+ * @return Nothing
+ * @throws std::runtime_error when libpython cannot be dlopen'd or 'sys'
+ *         cannot be imported.
+ */
+PYConverterCore::PYConverterCore (const char *_script_path)
+    : script_path (_script_path)
+{
+  /**
+   * To fix import error of python extension modules
+   * (e.g., multiarray.x86_64-linux-gnu.so: undefined symbol: PyExc_SystemError)
+   */
+  gchar libname[32] = { 0, };
+
+  g_snprintf (libname, sizeof (libname), "libpython%d.%d.%s",
+      PY_MAJOR_VERSION, PY_MINOR_VERSION, SO_EXT);
+  handle = dlopen (libname, RTLD_LAZY | RTLD_GLOBAL);
+  if (nullptr == handle) {
+    /* check the python was compiled with '--with-pymalloc' */
+    g_snprintf (libname, sizeof (libname), "libpython%d.%dm.%s",
+        PY_MAJOR_VERSION, PY_MINOR_VERSION, SO_EXT);
+
+    handle = dlopen (libname, RTLD_LAZY | RTLD_GLOBAL);
+    if (nullptr == handle)
+      throw std::runtime_error (dlerror ());
+  }
+
+  _import_array (); /** for numpy */
+
+  /**
+   * Parse script path to get module name
+   * The module name should drop its extension (i.e., .py)
+   */
+  module_name = script_path;
+  const size_t last_idx = module_name.find_last_of ("/\\");
+
+  if (last_idx != std::string::npos)
+    module_name.erase (0, last_idx + 1);
+
+  const size_t ext_idx = module_name.rfind ('.');
+  if (ext_idx != std::string::npos)
+    module_name.erase (ext_idx);
+
+  /** Add current/directory path to sys.path */
+  PyObject *sys_module = PyImport_ImportModule ("sys");
+  if (nullptr == sys_module)
+    throw std::runtime_error ("Cannot import python module 'sys'.");
+
+  PyObject *sys_path = PyObject_GetAttrString (sys_module, "path");
+  if (nullptr == sys_path)
+    throw std::runtime_error ("Cannot import python module 'path'.");
+
+  /* NOTE(review): PyList_Append does not steal references, so the two
+   * PyUnicode_FromString temporaries below are never decref'd (one-time,
+   * small leak) — confirm and fix upstream. */
+  PyList_Append (sys_path, PyUnicode_FromString ("."));
+  PyList_Append (sys_path,
+      PyUnicode_FromString (script_path.substr (0, last_idx).c_str ()));
+
+  Py_XDECREF (sys_path);
+  Py_XDECREF (sys_module);
+
+  core_obj = NULL;
+  shape_cls = NULL;
+
+  g_mutex_init (&py_mutex);
+}
+
+/**
+ * @brief PYConverterCore Destructor
+ * @return Nothing
+ */
+PYConverterCore::~PYConverterCore ()
+{
+  /* Py_XDECREF already tolerates NULL; the guards are redundant but harmless. */
+  if (core_obj)
+    Py_XDECREF (core_obj);
+  if (shape_cls)
+    Py_XDECREF (shape_cls);
+  PyErr_Clear ();
+
+  dlclose (handle);
+  g_mutex_clear (&py_mutex);
+}
+
+/**
+ * @brief parse the converting result to feed output tensors
+ * @param[in] result Python list of TensorShape objects returned by convert
+ * @param[out] info Structure for output tensors info
+ * @return 0 if no error, otherwise negative errno
+ * @throws std::runtime_error when a list element cannot be inspected
+ */
+int
+PYConverterCore::parseTensorsInfo (PyObject *result, GstTensorsInfo *info)
+{
+  if (PyList_Size (result) < 0)
+    return -1;
+
+  info->num_tensors = PyList_Size (result);
+  for (unsigned int i = 0; i < info->num_tensors; i++) {
+    /** don't own the reference */
+    PyObject *tensor_shape = PyList_GetItem (result, (Py_ssize_t)i);
+    if (nullptr == tensor_shape)
+      throw std::runtime_error ("parseTensorsInfo() has failed (1).");
+
+    PyObject *shape_dims = PyObject_CallMethod (tensor_shape, (char *)"getDims", NULL);
+    if (nullptr == shape_dims)
+      throw std::runtime_error ("parseTensorsInfo() has failed (2).");
+
+
+    PyObject *shape_type = PyObject_CallMethod (tensor_shape, (char *)"getType", NULL);
+    if (nullptr == shape_type)
+      throw std::runtime_error ("parseOutputTensors() has failed (3).");
+
+    /** convert numpy type to tensor type */
+    /* NOTE(review): the PyArray_Descr* cast assumes getType() returns a
+     * numpy dtype object — confirm; also 'shape_type' is never decref'd
+     * here (leak per tensor). */
+    info->info[i].type
+        = getTensorType ((NPY_TYPES) (((PyArray_Descr *)shape_type)->type_num));
+
+    for (int j = 0; j < PyList_Size (shape_dims); j++)
+      info->info[i].dimension[j]
+          = (uint32_t)PyLong_AsLong (PyList_GetItem (shape_dims, (Py_ssize_t)j));
+
+    info->info[i].name = g_strdup("");
+    Py_XDECREF (shape_dims);
+  }
+
+  return 0;
+}
+
+/**
+ * @brief convert any media stream to tensor
+ * @param[in] in_buf input buffer; its mapped bytes are handed to the python
+ *            'convert' callback as a single 1-D uint8 numpy array (no copy)
+ * @param[out] config output tensors config, filled from the python result
+ * @return newly allocated output buffer, or NULL on failure
+ */
+GstBuffer *
+PYConverterCore::convert (GstBuffer *in_buf, GstTensorsConfig *config)
+{
+  GstMemory *in_mem, *out_mem;
+  GstMapInfo in_info;
+  GstBuffer *out_buf = NULL;
+  PyObject *tensors_info = NULL, *output = NULL, *pyValue = NULL;
+  gint rate_n, rate_d;
+  guint mem_size;
+  gpointer mem_data;
+  if (nullptr == in_buf)
+    throw std::invalid_argument ("Null pointers are given to PYConverterCore::convert().\n");
+
+  in_mem = gst_buffer_peek_memory (in_buf, 0);
+
+  if (!gst_memory_map (in_mem, &in_info, GST_MAP_READ)) {
+    Py_ERRMSG ("Cannot map input memory / tensor_converter::custom-script");
+    return NULL;
+  }
+
+  /* Wrap the mapped input as a 1-D uint8 numpy array viewing in_info.data. */
+  npy_intp input_dims[] = { (npy_intp) (gst_buffer_get_size (in_buf)) };
+
+  PyObject *param = PyList_New (0);
+  PyObject *input_array = PyArray_SimpleNewFromData (
+      1, input_dims, NPY_UINT8, in_info.data);
+  PyList_Append (param, input_array);
+
+  Py_LOCK ();
+  if (!PyObject_HasAttrString (core_obj, (char *)"convert")) {
+    Py_ERRMSG ("Cannot find 'convert'");
+    goto done;
+  }
+
+  /* Expected python return: (tensors_info, output, rate_n, rate_d). */
+  pyValue = PyObject_CallMethod (core_obj, "convert", "(O)", param);
+
+  /* NOTE(review): pyValue may be NULL when the call raised, and
+   * PyArg_ParseTuple on NULL is unsafe — confirm and add a NULL check.
+   * 'param', 'input_array', and 'pyValue' are never Py_XDECREF'd in this
+   * function (leak per buffer). */
+  if (!PyArg_ParseTuple (pyValue, "OOii", &tensors_info, &output,
+          &rate_n, &rate_d)) {
+    Py_ERRMSG ("Failed to parse converting result");
+    goto done;
+  }
+
+  if (parseTensorsInfo (tensors_info, &config->info) != 0) {
+    Py_ERRMSG ("Failed to parse tensors info");
+    goto done;
+  }
+  config->rate_n = rate_n;
+  config->rate_d = rate_d;
+  Py_XDECREF (tensors_info);
+
+  if (output) {
+    unsigned int num_tensors = PyList_Size (output);
+
+    out_buf = gst_buffer_new ();
+    for (unsigned int i = 0; i < num_tensors; i++) {
+      PyArrayObject *output_array
+          = (PyArrayObject *)PyList_GetItem (output, (Py_ssize_t)i);
+
+      /* NOTE(review): PyArray_SIZE counts elements, not bytes — correct only
+       * while the script returns uint8 arrays; PyArray_NBYTES would be safe
+       * for any dtype. */
+      mem_size = PyArray_SIZE (output_array);
+      mem_data = g_memdup ((guint8 *) PyArray_DATA (output_array), mem_size);
+
+      out_mem = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY, mem_data,
+          mem_size, 0, mem_size, mem_data, g_free);
+      gst_buffer_append_memory (out_buf, out_mem);
+
+    }
+    Py_XDECREF (output);
+  } else {
+    Py_ERRMSG ("Fail to get output from 'convert'");
+  }
+
+done:
+  Py_UNLOCK ();
+  gst_memory_unmap (in_mem, &in_info);
+  return out_buf;
+}
+
+/**
+ * @brief load the python script
+ * @return 0 if loaded, -1 if the module import failed,
+ *         -2 if the 'CustomConverter' class is missing
+ */
+int
+PYConverterCore::loadScript ()
+{
+  PyObject *module = PyImport_ImportModule (module_name.c_str ());
+
+  if (module) {
+    /* Instantiate the user's CustomConverter class with no arguments. */
+    PyObject *cls = PyObject_GetAttrString (module, "CustomConverter");
+    if (cls) {
+      core_obj = PyObject_CallObject (cls, NULL);
+      Py_XDECREF (cls);
+    } else {
+      Py_ERRMSG ("Cannot find 'CustomConverter' class in the script\n");
+      /* NOTE(review): 'module' is not decref'd on this early return (leak). */
+      return -2;
+    }
+    Py_XDECREF (module);
+  } else {
+    Py_ERRMSG ("the script is not properly loaded\n");
+    return -1;
+  }
+
+  return 0;
+}
+
+/**
+ * @brief return the data type of the tensor
+ * @param npyType : the defined type of Python numpy
+ * @return the enum of defined _NNS_TYPE (_NNS_END for unsupported types)
+ */
+tensor_type
+PYConverterCore::getTensorType (NPY_TYPES npyType)
+{
+  switch (npyType) {
+    case NPY_INT32:
+      return _NNS_INT32;
+    case NPY_UINT32:
+      return _NNS_UINT32;
+    case NPY_INT16:
+      return _NNS_INT16;
+    case NPY_UINT16:
+      return _NNS_UINT16;
+    case NPY_INT8:
+      return _NNS_INT8;
+    case NPY_UINT8:
+      return _NNS_UINT8;
+    case NPY_INT64:
+      return _NNS_INT64;
+    case NPY_UINT64:
+      return _NNS_UINT64;
+    case NPY_FLOAT32:
+      return _NNS_FLOAT32;
+    case NPY_FLOAT64:
+      return _NNS_FLOAT64;
+    default:
+      /** @todo Support other types */
+      break;
+  }
+
+  return _NNS_END;
+}
+
+/**
+ * @brief initialize the object with python script
+ * @return 0 if OK. non-zero if error.
+ * @note requires the 'nnstreamer_python' helper module (which provides the
+ *       'TensorShape' class) to be importable, e.g., via PYTHONPATH.
+ */
+int
+PYConverterCore::init ()
+{
+  /** Find nnstreamer_api module */
+  PyObject *api_module = PyImport_ImportModule ("nnstreamer_python");
+  if (api_module == NULL) {
+    return -EINVAL;
+  }
+
+  shape_cls = PyObject_GetAttrString (api_module, "TensorShape");
+  Py_XDECREF (api_module);
+
+  if (shape_cls == NULL)
+    return -EINVAL;
+
+  return loadScript ();
+}
+
+/**
+ * @brief get the script path
+ * @return the script path.
+ * @note the returned pointer stays valid for the lifetime of this object.
+ */
+const char *
+PYConverterCore::getScriptPath ()
+{
+  return script_path.c_str ();
+}
+
+/**
+ * @brief Free privateData and move on.
+ * @param[in,out] private_data double pointer to the PYConverterCore instance;
+ *                reset to NULL after deletion (no-op when already NULL).
+ */
+static void
+py_close (void **private_data)
+{
+  PYConverterCore *core = static_cast<PYConverterCore *> (*private_data);
+
+  g_return_if_fail (core != NULL);
+  delete core;
+
+  *private_data = NULL;
+}
+
+/**
+ * @brief The open callback for GstTensorConverterFramework. Called before anything else
+ * @param path: python script path
+ * @param private_data: python plugin's private data
+ * @return 0 on success, 1 if the same script was already open (skipped),
+ *         negative on error.
+ * @note NOTE(review): this C-style callback throws (runtime_error here and
+ *       from the PYConverterCore ctor); confirm the caller can survive an
+ *       exception propagating through it.
+ */
+static int
+py_open (const gchar *path, void **priv_data)
+{
+  PYConverterCore *core;
+
+  if (!Py_IsInitialized ())
+    throw std::runtime_error ("Python is not initialize.");
+
+  /** Load python script file */
+  core = static_cast<PYConverterCore *> (*priv_data);
+
+  if (core != NULL) {
+    if (g_strcmp0 (path, core->getScriptPath ()) == 0)
+      return 1; /* skipped */
+
+    py_close (priv_data);
+  }
+
+  /* init null */
+  *priv_data = NULL;
+
+  /* NOTE(review): 'new' never returns NULL (it throws std::bad_alloc),
+   * so the check below is dead code. */
+  core = new PYConverterCore (path);
+  if (core == NULL) {
+    Py_ERRMSG ("Failed to allocate memory for converter subplugin: Python\n");
+    return -1;
+  }
+
+  if (core->init () != 0) {
+    delete core;
+    Py_ERRMSG ("failed to initailize the object: Python\n");
+    return -2;
+  }
+  *priv_data = core;
+
+  return 0;
+}
+
+/** @brief tensor converter plugin's NNStreamerExternalConverter callback.
+ *  The script decides the real format at runtime, so a generic byte-stream
+ *  cap is advertised regardless of 'config' (parameter unused). */
+static GstCaps *
+python_query_caps (const GstTensorsConfig *config)
+{
+  return gst_caps_from_string ("application/octet-stream");
+}
+
+/**
+ * @brief tensor converter plugin's NNStreamerExternalConverter callback
+ * @param[in] in_cap input caps; only the 'framerate' field is consumed
+ * @param[out] config initialized with a 1:1:1:1 uint8 placeholder
+ * @return TRUE on success
+ */
+static gboolean
+python_get_out_config (const GstCaps *in_cap, GstTensorsConfig *config)
+{
+  GstStructure *structure;
+  g_return_val_if_fail (config != NULL, FALSE);
+  gst_tensors_config_init (config);
+  g_return_val_if_fail (in_cap != NULL, FALSE);
+
+  structure = gst_caps_get_structure (in_cap, 0);
+  g_return_val_if_fail (structure != NULL, FALSE);
+
+  /* All tensor info should be updated later in chain function. */
+  config->info.info[0].type = _NNS_UINT8;
+  config->info.num_tensors = 1;
+  if (gst_tensor_parse_dimension ("1:1:1:1", config->info.info[0].dimension) == 0) {
+    Py_ERRMSG ("Failed to set initial dimension for subplugin");
+    return FALSE;
+  }
+
+  if (gst_structure_has_field (structure, "framerate")) {
+    gst_structure_get_fraction (structure, "framerate", &config->rate_n, &config->rate_d);
+  } else {
+    /* cannot get the framerate */
+    config->rate_n = 0;
+    config->rate_d = 1;
+  }
+  return TRUE;
+}
+
+/**
+ * @brief tensor converter plugin's NNStreamerExternalConverter callback
+ * @param[in] in_buf input buffer
+ * @param[out] config output tensors config
+ * @param[in] priv_data PYConverterCore created by py_open()
+ * @return converted buffer, or NULL when arguments are missing
+ */
+static GstBuffer *
+python_convert (GstBuffer *in_buf, GstTensorsConfig *config, void *priv_data)
+{
+  PYConverterCore *core = static_cast<PYConverterCore *> (priv_data);
+  g_return_val_if_fail (in_buf, NULL);
+  g_return_val_if_fail (config, NULL);
+  /* NOTE(review): 'core' is not NULL-checked before dereference — confirm
+   * py_open() is guaranteed to run first. */
+  return core->convert (in_buf, config);
+}
+
+static const gchar converter_subplugin_python[] = "python3";
+
+/** @brief python3 tensor converter sub-plugin NNStreamerExternalConverter instance */
+static NNStreamerExternalConverter Python = {
+  .name = converter_subplugin_python,
+  .convert = python_convert,
+  .get_out_config = python_get_out_config,
+  .query_caps = python_query_caps,
+  .open = py_open,
+  .close = py_close,
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+/** @brief Initialize this object for tensor converter sub-plugin */
+void
+init_converter_py (void)
+{
+  registerExternalConverter (&Python);
+  /** Python should be initialized and finalized only once */
+  Py_Initialize ();
+}
+
+/** @brief Destruct this object for tensor converter sub-plugin */
+void
+fini_converter_py (void)
+{
+  /* Unregister first so no converter callback can run against a finalized
+   * interpreter. */
+  unregisterExternalConverter (Python.name);
+  /** Python should be initialized and finalized only once */
+  Py_Finalize ();
+}
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
const char *name;
/* 1. chain func, data handling. */
- GstBuffer *(*convert) (GstBuffer * in_buf, GstTensorsConfig * config);
+ GstBuffer *(*convert) (GstBuffer * in_buf, GstTensorsConfig * config, void *priv_data);
/**< Convert the given input stream to tensor/tensors stream.
*
* @param[in] buf The input stream buffer
* @param[in] config The config of output tensor/tensors
* @retval Return subplugin caps (if config is NULL, return default caps)
*/
+
+ int (*open) (const gchar *script_path, void **priv_data);
+ /**< tensor_converter will call this to open subplugin.
+ * @param[in] script_path script path of the subplugin.
+ * @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer. Normally, open() allocates memory for private_data.
+ * @return 0 if ok. < 0 if error.
+ */
+
+ void (*close) (void **priv_data);
+ /**< tensor_converter will call this to close subplugin.
+ * @param[in] private_data frees private_data and set NULL.
+ */
+
} NNStreamerExternalConverter;
/**
self->frame_size = 0;
self->remove_padding = FALSE;
self->externalConverter = NULL;
+ self->priv_data = NULL;
self->mode = _CONVERTER_MODE_NONE;
self->mode_option = NULL;
self->custom.func = NULL;
}
g_free (self->mode_option);
+ g_free (self->ext_fw);
self->custom.func = NULL;
self->custom.data = NULL;
-
+ if (self->externalConverter && self->externalConverter->close)
+ self->externalConverter->close (&self->priv_data);
G_OBJECT_CLASS (parent_class)->finalize (object);
}
break;
}
- if (g_ascii_strcasecmp (strv[0], "custom-code") == 0)
- self->mode = _CONVERTER_MODE_CUSTOM_CODE;
self->mode_option = g_strdup (strv[1]);
+ if (g_ascii_strcasecmp (strv[0], "custom-code") == 0) {
+ self->mode = _CONVERTER_MODE_CUSTOM_CODE;
+ ptr = get_subplugin (NNS_CUSTOM_CONVERTER, self->mode_option);
+ if (!ptr) {
+ nns_logw ("Failed to find custom subplugin of the tensor_converter");
+ return;
+ }
+ self->custom.func = ptr->func;
+ self->custom.data = ptr->data;
+ } else if (g_ascii_strcasecmp (strv[0], "custom-script") == 0) {
+ self->mode = _CONVERTER_MODE_CUSTOM_SCRIPT;
+ /** @todo detects framework based on the script extension */
+ self->ext_fw = g_strdup ("python3");
+ }
g_strfreev (strv);
- ptr = get_subplugin (NNS_CUSTOM_CONVERTER, self->mode_option);
- if (!ptr) {
- nns_logw ("Failed to find custom subplugin of the tensor_converter");
- return;
- }
- self->custom.func = ptr->func;
- self->custom.data = ptr->data;
break;
}
default:
gchar *mode_str = NULL;
if (self->mode_option == NULL)
mode_str = g_strdup ("");
- else
- mode_str = g_strdup_printf ("%s:%s", "custom-code", self->mode_option);
+ else {
+ if (self->mode == _CONVERTER_MODE_CUSTOM_CODE)
+ mode_str =
+ g_strdup_printf ("%s:%s", "custom-code", self->mode_option);
+ else if (self->mode == _CONVERTER_MODE_CUSTOM_SCRIPT)
+ mode_str =
+ g_strdup_printf ("%s:%s", "custom-script", self->mode_option);
+ }
g_value_take_string (value, mode_str);
break;
}
}
inbuf = self->custom.func (buf, self->custom.data, &new_config);
} else if (self->externalConverter && self->externalConverter->convert) {
- inbuf = self->externalConverter->convert (buf, &new_config);
+ inbuf =
+ self->externalConverter->convert (buf, &new_config,
+ self->priv_data);
} else {
GST_ERROR_OBJECT (self, "Undefined behavior with type %d\n",
self->in_media_type);
}
} else if (!self->externalConverter) {
const NNStreamerExternalConverter *ex;
-
+ if (self->mode == _CONVERTER_MODE_CUSTOM_SCRIPT) {
+ mimetype = self->ext_fw;
+ }
if (!(ex = findExternalConverter (mimetype))) {
ml_loge ("Failed to get external converter for %s.", mimetype);
return FALSE;
}
self->externalConverter = ex;
+ if (self->mode == _CONVERTER_MODE_CUSTOM_SCRIPT) {
+ if (self->externalConverter->open (self->mode_option,
+ &self->priv_data) < 0) {
+ ml_loge ("Failed to open tensor converter custom subplugin.\n");
+ self->externalConverter = NULL;
+ return FALSE;
+ }
+ }
}
return TRUE;
g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);
structure = gst_caps_get_structure (caps, 0);
- if (self->mode == _CONVERTER_MODE_CUSTOM_CODE) {
+ if (self->mode != _CONVERTER_MODE_NONE) {
in_type = _NNS_MEDIA_ANY;
} else {
in_type = gst_structure_get_media_type (structure);
for (i = 0; i < total; i++) {
ex = nnstreamer_converter_find (str_array[i]);
+ if (g_strcmp0 (media_type, str_array[i]) == 0) {
+ /* found matched media type */
+ return ex;
+ }
+
if (ex && ex->query_caps) {
caps = ex->query_caps (NULL);
caps_size = gst_caps_get_size (caps);
typedef enum {
_CONVERTER_MODE_NONE = 0, /**< Normal mode (default) */
_CONVERTER_MODE_CUSTOM_CODE = 1, /**< Custom mode (callback type) */
+ _CONVERTER_MODE_CUSTOM_SCRIPT = 2, /**< Custom mode (script type) */
} tensor_converter_mode;
/**
tensor_converter_mode mode; /**< tensor converter operating mode */
gchar *mode_option; /**< tensor converter mode option */
+ gchar *ext_fw; /**< tensor converter custom mode framework */
converter_custom_cb_s custom;
+
+ void *priv_data; /**< plugin's private data */
};
/**
%manifest nnstreamer.manifest
%defattr(-,root,root,-)
%{_prefix}/lib/nnstreamer/filters/libnnstreamer_filter_python3.so
+%{_prefix}/lib/nnstreamer/converters/libnnstreamer_converter_python3.so
%{_prefix}/lib/nnstreamer/extra/nnstreamer_python3.so
%{python3_sitelib}/nnstreamer_python.so
%endif
/** @brief tensor converter plugin's convert callback\r
*/\r
static GstBuffer *\r
-conv_convert (GstBuffer *in_buf, GstTensorsConfig *config)\r
+conv_convert (GstBuffer *in_buf, GstTensorsConfig *config, void *priv_data)\r
{\r
return NULL;\r
}\r
--- /dev/null
+#!/usr/bin/env bash
+##
+## SPDX-License-Identifier: LGPL-2.1-only
+##
+## @file runTest.sh
+## @author Gichan Jang <gichan2.jang@samsung.com>
+## @date May 03 2021
+## @brief SSAT Test Cases for NNStreamer
+##
+
+if [[ "$SSATAPILOADED" != "1" ]]; then
+ SILENT=0
+ INDEPENDENT=1
+ search="ssat-api.sh"
+ source $search
+ printf "${Blue}Independent Mode${NC}
+"
+fi
+
+# This is compatible with SSAT (https://github.com/myungjoo/SSAT)
+testInit $1
+
+if [ "$SKIPGEN" == "YES" ]; then
+ echo "Test Case Generation Skipped"
+ sopath=$2
+else
+ echo "Test Case Generation Started"
+ python3 ../nnstreamer_converter/generateGoldenTestResult.py 9
+ python3 ../nnstreamer_merge/generateTest.py
+ sopath=$1
+fi
+convertBMP2PNG
+
+flat_version=$(dpkg-query --showformat='${Version}' --show libflatbuffers)
+if [ $flat_version ]; then
+ major=`echo $flat_version | cut -d. -f1`
+ minor=`echo $flat_version | cut -d. -f2`
+ echo "dpkg major: $major minor: $minor"
+else
+ flat_version=$(rpm -q flatbuffers)
+ parsed_version=`echo $flat_version | cut -d- -f2`
+ major=`echo $parsed_version | cut -d. -f1`
+ minor=`echo $parsed_version | cut -d. -f2`
+ echo "rpm major: $major minor: $minor"
+fi
+
+if [ $major -eq 1 ] && [ $minor -le 12 ]; then
+ echo "The Flexbuffers Python API is supported if the flatbuffers version is greater than 1.12."
+ echo "See: https://github.com/google/flatbuffers/issues/5306"
+ report
+ exit
+fi
+
+PATH_TO_PLUGIN="../../build"
+# Check python libraries are built
+if [[ -d $PATH_TO_PLUGIN ]]; then
+ ini_path="${PATH_TO_PLUGIN}/ext/nnstreamer/tensor_converter"
+ if [[ -d ${ini_path} ]]; then
+ check=$(ls ${ini_path} | grep python3.so)
+ if [[ ! $check ]]; then
+ echo "Cannot find python shared lib"
+ report
+ exit
+ fi
+ else
+ echo "Cannot find ${ini_path}"
+ report exit
+ fi
+else
+ echo "No build directory"
+ report
+ exit
+fi
+
+FRAMEWORK="python3"
+# This symlink is necessary only for testcases; when installed, symlinks will be made
+pushd ../../build/ext/nnstreamer/tensor_converter
+TEST_PYTHONPATH=${FRAMEWORK}_pymodule
+mkdir -p ${TEST_PYTHONPATH}
+pushd ${TEST_PYTHONPATH}
+# Convert to an absolute path from the relative path
+export PYTHONPATH=$(pwd)
+if [[ ! -f ./nnstreamer_python.so ]]; then
+ ln -s ../../extra/nnstreamer_${FRAMEWORK}.so nnstreamer_python.so
+fi
+popd
+popd
+
+PATH_TO_SCRIPT="../test_models/models/custom_converter.py"
+##
+## @brief Execute gstreamer pipeline and compare the output of the pipeline
+## @param $1 Colorspace
+## @param $2 Width
+## @param $3 Height
+## @param $4 Test Case Number
+function do_test() {
+ gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num-buffers=3 pattern=13 ! video/x-raw,format=${1},width=${2},height=${3},framerate=5/1 ! \
+ tee name=t ! queue ! multifilesink location=\"raw_${1}_${2}x${3}_%1d.log\"
+ t. ! queue ! tensor_converter ! tensor_decoder mode=flexbuf ! other/flexbuf ! tensor_converter mode=custom-script:${PATH_TO_SCRIPT} ! multifilesink location=\"flexb_${1}_${2}x${3}_%1d.log\" sync=true" ${4} 0 0 $PERFORMANCE
+
+ callCompareTest raw_${1}_${2}x${3}_0.log flexb_${1}_${2}x${3}_0.log "${4}-1" "custom flexbuf conversion test ${4}-1" 1 0
+ callCompareTest raw_${1}_${2}x${3}_1.log flexb_${1}_${2}x${3}_1.log "${4}-2" "custom flexbuf conversion test ${4}-2" 1 0
+ callCompareTest raw_${1}_${2}x${3}_2.log flexb_${1}_${2}x${3}_2.log "${4}-3" "custom flexbuf conversion test ${4}-3" 1 0
+}
+# The width and height of video should be multiple of 4
+do_test BGRx 320 240 1-1
+do_test RGB 320 240 1-2
+do_test GRAY8 320 240 1-3
+
+# audio format S16LE, 8k sample rate, samples per buffer 8000
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} audiotestsrc num-buffers=1 samplesperbuffer=8000 ! audioconvert ! audio/x-raw,format=S16LE,rate=8000 ! \
+ tee name=t ! queue ! audioconvert ! tensor_converter frames-per-tensor=8000 ! tensor_decoder mode=flexbuf ! \
+ other/flexbuf ! tensor_converter mode=custom-script:${PATH_TO_SCRIPT} ! filesink location=\"test.audio8k.s16le.log\" sync=true \
+ t. ! queue ! filesink location=\"test.audio8k.s16le.origin.log\" sync=true" 2-1 0 0 $PERFORMANCE
+callCompareTest test.audio8k.s16le.origin.log test.audio8k.s16le.log 2-2 "Audio8k-s16le Golden Test" 0 0
+
+# audio format U8, 16k sample rate, samples per buffer 8000
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} audiotestsrc num-buffers=1 samplesperbuffer=8000 ! audioconvert ! audio/x-raw,format=U8,rate=16000 ! \
+ tee name=t ! queue ! audioconvert ! tensor_converter frames-per-tensor=8000 ! tensor_decoder mode=flexbuf ! \
+ other/flexbuf ! tensor_converter mode=custom-script:${PATH_TO_SCRIPT} ! filesink location=\"test.audio16k.u8.log\" sync=true \
+ t. ! queue ! filesink location=\"test.audio16k.u8.origin.log\" sync=true" 2-3 0 0 $PERFORMANCE
+callCompareTest test.audio16k.u8.origin.log test.audio16k.u8.log 2-4 "Audio16k-u8 Golden Test" 0 0
+
+# audio format U16LE, 16k sample rate, 2 channels, samples per buffer 8000
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} audiotestsrc num-buffers=1 samplesperbuffer=8000 ! audioconvert ! audio/x-raw,format=U16LE,rate=16000,channels=2 ! \
+ tee name=t ! queue ! audioconvert ! tensor_converter frames-per-tensor=8000 ! tensor_decoder mode=flexbuf ! \
+ other/flexbuf ! tensor_converter mode=custom-script:${PATH_TO_SCRIPT} ! filesink location=\"test.audio16k2c.u16le.log\" sync=true \
+ t. ! queue ! filesink location=\"test.audio16k2c.u16le.origin.log\" sync=true" 2-5 0 0 $PERFORMANCE
+callCompareTest test.audio16k2c.u16le.origin.log test.audio16k2c.u16le.log 2-6 "Audio16k2c-u16le Golden Test" 0 0
+
+# Test other/tensors
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} tensor_mux name=tensors_mux sync-mode=basepad sync-option=1:50000000 ! tensor_decoder mode=flexbuf ! other/flexbuf ! tensor_converter mode=custom-script:${PATH_TO_SCRIPT} ! multifilesink location=testsynch19_%1d.log \
+ tensor_mux name=tensor_mux0 sync-mode=slowest ! queue ! tensor_decoder mode=flexbuf ! other/flexbuf ! tensor_converter ! tensors_mux.sink_0 \
+ tensor_mux name=tensor_mux1 sync-mode=slowest ! queue ! tensor_decoder mode=flexbuf ! other/flexbuf ! tensor_converter ! tensors_mux.sink_1 \
+ multifilesrc location=\"testsequence03_%1d.png\" index=0 caps=\"image/png, framerate=(fraction)10/1\" ! pngdec ! tensor_converter ! tensor_mux0.sink_0 \
+ multifilesrc location=\"testsequence03_%1d.png\" index=0 caps=\"image/png, framerate=(fraction)20/1\" ! pngdec ! tensor_converter ! tensor_mux0.sink_1 \
+ multifilesrc location=\"testsequence03_%1d.png\" index=0 caps=\"image/png, framerate=(fraction)30/1\" ! pngdec ! tensor_converter ! tensor_mux1.sink_0 \
+ multifilesrc location=\"testsequence03_%1d.png\" index=0 caps=\"image/png, framerate=(fraction)20/1\" ! pngdec ! tensor_converter ! tensor_mux1.sink_1" 3 0 0 $PERFORMANCE
+callCompareTest testsynch19_0.golden testsynch19_0.log 3-1 "Tensor mux Compare 3-1" 1 0
+callCompareTest testsynch19_1.golden testsynch19_1.log 3-2 "Tensor mux Compare 3-2" 1 0
+callCompareTest testsynch19_2.golden testsynch19_2.log 3-3 "Tensor mux Compare 3-3" 1 0
+callCompareTest testsynch19_3.golden testsynch19_3.log 3-4 "Tensor mux Compare 3-4" 1 0
+callCompareTest testsynch19_4.golden testsynch19_4.log 3-5 "Tensor mux Compare 3-5" 1 0
+
+report
EXPECT_EQ (gst_buffer_n_memory (dec_out_buf), 1U);
/** Convert flatbuf to tensors */
- conv_out_buf = fb_conv->convert (dec_out_buf, &check_config);
+ conv_out_buf = fb_conv->convert (dec_out_buf, &check_config, NULL);
EXPECT_EQ (gst_buffer_n_memory (conv_out_buf), 2U);
/** Check tensors config. */
ASSERT_TRUE (fb_conv);
gst_tensors_config_init (&config);
- conv_out_buf = fb_conv->convert (NULL, &config);
+ conv_out_buf = fb_conv->convert (NULL, &config, NULL);
EXPECT_TRUE (NULL == conv_out_buf);
gst_tensors_info_free (&config.info);
/** Prepare input */
gst_tensors_config_init (&config);
in_buf = gst_buffer_new ();
- conv_out_buf = fb_conv->convert (in_buf, NULL);
+ conv_out_buf = fb_conv->convert (in_buf, NULL, NULL);
EXPECT_TRUE (NULL == conv_out_buf);
gst_tensors_info_free (&config.info);
ASSERT_TRUE (pb_conv);
gst_tensors_config_init (&config);
- conv_out_buf = pb_conv->convert (NULL, &config);
+ conv_out_buf = pb_conv->convert (NULL, &config, NULL);
EXPECT_TRUE (NULL == conv_out_buf);
gst_tensors_info_free (&config.info);
gst_tensors_config_init (&config);
in_buf = gst_buffer_new ();
- conv_out_buf = pb_conv->convert (in_buf, NULL);
+ conv_out_buf = pb_conv->convert (in_buf, NULL, NULL);
EXPECT_TRUE (NULL == conv_out_buf);
gst_tensors_info_free (&config.info);
ASSERT_TRUE (flx_conv);
gst_tensors_config_init (&config);
- conv_out_buf = flx_conv->convert (NULL, &config);
+ conv_out_buf = flx_conv->convert (NULL, &config, NULL);
EXPECT_TRUE (NULL == conv_out_buf);
gst_tensors_info_free (&config.info);
gst_tensors_config_init (&config);
in_buf = gst_buffer_new ();
- conv_out_buf = flx_conv->convert (in_buf, NULL);
+ conv_out_buf = flx_conv->convert (in_buf, NULL, NULL);
EXPECT_TRUE (NULL == conv_out_buf);
gst_tensors_info_free (&config.info);
--- /dev/null
+##
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2021 Samsung Electronics
+#
+# @file custom_converter.py
+# @brief Python custom converter
+# @author Gichan Jang <gichan2.jang@samsung.com>
+#
+# @note The Flexbuffers Python API is supported if the flatbuffers version is greater than 1.12.
+# See: https://github.com/google/flatbuffers/issues/5306
+# It can be downloaded and used from https://github.com/google/flatbuffers/blob/master/python/flatbuffers/flexbuffers.py
+
+import numpy as np
+import nnstreamer_python as nns
+from flatbuffers import flexbuffers
+
+## @brief Convert an NNStreamer tensor-type index to the numpy type
+#  (0:int32, 1:uint32, 2:int16, 3:uint16, 4:int8, 5:uint8, 6:float64,
+#   7:float32, 8:int64, 9:uint64); returns -1 for unsupported values.
+def _to_numpy_type (dtype):
+    if dtype == 0:
+        return np.int32
+    elif dtype == 1:
+        return np.uint32
+    elif dtype == 2:
+        return np.int16
+    elif dtype == 3:
+        return np.uint16
+    elif dtype == 4:
+        return np.int8
+    elif dtype == 5:
+        return np.uint8
+    elif dtype == 6:
+        return np.float64
+    elif dtype == 7:
+        return np.float32
+    elif dtype == 8:
+        return np.int64
+    elif dtype == 9:
+        return np.uint64
+    else:
+        print ("Not supported numpy type")
+        return -1
+
+## @brief User-defined custom converter
+class CustomConverter(object):
+
+    ## @brief Python callback: convert a flexbuffers-serialized byte stream
+    #  into (tensors_info, raw_data, rate_n, rate_d) for the tensor_converter.
+    def convert (self, input_array):
+        data = input_array[0].tobytes()
+        root = flexbuffers.GetRoot(data)
+        tensors = root.AsMap
+
+        num_tensors = tensors['num_tensors'].AsInt
+        rate_n = tensors['rate_n'].AsInt
+        rate_d = tensors['rate_d'].AsInt
+        raw_data = []
+        tensors_info = []
+
+        for i in range(num_tensors):
+            tensor_key = "tensor_{idx}".format(idx=i)
+            # each 'tensor_{i}' vector: [name, type, dimension, raw bytes]
+            tensor = tensors[tensor_key].AsVector
+            dtype = _to_numpy_type (tensor[1].AsInt)
+            tdim = tensor[2].AsTypedVector
+            dim = []
+            for j in range(4):
+                dim.append(tdim[j].AsInt)
+            # NOTE(review): 'dtype' is computed but unused; shapes and raw
+            # data are emitted as uint8 (byte-level passthrough) — confirm
+            # this is intended.
+            tensors_info.append(nns.TensorShape(dim, np.uint8))
+            raw_data.append(np.frombuffer(tensor[3].AsBlob, dtype=np.uint8))
+
+        return (tensors_info, raw_data, rate_n, rate_d)