NNStreamer extensions for tensor_filter update their framework callbacks and flags based on the model file.
However, this behavior is not thread-safe.
Updated the extensions so that they no longer modify their framework entries for allocate_in_invoke.
Instead, for allocate_in_invoke, tensor_filter needs to know, based on the model, whether allocate_in_invoke is supported or not.
So, another interface was added to GstTensorFilterFramework: allocateInInvoke.

V2:
Set allocate_in_invoke properly in the single API implementation of tensor_filter as well.
Also made minor updates to comments.
Added a minor bugfix to the python extension.
Related Issue: #2034
Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
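For reference, below is a minimal sketch of how a subplugin could implement the new optional callback. The names my_allocateInInvoke, my_pdata, my_subplugin_name, and the private-data layout are illustrative assumptions, not part of this patch; only the callback signature and the return convention (0 if supported, -errno otherwise) come from the new interface.

#include <errno.h>
#include <glib.h>
#include "tensor_filter.h"              /* assumed include providing GstTensorFilterFramework */

/** Hypothetical per-model private data, filled in by the subplugin's open(). */
typedef struct
{
  gboolean model_allocates_output;
} my_pdata;

/**
 * @brief Optional callback: report whether the loaded model allocates output in invoke.
 * @return 0 if supported, -errno if not supported.
 */
static int
my_allocateInInvoke (void **private_data)
{
  my_pdata *pdata = *private_data;

  if (pdata && pdata->model_allocates_output)
    return 0;                   /* this model allocates its own output buffers */
  return -EINVAL;               /* tensor_filter allocates the output buffers instead */
}

static gchar my_subplugin_name[] = "my-subplugin";

static GstTensorFilterFramework my_framework = {
  .version = GST_TENSOR_FILTER_FRAMEWORK_V0,
  .name = my_subplugin_name,
  .allocate_in_invoke = TRUE,   /* may allocate in invoke; resolved per model via allocateInInvoke */
  .allocateInInvoke = my_allocateInInvoke,
  /* .open, .close, .invoke_NN, and dimension callbacks omitted for brevity */
};

A real subplugin would register this struct the same way tensor_filter_custom does (see the registration code near the bottom of this diff).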
{
GstTensorFilterPrivate *priv;
guint i;
+ gboolean allocate_in_invoke;
priv = &self->priv;
}
/** Setup output buffer */
+ allocate_in_invoke = gst_tensor_filter_allocate_in_invoke (priv);
for (i = 0; i < priv->prop.output_meta.num_tensors; i++) {
/* allocate memory if allocate_in_invoke is FALSE */
- if (priv->fw->allocate_in_invoke == FALSE) {
+ if (allocate_in_invoke == FALSE) {
output[i].data = g_malloc (output[i].size);
if (!output[i].data) {
g_critical ("Failed to allocate the output tensor.");
return TRUE;
error:
- if (priv->fw->allocate_in_invoke == FALSE)
+ if (allocate_in_invoke == FALSE)
for (i = 0; i < priv->prop.output_meta.num_tensors; i++)
g_free (output[i].data);
return FALSE;
/**
* @brief The optional callback for GstTensorFilterFramework
+ * @param[in] private_data caffe2 plugin's private data
* @param[in] data The data element.
*/
static void
PyObject *param = PyList_New(0);
for (unsigned int i = 0; i < inputTensorMeta.num_tensors; i++) {
/** create a Numpy array wrapper (1-D) for NNS tensor data */
- npy_intp input_dims[] = {(npy_intp) input[i].size};
+ npy_intp input_dims[] = {(npy_intp) (input[i].size / gst_tensor_get_element_size (input[i].type))};
PyObject *input_array = PyArray_SimpleNewFromData(
1, input_dims, getNumpyType(input[i].type), input[i].data);
PyList_Append(param, input_array);
* @brief The mandatory callback for GstTensorFilterFramework
* @param prop: property of tensor_filter instance
* @param private_data : python plugin's private data
- * @param[in] input The array of input tensors
- * @param[out] output The array of output tensors
+ * @param input : The array of input tensors
+ * @param output : The array of output tensors
*/
static int
py_run (const GstTensorFilterProperties * prop, void **private_data,
/**
* @brief The optional callback for GstTensorFilterFramework
- * @param[in] data The data element.
+ * @param data : The data element.
+ * @param private_data : python plugin's private data
*/
static void
py_destroyNotify (void **private_data, void *data)
if (it != PYCore::outputArrayMap.end()){
Py_XDECREF(it->second);
PYCore::outputArrayMap.erase (it);
- } else
+ } else {
g_critical("Cannot find output data: 0x%lx", (unsigned long) data);
+ }
}
/**
/**
* @brief The optional callback for GstTensorFilterFramework
- * @param[in] data The data element.
+ * @param data : The data element.
+ * @param private_data : tensorflow plugin's private data
*/
static void
tf_destroyNotify (void **private_data, void *data)
*/
int (*allocateInInvoke) (void **private_data);
+ /**< Optional. tensor_filter.c will call it when allocate_in_invoke is set to TRUE. This checks whether the provided model for the framework supports allocation at invoke. If this is not defined, the value of allocate_in_invoke is assumed to be final for all models.
+ *
+ * @param[in] private_data A subplugin may save its internal private data here.
+ * @return 0 if supported. -errno if not supported.
+ */
};
/**
GstTensorMemory out_tensors[NNS_TENSOR_SIZE_LIMIT];
guint i;
gint ret;
+ gboolean allocate_in_invoke;
self = GST_TENSOR_FILTER_CAST (trans);
priv = &self->priv;
/* 0. Check all properties. */
silent_debug ("Invoking %s with %s model\n", priv->fw->name,
GST_STR_NULL (prop->model_files[0]));
+ allocate_in_invoke = gst_tensor_filter_allocate_in_invoke (priv);
/* 1. Set input tensors from inbuf. */
g_assert (gst_buffer_n_memory (inbuf) == prop->input_meta.num_tensors);
out_tensors[i].type = prop->output_meta.info[i].type;
/* allocate memory if allocate_in_invoke is FALSE */
- if (priv->fw->allocate_in_invoke == FALSE) {
+ if (allocate_in_invoke == FALSE) {
out_mem[i] = gst_allocator_alloc (NULL, out_tensors[i].size, NULL);
g_assert (gst_memory_map (out_mem[i], &out_info[i], GST_MAP_WRITE));
/* 4. Update result and free map info. */
for (i = 0; i < prop->output_meta.num_tensors; i++) {
- if (priv->fw->allocate_in_invoke) {
+ if (allocate_in_invoke) {
GPtrArray *data_array = g_ptr_array_new ();
g_ptr_array_add (data_array, (gpointer) self);
g_ptr_array_add (data_array, (gpointer) out_tensors[i].data);
+
/* filter-subplugin allocated new memory, update this */
out_mem[i] =
gst_memory_new_wrapped (0, out_tensors[i].data, out_tensors[i].size,
}
}
+/**
+ * @brief Check whether allocate_in_invoke is valid for the framework with the given model
+ * @param[in] priv Struct containing the properties of the object
+ * @return TRUE if the framework allocates output tensors in invoke, otherwise FALSE
+ */
+gboolean
+gst_tensor_filter_allocate_in_invoke (GstTensorFilterPrivate * priv)
+{
+ int allocate_in_invoke;
+
+ allocate_in_invoke = priv->fw->allocate_in_invoke;
+ if (allocate_in_invoke == TRUE && priv->fw->allocateInInvoke) {
+ if (priv->fw->allocateInInvoke (&priv->privateData) == 0) {
+ allocate_in_invoke = TRUE;
+ } else {
+ allocate_in_invoke = FALSE;
+ }
+ }
+
+ return allocate_in_invoke;
+}
/**
* @brief Printout the comparison results of two tensors.
GstTensorsInfo * info2);
/**
+ * @brief Check whether allocate_in_invoke is valid for the framework with the given model
+ * @param[in] priv Struct containing the properties of the object
+ * @return TRUE if the framework allocates output tensors in invoke, otherwise FALSE
+ */
+extern gboolean
+gst_tensor_filter_allocate_in_invoke (GstTensorFilterPrivate * priv);
+
+/**
* @brief Installs all the properties for tensor_filter
* @param[in] gobject_class Glib object class whose properties will be set
*/
void init_filter_custom (void) __attribute__ ((constructor));
void fini_filter_custom (void) __attribute__ ((destructor));
-static GstTensorFilterFramework NNS_support_custom;
-
/**
* @brief internal_data
*/
ptr = *private_data;
g_assert (!ptr->methods->invoke != !ptr->methods->allocate_invoke); /* XOR! */
- if (ptr->methods->allocate_invoke) {
- NNS_support_custom.allocate_in_invoke = TRUE;
- }
return 0;
}
}
}
+/**
+ * @brief The optional callback for GstTensorFilterFramework
+ */
+static int
+custom_allocateInInvoke (void **private_data)
+{
+ internal_data *ptr = *private_data;
+
+ if (ptr && ptr->methods->allocate_invoke) {
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static gchar filter_subplugin_custom[] = "custom";
static GstTensorFilterFramework NNS_support_custom = {
.version = GST_TENSOR_FILTER_FRAMEWORK_V0,
.name = filter_subplugin_custom,
.allow_in_place = FALSE, /* custom cannot support in-place (output == input). */
- .allocate_in_invoke = FALSE, /* GstTensorFilter allocates output buffers */
+ .allocate_in_invoke = TRUE, /* custom filter may allocate output buffers in invoke; checked per model via allocateInInvoke */
.run_without_model = FALSE, /* custom needs a so file */
.invoke_NN = custom_invoke,
- /* We need to disable getI/O-dim or setI-dim with the first call */
.getInputDimension = custom_getInputDim,
.getOutputDimension = custom_getOutputDim,
.setInputDimension = custom_setInputDim,
.open = custom_open,
.close = custom_close,
- .destroyNotify = custom_destroyNotify, /* default null. if allocate_in_invoke is true, this will be set from custom filter. */
+ .destroyNotify = custom_destroyNotify, /* used only when the custom filter model supports allocate_in_invoke. */
+ .allocateInInvoke = custom_allocateInInvoke,
};
/** @brief Initialize this object for tensor_filter subplugin runtime register */