This PR defines new APIs and callbacks for the custom-easy filter.
Signed-off-by: Jaeyun <jy1210.jung@samsung.com>
#endif /* __cplusplus */
/**
+ * @brief Internal private representation of custom filter handle.
+ */
+typedef struct {
+  char *name;                  /* name registered to the custom-easy framework */
+  ml_tensors_info_h in_info;   /* cloned input tensors information */
+  ml_tensors_info_h out_info;  /* cloned output tensors information */
+  ml_custom_easy_invoke_cb cb; /* user callback invoked for each input frame */
+  void *pdata;                 /* user private data passed to cb */
+} ml_custom_filter_s;
+
+/**
* @brief Data structure for tensor information.
* @since_tizen 5.5
*/
/**
* @brief A handle of a common element (i.e. All GstElement except AppSrc, AppSink, TensorSink, Selector and Valve) of an NNStreamer pipeline
+ * @since_tizen 6.0
*/
typedef void *ml_pipeline_element_h;
/**
+ * @brief A handle of a "custom-easy filter" of an NNStreamer pipeline.
+ * @since_tizen 6.0
+ */
+typedef void *ml_custom_easy_filter_h;
+
+/**
* @brief Types of NNFWs.
* @details To check if a nnfw-type is supported in a system, an application may call the API, ml_check_nnfw_availability().
* @since_tizen 5.5
*/
typedef void (*ml_pipeline_state_cb) (ml_pipeline_state_e state, void *user_data);
+/**
+ * @brief Callback to execute the custom-easy filter in NNStreamer pipelines.
+ * @since_tizen 6.0
+ * @remarks The @a in can be used only in the callback. To use outside, make a copy.
+ * @remarks The @a out can be used only in the callback. To use outside, make a copy.
+ * @param[in] in The handle of the tensor input (a single frame. tensor/tensors).
+ * @param[out] out The handle of the tensor output to be filled (a single frame. tensor/tensors).
+ * @param[in,out] user_data User application's private data.
+ * @return @c 0 on success. @c 1 to ignore the input data. Otherwise a negative error value.
+ */
+typedef int (*ml_custom_easy_invoke_cb) (const ml_tensors_data_h in, ml_tensors_data_h out, void *user_data);
+
/****************************************************
** NNStreamer Pipeline Construction (gst-parse) **
****************************************************/
int ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool *available);
/**
+ * @brief Registers a custom filter.
+ * @details NNStreamer provides an interface for processing the tensors with 'custom-easy' framework which can execute without independent shared object.
+ * Using this function, the application can easily register and execute the processing code.
+ * If a custom filter with same name exists, this will be failed and return the error code #ML_ERROR_INVALID_PARAMETER.
+ * @since_tizen 6.0
+ * @remarks If the function succeeds, @a custom handle must be released using ml_pipeline_custom_easy_filter_unregister().
+ * @param[in] name The name of custom filter.
+ * @param[in] in The handle of input tensors information.
+ * @param[in] out The handle of output tensors information.
+ * @param[in] cb The function to be called when the pipeline runs.
+ * @param[in] user_data Private data for the callback. This value is passed to the callback when it's invoked.
+ * @param[out] custom The custom filter handler.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid, or duplicated name exists.
+ * @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory to register the custom filter.
+ *
+ * Here is an example of the usage:
+ * @code
+ * // Define invoke callback.
+ * static int custom_filter_invoke_cb (const ml_tensors_data_h in, ml_tensors_data_h out, void *user_data)
+ * {
+ * // Get input tensors using data handle 'in', and fill output tensors using data handle 'out'.
+ * }
+ *
+ * // The pipeline description (input data with dimension 2:1:1:1 and type int8 will be passed to custom filter 'my-custom-filter', which converts data type to float32 and processes tensors.)
+ * const char pipeline[] = "appsrc ! other/tensor,dimension=(string)2:1:1:1,type=(string)int8,framerate=(fraction)0/1 ! tensor_filter framework=custom-easy model=my-custom-filter ! tensor_sink";
+ * int status;
+ * ml_pipeline_h pipe;
+ * ml_custom_easy_filter_h custom;
+ * ml_tensors_info_h in_info, out_info;
+ * ml_tensor_dimension dim = { 2, 1, 1, 1 };
+ *
+ * // Set input and output tensors information.
+ * ml_tensors_info_create (&in_info);
+ * ml_tensors_info_set_count (in_info, 1);
+ * ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT8);
+ * ml_tensors_info_set_tensor_dimension (in_info, 0, dim);
+ *
+ * ml_tensors_info_create (&out_info);
+ * ml_tensors_info_set_count (out_info, 1);
+ * ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_FLOAT32);
+ * ml_tensors_info_set_tensor_dimension (out_info, 0, dim);
+ *
+ * // Register custom filter with name 'my-custom-filter' ('custom-easy' framework).
+ * status = ml_pipeline_custom_easy_filter_register ("my-custom-filter", in_info, out_info, custom_filter_invoke_cb, NULL, &custom);
+ * if (status != ML_ERROR_NONE) {
+ * // Handle error case.
+ * goto error;
+ * }
+ *
+ * // Construct the pipeline.
+ * status = ml_pipeline_construct (pipeline, NULL, NULL, &pipe);
+ * if (status != ML_ERROR_NONE) {
+ * // Handle error case.
+ * goto error;
+ * }
+ *
+ * // Start the pipeline and execute the tensor.
+ * ml_pipeline_start (pipe);
+ *
+ * error:
+ * // Destroy the pipeline and unregister custom filter.
+ * ml_pipeline_stop (pipe);
+ * ml_pipeline_destroy (pipe);
+ * ml_pipeline_custom_easy_filter_unregister (custom);
+ * @endcode
+ */
+int ml_pipeline_custom_easy_filter_register (const char *name, const ml_tensors_info_h in, const ml_tensors_info_h out, ml_custom_easy_invoke_cb cb, void *user_data, ml_custom_easy_filter_h *custom);
+
+/**
+ * @brief Unregisters the custom filter.
+ * @details Use this function to release and unregister the custom filter.
+ * @since_tizen 6.0
+ * @param[in] custom The custom filter to be unregistered.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid.
+ */
+int ml_pipeline_custom_easy_filter_unregister (ml_custom_easy_filter_h custom);
+
+/**
* @}
*/
#ifdef __cplusplus
#include "nnstreamer-capi-private.h"
#include "tensor_typedef.h"
+#include "tensor_filter_custom_easy.h"
#include "nnstreamer_plugin_api.h"
#define handle_init(type, name, h) \
return element;
}
+
+/**
+ * @brief Releases custom filter handle.
+ * @details Frees the registered name and the cloned tensors information,
+ *          then the handle itself. A NULL handle is accepted as a no-op.
+ */
+static void
+ml_pipeline_custom_free_handle (ml_custom_filter_s * custom)
+{
+  if (custom == NULL)
+    return;
+
+  g_free (custom->name);
+  ml_tensors_info_destroy (custom->in_info);
+  ml_tensors_info_destroy (custom->out_info);
+  g_free (custom);
+}
+
+/**
+ * @brief Invoke callback for custom-easy filter.
+ * @details Wraps the pipeline's raw tensor buffers into ml_tensors_data
+ *          handles (without copying) and calls the user callback that was
+ *          registered for this filter.
+ * @param[in] data The ml_custom_filter_s handle given at registration.
+ * @param[in] prop The tensor-filter properties (not used here).
+ * @param[in] in The input tensor memory from the pipeline.
+ * @param[out] out The output tensor memory to be filled by the callback.
+ * @return 0 on success, a negative value on internal error, or the result of the user callback.
+ */
+static int
+ml_pipeline_custom_invoke (void *data, const GstTensorFilterProperties * prop,
+    const GstTensorMemory * in, GstTensorMemory * out)
+{
+  int status;
+  ml_custom_filter_s *c;
+  ml_tensors_data_h in_data, out_data;
+  ml_tensors_data_s *_data;
+  guint i;
+
+  c = (ml_custom_filter_s *) data;
+  in_data = out_data = NULL;
+
+  /* internal error? */
+  if (!c || !c->cb)
+    return -1;
+
+  /* prepare invoke */
+  status = ml_tensors_data_create_no_alloc (c->in_info, &in_data);
+  if (status != ML_ERROR_NONE)
+    goto done;
+
+  /* point the data handle at the pipeline-provided input buffers (no copy) */
+  _data = (ml_tensors_data_s *) in_data;
+  for (i = 0; i < _data->num_tensors; i++)
+    _data->tensors[i].tensor = in[i].data;
+
+  status = ml_tensors_data_create_no_alloc (c->out_info, &out_data);
+  if (status != ML_ERROR_NONE)
+    goto done;
+
+  /* point the data handle at the pipeline-provided output buffers (no copy) */
+  _data = (ml_tensors_data_s *) out_data;
+  for (i = 0; i < _data->num_tensors; i++)
+    _data->tensors[i].tensor = out[i].data;
+
+  /* call invoke callback */
+  status = c->cb (in_data, out_data, c->pdata);
+
+done:
+  /* NOTE: DO NOT free tensor data */
+  /* The tensor buffers are owned by the pipeline; release only the wrappers. */
+  g_free (in_data);
+  g_free (out_data);
+
+  return status;
+}
+
+/**
+ * @brief Registers a custom filter.
+ * @details Clones the given tensors information into a private handle and
+ *          registers ml_pipeline_custom_invoke with the custom-easy
+ *          framework under the given name. On failure (e.g. duplicated
+ *          name) the handle is released and an error is returned.
+ */
+int
+ml_pipeline_custom_easy_filter_register (const char *name,
+    const ml_tensors_info_h in, const ml_tensors_info_h out,
+    ml_custom_easy_invoke_cb cb, void *user_data,
+    ml_custom_easy_filter_h * custom)
+{
+  int status = ML_ERROR_NONE;
+  ml_custom_filter_s *c;
+  GstTensorsInfo in_info, out_info;
+
+  check_feature_state ();
+
+  if (!name || !cb || !custom)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  /* init null */
+  *custom = NULL;
+
+  if (!ml_tensors_info_is_valid (in) || !ml_tensors_info_is_valid (out))
+    return ML_ERROR_INVALID_PARAMETER;
+
+  /* create and init custom handle */
+  if ((c = g_new0 (ml_custom_filter_s, 1)) == NULL)
+    return ML_ERROR_OUT_OF_MEMORY;
+
+  /* keep private copies; the caller may destroy 'in'/'out' after return */
+  c->name = g_strdup (name);
+  c->cb = cb;
+  c->pdata = user_data;
+  ml_tensors_info_create (&c->in_info);
+  ml_tensors_info_create (&c->out_info);
+
+  ml_tensors_info_clone (c->in_info, in);
+  ml_tensors_info_clone (c->out_info, out);
+
+  /* register custom filter */
+  /* NOTE(review): if ml_tensors_info_copy_from_ml duplicates tensor names,
+   * in_info/out_info may need gst_tensors_info_free() before returning --
+   * TODO confirm against the helper's implementation. */
+  ml_tensors_info_copy_from_ml (&in_info, in);
+  ml_tensors_info_copy_from_ml (&out_info, out);
+
+  if (NNS_custom_easy_register (name, ml_pipeline_custom_invoke, c,
+          &in_info, &out_info) != 0) {
+    nns_loge ("Failed to register custom filter %s.", name);
+    status = ML_ERROR_INVALID_PARAMETER;
+  }
+
+  if (status == ML_ERROR_NONE) {
+    *custom = c;
+  } else {
+    /* registration failed (e.g. duplicated name): release the handle */
+    ml_pipeline_custom_free_handle (c);
+  }
+
+  return status;
+}
+
+/**
+ * @brief Unregisters the custom filter.
+ * @details Removes the filter from the custom-easy framework by its name
+ *          and releases the handle. The handle is kept alive if the
+ *          framework fails to unregister it.
+ */
+int
+ml_pipeline_custom_easy_filter_unregister (ml_custom_easy_filter_h custom)
+{
+  ml_custom_filter_s *c;
+
+  check_feature_state ();
+
+  if (!custom)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  c = (ml_custom_filter_s *) custom;
+
+  if (NNS_custom_easy_unregister (c->name) != 0) {
+    ml_loge ("Failed to unregister custom filter %s.", c->name);
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  /* unregistered from the framework: safe to free the handle now */
+  ml_pipeline_custom_free_handle (c);
+  return ML_ERROR_NONE;
+}
}
/**
+ * @brief Invoke callback for custom-easy filter.
+ */
+static int
+test_custom_easy_cb (const ml_tensors_data_h in, ml_tensors_data_h out,
+    void *user_data)
+{
+  size_t *data_size = (size_t *) user_data;
+
+  /* test code: report the size of the first output tensor via user_data. */
+  if (data_size != NULL) {
+    void *raw_data = NULL;
+
+    ml_tensors_data_get_tensor_data (out, 0, &raw_data, data_size);
+  }
+
+  return 0;
+}
+
+/**
+ * @brief Test for custom-easy registration.
+ * @detail Registers a custom-easy filter, feeds frames through a pipeline,
+ *         and verifies the sink receives data of the expected size.
+ */
+TEST (nnstreamer_capi_custom, register_filter_01_p)
+{
+  const char test_custom_filter[] = "test-custom-filter";
+  ml_pipeline_h pipe;
+  ml_pipeline_src_h src;
+  ml_pipeline_sink_h sink;
+  ml_custom_easy_filter_h custom;
+  ml_tensors_info_h in_info, out_info;
+  ml_tensors_data_h in_data;
+  ml_tensor_dimension dim = { 2, 1, 1, 1 };
+  int status;
+  gchar *pipeline =
+      g_strdup_printf
+      ("appsrc name=srcx ! other/tensor,dimension=(string)2:1:1:1,type=(string)int8,framerate=(fraction)0/1 ! tensor_filter framework=custom-easy model=%s ! tensor_sink name=sinkx",
+      test_custom_filter);
+  guint *count_sink = (guint *) g_malloc0 (sizeof (guint));
+  size_t *filter_data_size = (size_t *) g_malloc0 (sizeof (size_t));
+  size_t data_size;
+  guint i;
+
+  /* input: one int8 tensor of dimension 2:1:1:1 */
+  ml_tensors_info_create (&in_info);
+  ml_tensors_info_set_count (in_info, 1);
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT8);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, dim);
+
+  /* output: one float32 tensor of the same dimension */
+  ml_tensors_info_create (&out_info);
+  ml_tensors_info_set_count (out_info, 1);
+  ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_FLOAT32);
+  ml_tensors_info_set_tensor_dimension (out_info, 0, dim);
+  ml_tensors_info_get_tensor_size(out_info, 0, &data_size);
+
+  /* test code for custom filter */
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      in_info, out_info, test_custom_easy_cb, filter_data_size, &custom);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_construct (pipeline, NULL, NULL, &pipe);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_sink_register (pipe, "sinkx", test_sink_callback_count,
+      count_sink, &sink);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_src_get_handle (pipe, "srcx", &src);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_start (pipe);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  /* push 5 frames; each data handle is freed by the pipeline (AUTO_FREE) */
+  for (i = 0; i < 5; i++) {
+    status = ml_tensors_data_create (in_info, &in_data);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+
+    status = ml_pipeline_src_input_data (src, in_data, ML_PIPELINE_BUF_POLICY_AUTO_FREE);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+
+    g_usleep (50000); /* 50ms. Wait a bit. */
+  }
+
+  status = ml_pipeline_stop (pipe);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_src_release_handle (src);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_sink_unregister (sink);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_destroy (pipe);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_custom_easy_filter_unregister (custom);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  /* check received data in sink node */
+  EXPECT_TRUE (*count_sink > 0U);
+  EXPECT_TRUE (*filter_data_size > 0U && *filter_data_size == data_size);
+
+  ml_tensors_info_destroy (in_info);
+  ml_tensors_info_destroy (out_info);
+  g_free (pipeline);
+  g_free (count_sink);
+  g_free (filter_data_size);
+}
+
+/**
+ * @brief Test for custom-easy registration.
+ * @detail Invalid params: null arguments, invalid tensors info, and duplicated name.
+ */
+TEST (nnstreamer_capi_custom, register_filter_02_n)
+{
+  const char test_custom_filter[] = "test-custom-filter";
+  ml_custom_easy_filter_h custom, custom2;
+  ml_tensors_info_h in_info, out_info;
+  ml_tensor_dimension dim = { 2, 1, 1, 1 };
+  int status;
+
+  /* info handles start unconfigured; they are set up step by step below */
+  ml_tensors_info_create (&in_info);
+  ml_tensors_info_create (&out_info);
+
+  /* test code with null param */
+  status = ml_pipeline_custom_easy_filter_register (NULL,
+      in_info, out_info, test_custom_easy_cb, NULL, &custom);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      NULL, out_info, test_custom_easy_cb, NULL, &custom);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      in_info, NULL, test_custom_easy_cb, NULL, &custom);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      in_info, out_info, NULL, NULL, &custom);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      in_info, out_info, test_custom_easy_cb, NULL, NULL);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_custom_easy_filter_unregister (NULL);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  /* test code with invalid input info */
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      in_info, out_info, test_custom_easy_cb, NULL, &custom);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  /* make the input info valid; output info is still unconfigured */
+  ml_tensors_info_set_count (in_info, 1);
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT8);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, dim);
+
+  /* test code with invalid output info */
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      in_info, out_info, test_custom_easy_cb, NULL, &custom);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  ml_tensors_info_set_count (out_info, 1);
+  ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_FLOAT32);
+  ml_tensors_info_set_tensor_dimension (out_info, 0, dim);
+
+  /* test code for duplicated name */
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      in_info, out_info, test_custom_easy_cb, NULL, &custom);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_custom_easy_filter_register (test_custom_filter,
+      in_info, out_info, test_custom_easy_cb, NULL, &custom2);
+  EXPECT_NE (status, ML_ERROR_NONE);
+
+  status = ml_pipeline_custom_easy_filter_unregister (custom);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  ml_tensors_info_destroy (in_info);
+  ml_tensors_info_destroy (out_info);
+}
+
+/**
* @brief Main gtest
*/
int