From 0265104b59c2b6ae5b2277ba566bb8974d6567da Mon Sep 17 00:00:00 2001 From: Jaeyun Date: Thu, 4 Jul 2019 15:52:45 +0900 Subject: [PATCH] [C-Api] free data handle in src node If buf policy is auto-free, free data handle for each case. Also, added code to remove duplicated code when constructing the pipeline. Signed-off-by: Jaeyun Jung --- api/capi/include/nnstreamer.h | 1 + api/capi/src/nnstreamer-capi-pipeline.c | 46 +++++++++++++++++++------------- tests/tizen_capi/unittest_tizen_capi.cpp | 2 +- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/api/capi/include/nnstreamer.h b/api/capi/include/nnstreamer.h index 0cc5c28..c278a32 100644 --- a/api/capi/include/nnstreamer.h +++ b/api/capi/include/nnstreamer.h @@ -349,6 +349,7 @@ int ml_pipeline_src_release_handle (ml_pipeline_src_h src_handle); * @since_tizen 5.5 * @param[in] src_handle The source handle returned by ml_pipeline_src_get_handle(). * @param[in] data The handle of input tensors, in the format of tensors info given by ml_pipeline_src_get_tensors_info(). + * This function takes ownership of the data if @a policy is #ML_PIPELINE_BUF_POLICY_AUTO_FREE. * @param[in] policy The policy of buf deallocation. * @return 0 on success. Otherwise a negative error value. 
* @retval #ML_ERROR_NONE Successful diff --git a/api/capi/src/nnstreamer-capi-pipeline.c b/api/capi/src/nnstreamer-capi-pipeline.c index d7e46ce..934b7c3 100644 --- a/api/capi/src/nnstreamer-capi-pipeline.c +++ b/api/capi/src/nnstreamer-capi-pipeline.c @@ -370,36 +370,34 @@ ml_pipeline_construct (const char *pipeline_description, ml_pipeline_h * pipe) name = gst_element_get_name (elem); if (name != NULL) { - ml_pipeline_element *e = NULL; + ml_pipeline_element_e element_type = ML_PIPELINE_ELEMENT_UNKNOWN; if (G_TYPE_CHECK_INSTANCE_TYPE (elem, gst_element_factory_get_element_type (tensor_sink))) { - e = construct_element (elem, pipe_h, name, - ML_PIPELINE_ELEMENT_SINK); + element_type = ML_PIPELINE_ELEMENT_SINK; } else if (G_TYPE_CHECK_INSTANCE_TYPE (elem, GST_TYPE_APP_SRC)) { - e = construct_element (elem, pipe_h, name, - ML_PIPELINE_ELEMENT_APP_SRC); + element_type = ML_PIPELINE_ELEMENT_APP_SRC; } else if (G_TYPE_CHECK_INSTANCE_TYPE (elem, GST_TYPE_APP_SINK)) { - e = construct_element (elem, pipe_h, name, - ML_PIPELINE_ELEMENT_APP_SINK); + element_type = ML_PIPELINE_ELEMENT_APP_SINK; } else if (G_TYPE_CHECK_INSTANCE_TYPE (elem, gst_element_factory_get_element_type (valve))) { - e = construct_element (elem, pipe_h, name, - ML_PIPELINE_ELEMENT_VALVE); + element_type = ML_PIPELINE_ELEMENT_VALVE; } else if (G_TYPE_CHECK_INSTANCE_TYPE (elem, gst_element_factory_get_element_type (inputs))) { - e = construct_element (elem, pipe_h, name, - ML_PIPELINE_ELEMENT_SWITCH_INPUT); + element_type = ML_PIPELINE_ELEMENT_SWITCH_INPUT; } else if (G_TYPE_CHECK_INSTANCE_TYPE (elem, gst_element_factory_get_element_type (outputs))) { - e = construct_element (elem, pipe_h, name, - ML_PIPELINE_ELEMENT_SWITCH_OUTPUT); + element_type = ML_PIPELINE_ELEMENT_SWITCH_OUTPUT; } else { /** @todo CRITICAL HANDLE THIS! 
*/ } - if (e != NULL) + if (element_type != ML_PIPELINE_ELEMENT_UNKNOWN) { + ml_pipeline_element *e; + + e = construct_element (elem, pipe_h, name, element_type); g_hash_table_insert (pipe_h->namednodes, g_strdup (name), e); + } g_free (name); } @@ -859,14 +857,14 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, ml_loge ("The tensor size is invalid. It should be 1 ~ %u; where it is %u", ML_TENSOR_SIZE_LIMIT, _data->num_tensors); ret = ML_ERROR_INVALID_PARAMETER; - goto unlock_return; + goto destroy_data; } ret = ml_pipeline_src_parse_tensors_info (elem); if (ret != ML_ERROR_NONE) { ml_logw ("The pipeline is not ready to accept inputs. The input is ignored."); - goto unlock_return; + goto destroy_data; } if (elem->tensors_info.num_tensors != _data->num_tensors) { @@ -875,7 +873,7 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, elem->name, elem->tensors_info.num_tensors, _data->num_tensors); ret = ML_ERROR_INVALID_PARAMETER; - goto unlock_return; + goto destroy_data; } for (i = 0; i < elem->tensors_info.num_tensors; i++) { @@ -887,7 +885,7 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, i, _data->tensors[i].size, sz); ret = ML_ERROR_INVALID_PARAMETER; - goto unlock_return; + goto destroy_data; } } @@ -905,6 +903,12 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, /* Push the data! */ gret = gst_app_src_push_buffer (GST_APP_SRC (elem->element), buffer); + /* Free data ptr if buffer policy is auto-free */ + if (policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) { + g_free (_data); + _data = NULL; + } + if (gret == GST_FLOW_FLUSHING) { ml_logw ("The pipeline is not in PAUSED/PLAYING. 
The input may be ignored."); ret = ML_ERROR_TRY_AGAIN; @@ -913,6 +917,12 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, ret = ML_ERROR_STREAMS_PIPE; } +destroy_data: + if (_data != NULL && policy == ML_PIPELINE_BUF_POLICY_AUTO_FREE) { + /* Free data handle */ + ml_tensors_data_destroy (data); + } + handle_exit (h); } diff --git a/tests/tizen_capi/unittest_tizen_capi.cpp b/tests/tizen_capi/unittest_tizen_capi.cpp index bf00da0..429307e 100644 --- a/tests/tizen_capi/unittest_tizen_capi.cpp +++ b/tests/tizen_capi/unittest_tizen_capi.cpp @@ -423,7 +423,7 @@ TEST (nnstreamer_capi_sink, dummy_02) guint *count_sink; /* pipeline with appsink */ - pipeline = g_strdup ("videotestsrc num-buffers=3 ! videoconvert ! valve name=valvex ! tensor_converter ! appsink name=sinkx"); + pipeline = g_strdup ("videotestsrc num-buffers=3 ! videoconvert ! tensor_converter ! appsink name=sinkx"); count_sink = (guint *) g_malloc (sizeof (guint)); *count_sink = 0; -- 2.7.4