From 830e5a3885b452514757e5defe9c2f233fb61e57 Mon Sep 17 00:00:00 2001
From: Jaeyun
Date: Fri, 28 Jun 2019 20:34:36 +0900
Subject: [PATCH] [C-Api] Add a handle for the tensors-data instance

1. Moved the tensors-data structure to the private header.
2. Added a handle for the tensors-data instance and refactored the related functions.

Signed-off-by: Jaeyun Jung
---
 api/capi/include/nnstreamer-capi-private.h |  23 ++-
 api/capi/include/nnstreamer-single.h       |  10 +-
 api/capi/include/nnstreamer.h              |  87 +++++----
 api/capi/src/nnstreamer-capi-pipeline.c    |  49 ++---
 api/capi/src/nnstreamer-capi-single.c      |  68 +++----
 api/capi/src/nnstreamer-capi-util.c        | 140 ++++++++++-----
 tests/tizen_capi/unittest_tizen_capi.cpp   | 276 ++++++++++++-----------------
 7 files changed, 327 insertions(+), 326 deletions(-)

diff --git a/api/capi/include/nnstreamer-capi-private.h b/api/capi/include/nnstreamer-capi-private.h
index 87a052e..36e6b0b 100644
--- a/api/capi/include/nnstreamer-capi-private.h
+++ b/api/capi/include/nnstreamer-capi-private.h
@@ -94,6 +94,24 @@
 } ml_tensors_info_s;
 
 /**
+ * @brief An instance of a single input or output frame.
+ * @since_tizen 5.5
+ */
+typedef struct {
+  void *tensor; /**< The instance of tensor data. */
+  size_t size; /**< The byte size of the tensor data. */
+} ml_tensor_data_s;
+
+/**
+ * @brief An instance of input or output frames. #ml_tensors_info_h is the handle for tensors metadata.
+ * @since_tizen 5.5
+ */
+typedef struct {
+  unsigned int num_tensors; /**< The number of tensors. */
+  ml_tensor_data_s tensors[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor data. NULL for unused tensors. */
+} ml_tensors_data_s;
+
+/**
  * @brief Possible controls on elements of a pipeline.
  */
 typedef enum {
@@ -184,11 +202,6 @@ typedef struct _ml_pipeline_valve {
 } ml_pipeline_valve;
 
 /**
- * @brief Sets the last error code.
- */
-void ml_util_set_error (int error_code);
-
-/**
  * @brief Gets the byte size of the given tensor info.
  */
 size_t ml_util_get_tensor_size (const ml_tensor_info_s *info);
diff --git a/api/capi/include/nnstreamer-single.h b/api/capi/include/nnstreamer-single.h
index cb7241f..658421e 100644
--- a/api/capi/include/nnstreamer-single.h
+++ b/api/capi/include/nnstreamer-single.h
@@ -91,12 +91,8 @@ int ml_single_close (ml_single_h single);
  * @since_tizen 5.5
  * @param[in] single The model handle to be inferred.
  * @param[in] input The input data to be inferred.
- * @param[out] output The output buffer. Set NULL if you want to let
- *             this function to allocate a new output buffer.
- * @return @c The output buffer. If @output is NULL, this is a newly
- *         allocated buffer; thus, the user needs to free it.
- *         If there is an error, this is set NULL. Check ml_util_get_last_error()
- *         of tizen_error.h in such cases.
+ * @param[out] output The allocated output buffer. The caller is responsible for freeing it with ml_util_destroy_tensors_data().
+ * @return @c 0 on success. Otherwise a negative error value.
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
  * @retval #ML_ERROR_STREAMS_PIPE Cannot push a buffer into source element.
@@ -106,7 +102,7 @@ int ml_single_close (ml_single_h single);
  * input data frames of an instance of a model should share the
  * same dimension.
 */
-ml_tensors_data_s * ml_single_inference (ml_single_h single, const ml_tensors_data_s *input, ml_tensors_data_s *output);
+int ml_single_inference (ml_single_h single, const ml_tensors_data_h input, ml_tensors_data_h *output);
 
 /*************
  * UTILITIES *
diff --git a/api/capi/include/nnstreamer.h b/api/capi/include/nnstreamer.h
index 754b345..3091d25 100644
--- a/api/capi/include/nnstreamer.h
+++ b/api/capi/include/nnstreamer.h
@@ -66,6 +66,12 @@ typedef unsigned int ml_tensor_dimension[ML_TENSOR_RANK_LIMIT];
 typedef void *ml_tensors_info_h;
 
 /**
+ * @brief A handle of input or output frames. #ml_tensors_info_h is the handle for tensors metadata.
+ * @since_tizen 5.5
+ */
+typedef void *ml_tensors_data_h;
+
+/**
  * @brief A handle of an NNStreamer pipeline.
  * @since_tizen 5.5
  */
@@ -202,34 +208,16 @@ typedef enum {
 } ml_pipeline_switch_e;
 
 /**
- * @brief An instance of a single input or output frame.
- * @since_tizen 5.5
- */
-typedef struct {
-  void *tensor; /**< The instance of tensor data. */
-  size_t size; /**< The size of tensor. */
-} ml_tensor_data_s;
-
-/**
- * @brief An instance of input or output frames. #ml_tensors_info_h is the handle for tensors metadata.
- * @since_tizen 5.5
- */
-typedef struct {
-  unsigned int num_tensors; /**< The number of tensors. */
-  ml_tensor_data_s tensors[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor data. NULL for unused tensors. */
-} ml_tensors_data_s;
-
-/**
  * @brief Callback for sink element of NNStreamer pipelines (pipeline's output)
  * @detail If an application wants to accept data outputs of an NNStreamer stream, use this callback to get data from the stream. Note that the buffer may be deallocated after the return and this is synchronously called. Thus, if you need the data afterwards, copy the data to another buffer and return fast. Do not hold too much time in the callback. It is recommended to use very small tensors at sinks.
  * @since_tizen 5.5
  * @remarks The @a data can be used only in the callback. To use outside, make a copy.
  * @remarks The @a info can be used only in the callback. To use outside, make a copy.
- * @param[out] data The contents of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by data->num_tensors. Note that max num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
+ * @param[out] data The handle of the tensor output (a single frame. tensor/tensors). The number of tensors is determined by calling ml_util_get_tensors_count() with the handle @a info. Note that the maximum num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
  * @param[out] info The handle of tensors information (cardinality, dimension, and type of given tensor/tensors).
  * @param[in,out] user_data User Application's Private Data.
  */
-typedef void (*ml_pipeline_sink_cb) (const ml_tensors_data_s *data, const ml_tensors_info_h info, void *user_data);
+typedef void (*ml_pipeline_sink_cb) (const ml_tensors_data_h data, const ml_tensors_info_h info, void *user_data);
 
 /****************************************************
 ** NNStreamer Pipeline Construction (gst-parse) **
@@ -359,7 +347,7 @@ int ml_pipeline_src_put_handle (ml_pipeline_src_h h);
 /**
  * @brief Puts an input data frame.
  * @param[in] h The source handle returned by ml_pipeline_src_get_handle().
- * @param[in] data The input tensors, in the format of tensors info given by ml_pipeline_src_get_tensors_info().
+ * @param[in] data The handle of input tensors, in the format of tensors info given by ml_pipeline_src_get_tensors_info().
  * @param[in] policy The policy of buf deallocation.
  * @return 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful
@@ -367,7 +355,7 @@ int ml_pipeline_src_put_handle (ml_pipeline_src_h h);
 * @retval #ML_ERROR_STREAMS_PIPE The pipeline has inconsistent padcaps. Not negotiated?
 * @retval #ML_ERROR_TRY_AGAIN The pipeline is not ready yet.
 */
-int ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s *data, ml_pipeline_buf_policy_e policy);
+int ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_h data, ml_pipeline_buf_policy_e policy);
 
 /**
  * @brief Gets a handle for the tensors information of given src node.
@@ -611,20 +599,52 @@ int ml_util_get_tensor_dimension (ml_tensors_info_h info, const unsigned int ind
 size_t ml_util_get_tensors_size (const ml_tensors_info_h info);
 
 /**
- * @brief Frees the tensors data pointer.
+ * @brief Allocates a tensor data frame with the given tensors information.
  * @since_tizen 5.5
- * @param[in] data The tensors data pointer to be freed.
+ * @param[in] info The handle of tensors information for the allocation.
+ * @param[out] data The handle of the allocated tensors data. The caller is responsible for freeing the allocated data with ml_util_destroy_tensors_data().
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ * @retval #ML_ERROR_STREAMS_PIPE Failed to allocate new memory.
  */
-void ml_util_free_tensors_data (ml_tensors_data_s **data);
+int ml_util_allocate_tensors_data (const ml_tensors_info_h info, ml_tensors_data_h *data);
 
 /**
- * @brief Allocates a tensor data frame with the given tensors information.
+ * @brief Frees the given handle of a tensors data.
  * @since_tizen 5.5
- * @param[in] info The handle of tensors information for the allocation.
- * @return @c Tensors data pointer allocated. Null if error. Caller is responsible to free the allocated data with ml_util_free_tensors_data().
- * @retval NULL There is an error. Call ml_util_get_last_error() to get specific error code.
+ * @param[in] data The handle of tensors data.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_destroy_tensors_data (ml_tensors_data_h data);
+
+/**
+ * @brief Gets the tensor data of the given handle.
+ * @since_tizen 5.5
+ * @param[in] data The handle of tensors data.
+ * @param[in] index The index of the tensor in the tensors data.
+ * @param[out] raw_data Raw tensor data in the handle.
+ * @param[out] data_size Byte size of the tensor data.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_get_tensor_data (ml_tensors_data_h data, const unsigned int index, void **raw_data, size_t *data_size);
+
+/**
+ * @brief Copies raw tensor data into the given handle.
+ * @since_tizen 5.5
+ * @param[in] data The handle of tensors data.
+ * @param[in] index The index of the tensor in the tensors data.
+ * @param[in] raw_data Raw tensor data to be copied.
+ * @param[in] data_size Byte size of the raw data.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/ -ml_tensors_data_s *ml_util_allocate_tensors_data (const ml_tensors_info_h info); +int ml_util_copy_tensor_data (ml_tensors_data_h data, const unsigned int index, const void *raw_data, const size_t data_size); /** * @brief Checks the availability of the given execution environments. @@ -642,13 +662,6 @@ ml_tensors_data_s *ml_util_allocate_tensors_data (const ml_tensors_info_h info); int ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool *available); /** - * @brief Gets the last error code. - * @since_tizen 5.5 - * @return @c 0 on success. Otherwise a negative error value. - */ -int ml_util_get_last_error (void); - -/** * @} */ #ifdef __cplusplus diff --git a/api/capi/src/nnstreamer-capi-pipeline.c b/api/capi/src/nnstreamer-capi-pipeline.c index 4e17fc2..a0aea1e 100644 --- a/api/capi/src/nnstreamer-capi-pipeline.c +++ b/api/capi/src/nnstreamer-capi-pipeline.c @@ -131,7 +131,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) guint i; guint num_mems; GList *l; - ml_tensors_data_s tensors_data; + ml_tensors_data_s *data = NULL; size_t total_size = 0; num_mems = gst_buffer_n_memory (b); @@ -143,15 +143,16 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) } /* set tensor data */ - memset (&tensors_data, 0, sizeof (ml_tensors_data_s)); + data = g_new0 (ml_tensors_data_s, 1); + g_assert (data); - tensors_data.num_tensors = num_mems; + data->num_tensors = num_mems; for (i = 0; i < num_mems; i++) { mem[i] = gst_buffer_peek_memory (b, i); gst_memory_map (mem[i], &info[i], GST_MAP_READ); - tensors_data.tensors[i].tensor = info[i].data; - tensors_data.tensors[i].size = info[i].size; + data->tensors[i].tensor = info[i].data; + data->tensors[i].size = info[i].size; total_size += info[i].size; } @@ -189,7 +190,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) for (i = 0; i < elem->tensors_info.num_tensors; i++) { size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]); - if (sz != tensors_data.tensors[i].size) { + if (sz != data->tensors[i].size) { ml_loge ("The sink event of [%s] cannot be handled because the tensor dimension mismatches.", elem->name); @@ -225,7 +226,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) ml_pipeline_sink *sink = l->data; ml_pipeline_sink_cb callback = sink->cb; - callback (&tensors_data, &elem->tensors_info, sink->pdata); + callback (data, &elem->tensors_info, sink->pdata); /** @todo Measure time. Warn if it takes long. Kill if it takes too long. */ } @@ -237,6 +238,10 @@ error: gst_memory_unmap (mem[i], &info[i]); } + if (data) { + g_free (data); + data = NULL; + } return; } @@ -832,25 +837,28 @@ ml_pipeline_src_put_handle (ml_pipeline_src_h h) * @brief Push a data frame to a src (more info in nnstreamer.h) */ int -ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data, +ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_h data, ml_pipeline_buf_policy_e policy) { /** @todo NYI */ GstBuffer *buffer; + GstMemory *mem; GstFlowReturn gret; + ml_tensors_data_s *_data; unsigned int i; handle_init (src, src, h); - if (!data) { + _data = (ml_tensors_data_s *) data; + if (!_data) { ml_loge ("The given param data is invalid."); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } - if (data->num_tensors < 1 || data->num_tensors > ML_TENSOR_SIZE_LIMIT) { + if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) { ml_loge ("The tensor size is invalid. 
It should be 1 ~ %u; where it is %u", - ML_TENSOR_SIZE_LIMIT, data->num_tensors); + ML_TENSOR_SIZE_LIMIT, _data->num_tensors); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } @@ -862,10 +870,10 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data, goto unlock_return; } - if (elem->tensors_info.num_tensors != data->num_tensors) { + if (elem->tensors_info.num_tensors != _data->num_tensors) { ml_loge ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u", - elem->name, elem->tensors_info.num_tensors, data->num_tensors); + elem->name, elem->tensors_info.num_tensors, _data->num_tensors); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; @@ -874,10 +882,10 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data, for (i = 0; i < elem->tensors_info.num_tensors; i++) { size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]); - if (sz != data->tensors[i].size) { + if (sz != _data->tensors[i].size) { ml_loge ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)", - i, data->tensors[i].size, sz); + i, _data->tensors[i].size, sz); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; @@ -886,12 +894,11 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data, /* Create buffer to be pushed from buf[] */ buffer = gst_buffer_new (); - for (i = 0; i < data->num_tensors; i++) { - GstBuffer *addbuffer = - gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, - data->tensors[i].tensor, data->tensors[i].size, 0, - data->tensors[i].size, data->tensors[i].tensor, ml_buf_policy[policy]); - buffer = gst_buffer_append (buffer, addbuffer); + for (i = 0; i < _data->num_tensors; i++) { + mem = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY, + _data->tensors[i].tensor, _data->tensors[i].size, 0, + _data->tensors[i].size, _data->tensors[i].tensor, ml_buf_policy[policy]); + gst_buffer_append_memory (buffer, mem); /** @todo Verify that gst_buffer_append lists tensors/gstmem in the correct order */ } diff --git a/api/capi/src/nnstreamer-capi-single.c b/api/capi/src/nnstreamer-capi-single.c index da0dca1..b51e78a 100644 --- a/api/capi/src/nnstreamer-capi-single.c +++ b/api/capi/src/nnstreamer-capi-single.c @@ -334,12 +334,12 @@ ml_single_close (ml_single_h single) /** * @brief Invokes the model with the given input data. 
*/ -ml_tensors_data_s * +int ml_single_inference (ml_single_h single, - const ml_tensors_data_s * input, ml_tensors_data_s * output) + const ml_tensors_data_h input, ml_tensors_data_h * output) { ml_single *single_h; - ml_tensors_data_s *result = NULL; + ml_tensors_data_s *in_data, *result; GstSample *sample; GstBuffer *buffer; GstMemory *mem; @@ -347,47 +347,38 @@ ml_single_inference (ml_single_h single, GstFlowReturn ret; int i, status = ML_ERROR_NONE; - if (!single || !input) { + if (!single || !input || !output) { ml_loge ("The given param is invalid."); - status = ML_ERROR_INVALID_PARAMETER; - goto error; + return ML_ERROR_INVALID_PARAMETER; } single_h = (ml_single *) single; + in_data = (ml_tensors_data_s *) input; - /* Validate output memory and size */ - if (output) { - if (output->num_tensors != single_h->out_info.num_tensors) { - ml_loge ("Invalid output data, the number of output is different."); - status = ML_ERROR_INVALID_PARAMETER; - goto error; - } - - for (i = 0; i < output->num_tensors; i++) { - if (!output->tensors[i].tensor || - output->tensors[i].size != - ml_util_get_tensor_size (&single_h->out_info.info[i])) { - ml_loge ("Invalid output data, the size of output is different."); - status = ML_ERROR_INVALID_PARAMETER; - goto error; - } - } + /* Allocate output buffer */ + status = ml_util_allocate_tensors_data (&single_h->out_info, output); + if (status != ML_ERROR_NONE) { + ml_loge ("Failed to allocate the memory block."); + *output = NULL; + return status; } + result = (ml_tensors_data_s *) (*output); + + /* Push input buffer */ buffer = gst_buffer_new (); - for (i = 0; i < input->num_tensors; i++) { + for (i = 0; i < in_data->num_tensors; i++) { mem = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY, - input->tensors[i].tensor, input->tensors[i].size, 0, - input->tensors[i].size, NULL, NULL); + in_data->tensors[i].tensor, in_data->tensors[i].size, 0, + in_data->tensors[i].size, NULL, NULL); gst_buffer_append_memory (buffer, mem); } ret = gst_app_src_push_buffer (GST_APP_SRC (single_h->src), buffer); if (ret != GST_FLOW_OK) { ml_loge ("Cannot push a buffer into source element."); - status = ML_ERROR_STREAMS_PIPE; - goto error; + return ML_ERROR_STREAMS_PIPE; } /* Try to get the result */ @@ -401,20 +392,7 @@ ml_single_inference (ml_single_h single, if (!sample) { ml_loge ("Failed to get the result from sink element."); - status = ML_ERROR_TIMED_OUT; - goto error; - } - - if (output) { - result = output; - } else { - result = ml_util_allocate_tensors_data (&single_h->out_info); - - if (!result) { - ml_loge ("Failed to allocate the memory block."); - status = ml_util_get_last_error (); - goto error; - } + return ML_ERROR_TIMED_OUT; } /* Copy the result */ @@ -429,11 +407,7 @@ ml_single_inference (ml_single_h single, } gst_sample_unref (sample); - status = ML_ERROR_NONE; - -error: - ml_util_set_error (status); - return result; + return ML_ERROR_NONE; } /** diff --git a/api/capi/src/nnstreamer-capi-util.c b/api/capi/src/nnstreamer-capi-util.c index d99afac..7738a1d 100644 --- a/api/capi/src/nnstreamer-capi-util.c +++ b/api/capi/src/nnstreamer-capi-util.c @@ -20,32 +20,13 @@ * @bug No known bugs except for NYI items */ +#include #include #include #include "nnstreamer.h" #include "nnstreamer-capi-private.h" -static int ml_internal_error_code = ML_ERROR_NONE; - -/** - * @brief Gets the last error code. - */ -int -ml_util_get_last_error (void) -{ - return ml_internal_error_code; -} - -/** - * @brief Sets the last error code. 
- */ -void -ml_util_set_error (int error_code) -{ - ml_internal_error_code = error_code; -} - /** * @brief Allocates a tensors information handle with default value. */ @@ -420,57 +401,118 @@ ml_util_free_tensors_info (ml_tensors_info_s * info) /** * @brief Frees the tensors data pointer. */ -void -ml_util_free_tensors_data (ml_tensors_data_s ** data) +int +ml_util_destroy_tensors_data (ml_tensors_data_h data) { - gint i; + ml_tensors_data_s *_data; + guint i; - if (data == NULL || (*data) == NULL) - return; + if (!data) + return ML_ERROR_INVALID_PARAMETER; + + _data = (ml_tensors_data_s *) data; - for (i = 0; i < (*data)->num_tensors; i++) { - if ((*data)->tensors[i].tensor) { - g_free ((*data)->tensors[i].tensor); - (*data)->tensors[i].tensor = NULL; + for (i = 0; i < _data->num_tensors; i++) { + if (_data->tensors[i].tensor) { + g_free (_data->tensors[i].tensor); + _data->tensors[i].tensor = NULL; } } - g_free (*data); - *data = NULL; + g_free (_data); + return ML_ERROR_NONE; } /** * @brief Allocates a tensor data frame with the given tensors info. (more info in nnstreamer.h) */ -ml_tensors_data_s * -ml_util_allocate_tensors_data (const ml_tensors_info_h info) +int +ml_util_allocate_tensors_data (const ml_tensors_info_h info, + ml_tensors_data_h * data) { - ml_tensors_data_s *data; + ml_tensors_data_s *_data; ml_tensors_info_s *tensors_info; gint i; + if (!info || !data) + return ML_ERROR_INVALID_PARAMETER; + tensors_info = (ml_tensors_info_s *) info; + *data = NULL; - if (!tensors_info) { - ml_util_set_error (ML_ERROR_INVALID_PARAMETER); - return NULL; + _data = g_new0 (ml_tensors_data_s, 1); + if (!_data) { + ml_loge ("Failed to allocate the memory block."); + return ML_ERROR_STREAMS_PIPE; } - data = g_new0 (ml_tensors_data_s, 1); - if (!data) { - ml_loge ("Failed to allocate the memory block."); - ml_util_set_error (ML_ERROR_STREAMS_PIPE); - return NULL; + _data->num_tensors = tensors_info->num_tensors; + for (i = 0; i < _data->num_tensors; i++) { + _data->tensors[i].size = ml_util_get_tensor_size (&tensors_info->info[i]); + _data->tensors[i].tensor = g_malloc0 (_data->tensors[i].size); + if (_data->tensors[i].tensor == NULL) + goto failed; } - data->num_tensors = tensors_info->num_tensors; - for (i = 0; i < data->num_tensors; i++) { - data->tensors[i].size = ml_util_get_tensor_size (&tensors_info->info[i]); - data->tensors[i].tensor = g_malloc0 (data->tensors[i].size); + *data = _data; + return ML_ERROR_NONE; + +failed: + if (_data) { + for (i = 0; i < _data->num_tensors; i++) { + g_free (_data->tensors[i].tensor); + } } - ml_util_set_error (ML_ERROR_NONE); - return data; + ml_loge ("Failed to allocate the memory block."); + return ML_ERROR_STREAMS_PIPE; +} + +/** + * @brief Gets a tensor data of given handle. + */ +int +ml_util_get_tensor_data (ml_tensors_data_h data, const unsigned int index, + void **raw_data, size_t * data_size) +{ + ml_tensors_data_s *_data; + + if (!data) + return ML_ERROR_INVALID_PARAMETER; + + _data = (ml_tensors_data_s *) data; + + if (_data->num_tensors <= index) + return ML_ERROR_INVALID_PARAMETER; + + *raw_data = _data->tensors[index].tensor; + *data_size = _data->tensors[index].size; + + return ML_ERROR_NONE; +} + +/** + * @brief Copies a tensor data to given handle. 
+ */ +int +ml_util_copy_tensor_data (ml_tensors_data_h data, const unsigned int index, + const void *raw_data, const size_t data_size) +{ + ml_tensors_data_s *_data; + + if (!data) + return ML_ERROR_INVALID_PARAMETER; + + _data = (ml_tensors_data_s *) data; + + if (_data->num_tensors <= index) + return ML_ERROR_INVALID_PARAMETER; + + if (data_size <= 0 || _data->tensors[index].size < data_size) + return ML_ERROR_INVALID_PARAMETER; + + memcpy (_data->tensors[index].tensor, raw_data, data_size); + return ML_ERROR_NONE; } /** @@ -689,7 +731,7 @@ ml_util_get_caps_from_tensors_info (const ml_tensors_info_s * info) */ int ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, - bool *available) + bool * available) { if (!available) return ML_ERROR_INVALID_PARAMETER; diff --git a/tests/tizen_capi/unittest_tizen_capi.cpp b/tests/tizen_capi/unittest_tizen_capi.cpp index 680f51f..1b58a70 100644 --- a/tests/tizen_capi/unittest_tizen_capi.cpp +++ b/tests/tizen_capi/unittest_tizen_capi.cpp @@ -281,11 +281,14 @@ TEST (nnstreamer_capi_valve, failure_01) * @brief A tensor-sink callback for sink handle in a pipeline */ static void -test_sink_callback_dm01 (const ml_tensors_data_s * data, +test_sink_callback_dm01 (const ml_tensors_data_h data, const ml_tensors_info_h info, void *user_data) { gchar *filepath = (gchar *) user_data; unsigned int i, num = 0; + void *data_ptr; + size_t data_size; + int status; FILE *fp = g_fopen (filepath, "a"); if (fp == NULL) @@ -294,7 +297,9 @@ test_sink_callback_dm01 (const ml_tensors_data_s * data, ml_util_get_tensors_count (info, &num); for (i = 0; i < num; i++) { - fwrite (data->tensors[i].tensor, data->tensors[i].size, 1, fp); + status = ml_util_get_tensor_data (data, i, &data_ptr, &data_size); + if (status == ML_ERROR_NONE) + fwrite (data_ptr, data_size, 1, fp); } fclose (fp); @@ -304,7 +309,7 @@ test_sink_callback_dm01 (const ml_tensors_data_s * data, * @brief A tensor-sink callback for sink handle in a pipeline */ static void -test_sink_callback_count (const ml_tensors_data_s * data, +test_sink_callback_count (const ml_tensors_data_h data, const ml_tensors_info_h info, void *user_data) { guint *count = (guint *) user_data; @@ -524,9 +529,6 @@ TEST (nnstreamer_capi_sink, failure_01) g_free (count_sink); } -static char uintarray[10][4]; -static char *uia_index[10]; - /** * @brief Test NNStreamer pipeline src */ @@ -546,33 +548,32 @@ TEST (nnstreamer_capi_src, dummy_01) ml_pipeline_src_h srchandle; int status; ml_tensors_info_h info; - ml_tensors_data_s data1, data2; + ml_tensors_data_h data1, data2; unsigned int count = 0; ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN; ml_tensor_dimension dim = { 0, }; int i; - char *uintarray2[10]; + uint8_t *uintarray1[10]; + uint8_t *uintarray2[10]; uint8_t *content; - gboolean r; gsize len; status = ml_pipeline_construct (pipeline, &handle); EXPECT_EQ (status, ML_ERROR_NONE); EXPECT_TRUE (dir != NULL); for (i = 0; i < 10; i++) { - uia_index[i] = &uintarray[i][0]; + uintarray1[i] = (uint8_t *) g_malloc (4); + uintarray1[i][0] = i + 4; + uintarray1[i][1] = i + 1; + uintarray1[i][2] = i + 3; + uintarray1[i][3] = i + 2; - uintarray[i][0] = i; - uintarray[i][1] = i + 1; - uintarray[i][2] = i + 3; - uintarray[i][3] = i + 2; - - uintarray2[i] = (char *) g_malloc (4); + uintarray2[i] = (uint8_t *) g_malloc (4); uintarray2[i][0] = i + 3; uintarray2[i][1] = i + 2; uintarray2[i][2] = i + 1; - uintarray2[i][3] = i; + uintarray2[i][3] = i + 4; /* These will be free'ed by gstreamer (ML_PIPELINE_BUF_POLICY_AUTO_FREE) */ /** @todo 
Check whether gstreamer really deallocates this */ } @@ -603,17 +604,21 @@ TEST (nnstreamer_capi_src, dummy_01) EXPECT_EQ (dim[2], 1U); EXPECT_EQ (dim[3], 1U); + status = ml_util_allocate_tensors_data (info, &data1); + EXPECT_EQ (status, ML_ERROR_NONE); + ml_util_destroy_tensors_info (info); - data1.num_tensors = 1; - data1.tensors[0].tensor = uia_index[0]; - data1.tensors[0].size = 4; + status = ml_util_copy_tensor_data (data1, 0, uintarray1[0], 4); + EXPECT_EQ (status, ML_ERROR_NONE); - status = ml_pipeline_src_input_data (srchandle, &data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); EXPECT_EQ (status, ML_ERROR_NONE); + g_usleep (50000); /* 50ms. Wait a bit. */ - status = ml_pipeline_src_input_data (srchandle, &data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); EXPECT_EQ (status, ML_ERROR_NONE); + g_usleep (50000); /* 50ms. Wait a bit. */ status = ml_pipeline_src_put_handle (srchandle); EXPECT_EQ (status, ML_ERROR_NONE); @@ -637,17 +642,22 @@ TEST (nnstreamer_capi_src, dummy_01) EXPECT_EQ (dim[3], 1U); for (i = 0; i < 10; i++) { - data1.num_tensors = 1; - data1.tensors[0].tensor = uia_index[i]; - data1.tensors[0].size = 4; - status = ml_pipeline_src_input_data (srchandle, &data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + status = ml_util_copy_tensor_data (data1, 0, uintarray1[i], 4); + EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_util_allocate_tensors_data (info, &data2); EXPECT_EQ (status, ML_ERROR_NONE); - data2.num_tensors = 1; - data2.tensors[0].tensor = uintarray2[i]; - data2.tensors[0].size = 4; - status = ml_pipeline_src_input_data (srchandle, &data2, ML_PIPELINE_BUF_POLICY_AUTO_FREE); + status = ml_util_copy_tensor_data (data2, 0, uintarray2[i], 4); EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_pipeline_src_input_data (srchandle, data2, ML_PIPELINE_BUF_POLICY_AUTO_FREE); + EXPECT_EQ (status, ML_ERROR_NONE); + + g_usleep (50000); /* 50ms. Wait a bit. */ } status = ml_pipeline_src_put_handle (srchandle); @@ -660,23 +670,23 @@ TEST (nnstreamer_capi_src, dummy_01) g_free (pipeline); - r = g_file_get_contents (file1, (gchar **) &content, &len, NULL); - EXPECT_EQ (r, TRUE); - + EXPECT_TRUE (g_file_get_contents (file1, (gchar **) &content, &len, NULL)); EXPECT_EQ (len, 8U * 11); for (i = 0; i < 10; i++) { - EXPECT_EQ (content[i * 8 + 0 + 8], i); + EXPECT_EQ (content[i * 8 + 0 + 8], i + 4); EXPECT_EQ (content[i * 8 + 1 + 8], i + 1); EXPECT_EQ (content[i * 8 + 2 + 8], i + 3); EXPECT_EQ (content[i * 8 + 3 + 8], i + 2); EXPECT_EQ (content[i * 8 + 4 + 8], i + 3); EXPECT_EQ (content[i * 8 + 5 + 8], i + 2); EXPECT_EQ (content[i * 8 + 6 + 8], i + 1); - EXPECT_EQ (content[i * 8 + 7 + 8], i); + EXPECT_EQ (content[i * 8 + 7 + 8], i + 4); } g_free (content); + ml_util_destroy_tensors_info (info); + ml_util_destroy_tensors_data (data1); } /** @@ -735,18 +745,11 @@ TEST (nnstreamer_capi_src, failure_02) */ TEST (nnstreamer_capi_src, failure_03) { - const int num_tensors = ML_TENSOR_SIZE_LIMIT + 1; - const int num_dims = 4; - const char *pipeline = "appsrc name=srcx ! other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! 
tensor_sink"; ml_pipeline_h handle; ml_pipeline_src_h srchandle; - ml_tensors_data_s data; - - for (int i = 0; i < ML_TENSOR_SIZE_LIMIT; ++i) { - data.tensors[i].tensor = g_malloc0 (sizeof (char) * num_dims); - data.tensors[i].size = num_dims; - } + ml_tensors_data_h data; + ml_tensors_info_h info; int status = ml_pipeline_construct (pipeline, &handle); EXPECT_EQ (status, ML_ERROR_NONE); @@ -757,18 +760,13 @@ TEST (nnstreamer_capi_src, failure_03) status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle); EXPECT_EQ (status, ML_ERROR_NONE); - /* null data */ - status = ml_pipeline_src_input_data (srchandle, NULL, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); - EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + status = ml_pipeline_src_get_tensors_info (srchandle, &info); + EXPECT_EQ (status, ML_ERROR_NONE); - /* invalid number of tensors (max size) */ - data.num_tensors = num_tensors; - status = ml_pipeline_src_input_data (srchandle, &data, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); - EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + status = ml_util_allocate_tensors_data (info, &data); - /* invalid number of tensors (size is 0) */ - data.num_tensors = 0; - status = ml_pipeline_src_input_data (srchandle, &data, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); + /* null data */ + status = ml_pipeline_src_input_data (srchandle, NULL, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE); EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); status = ml_pipeline_src_put_handle (srchandle); @@ -780,8 +778,8 @@ TEST (nnstreamer_capi_src, failure_03) status = ml_pipeline_destroy (handle); EXPECT_EQ (status, ML_ERROR_NONE); - for (int i = 0; i < ML_TENSOR_SIZE_LIMIT; ++i) - g_free (data.tensors[i].tensor); + status = ml_util_destroy_tensors_data (data); + EXPECT_EQ (status, ML_ERROR_NONE); } /** @@ -1017,7 +1015,7 @@ TEST (nnstreamer_capi_singleshot, invoke_01) ml_single_h single; ml_tensors_info_h in_info, out_info; ml_tensors_info_h in_res, out_res; - ml_tensors_data_s *input, *output1, *output2; + ml_tensors_data_h input, output; ml_tensor_dimension in_dim, out_dim, res_dim; ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN; unsigned int count = 0; @@ -1104,36 +1102,19 @@ TEST (nnstreamer_capi_singleshot, invoke_01) EXPECT_TRUE (out_dim[2] == res_dim[2]); EXPECT_TRUE (out_dim[3] == res_dim[3]); - /* generate dummy data */ - input = ml_util_allocate_tensors_data (in_info); - EXPECT_TRUE (input != NULL); - - status = ml_util_get_last_error (); - EXPECT_EQ (status, ML_ERROR_NONE); - - output1 = ml_single_inference (single, input, NULL); - EXPECT_TRUE (output1 != NULL); - - status = ml_util_get_last_error (); - EXPECT_EQ (status, ML_ERROR_NONE); - - ml_util_free_tensors_data (&output1); - - output2 = ml_util_allocate_tensors_data (out_info); - EXPECT_TRUE (output2 != NULL); + input = output = NULL; - status = ml_util_get_last_error (); + /* generate dummy data */ + status = ml_util_allocate_tensors_data (in_info, &input); EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (input != NULL); - output1 = ml_single_inference (single, input, output2); - EXPECT_TRUE (output1 != NULL); - EXPECT_TRUE (output1 == output2); - - status = ml_util_get_last_error (); + status = ml_single_inference (single, input, &output); EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (output != NULL); - ml_util_free_tensors_data (&output2); - ml_util_free_tensors_data (&input); + ml_util_destroy_tensors_data (output); + ml_util_destroy_tensors_data (input); status = ml_single_close (single); EXPECT_EQ (status, ML_ERROR_NONE); @@ -1153,7 +1134,7 @@ TEST 
(nnstreamer_capi_singleshot, invoke_02) { ml_single_h single; ml_tensors_info_h in_info, out_info; - ml_tensors_data_s *input, *output1, *output2; + ml_tensors_data_h input, output; ml_tensor_dimension in_dim, out_dim; int status; @@ -1191,36 +1172,19 @@ TEST (nnstreamer_capi_singleshot, invoke_02) ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); EXPECT_EQ (status, ML_ERROR_NONE); - /* generate dummy data */ - input = ml_util_allocate_tensors_data (in_info); - EXPECT_TRUE (input != NULL); - - status = ml_util_get_last_error (); - EXPECT_EQ (status, ML_ERROR_NONE); - - output1 = ml_single_inference (single, input, NULL); - EXPECT_TRUE (output1 != NULL); - - status = ml_util_get_last_error (); - EXPECT_EQ (status, ML_ERROR_NONE); - - ml_util_free_tensors_data (&output1); + input = output = NULL; - output2 = ml_util_allocate_tensors_data (out_info); - EXPECT_TRUE (output2 != NULL); - - status = ml_util_get_last_error (); + /* generate dummy data */ + status = ml_util_allocate_tensors_data (in_info, &input); EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (input != NULL); - output1 = ml_single_inference (single, input, output2); - EXPECT_TRUE (output1 != NULL); - EXPECT_TRUE (output1 == output2); - - status = ml_util_get_last_error (); + status = ml_single_inference (single, input, &output); EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (output != NULL); - ml_util_free_tensors_data (&output2); - ml_util_free_tensors_data (&input); + ml_util_destroy_tensors_data (output); + ml_util_destroy_tensors_data (input); status = ml_single_close (single); EXPECT_EQ (status, ML_ERROR_NONE); @@ -1239,9 +1203,12 @@ TEST (nnstreamer_capi_singleshot, invoke_03) { ml_single_h single; ml_tensors_info_h in_info, out_info; - ml_tensors_data_s *input, *output1, *output2; + ml_tensors_data_h input, output; ml_tensor_dimension in_dim; - int i, status; + int status; + unsigned int i; + void *data_ptr; + size_t data_size; const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH"); gchar *test_model; @@ -1276,61 +1243,45 @@ TEST (nnstreamer_capi_singleshot, invoke_03) ML_NNFW_TYPE_CUSTOM_FILTER, ML_NNFW_HW_ANY); EXPECT_EQ (status, ML_ERROR_NONE); + input = output = NULL; + /* generate input data */ - input = ml_util_allocate_tensors_data (in_info); + status = ml_util_allocate_tensors_data (in_info, &input); + EXPECT_EQ (status, ML_ERROR_NONE); ASSERT_TRUE (input != NULL); - EXPECT_TRUE (input->num_tensors == 2U); for (i = 0; i < 10; i++) { int16_t i16 = (int16_t) (i + 1); float f32 = (float) (i + .1); - ((int16_t *) input->tensors[0].tensor)[i] = i16; - ((float *) input->tensors[1].tensor)[i] = f32; - } - - status = ml_util_get_last_error (); - EXPECT_EQ (status, ML_ERROR_NONE); - - output1 = ml_single_inference (single, input, NULL); - EXPECT_TRUE (output1 != NULL); - - status = ml_util_get_last_error (); - EXPECT_EQ (status, ML_ERROR_NONE); - - for (i = 0; i < 10; i++) { - int16_t i16 = (int16_t) (i + 1); - float f32 = (float) (i + .1); + status = ml_util_get_tensor_data (input, 0, &data_ptr, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); + ((int16_t *) data_ptr)[i] = i16; - EXPECT_EQ (((int16_t *) output1->tensors[0].tensor)[i], i16); - EXPECT_FLOAT_EQ (((float *) output1->tensors[1].tensor)[i], f32); + status = ml_util_get_tensor_data (input, 1, &data_ptr, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); + ((float *) data_ptr)[i] = f32; } - ml_util_free_tensors_data (&output1); - - output2 = ml_util_allocate_tensors_data (out_info); - EXPECT_TRUE (output2 != NULL); - - status = ml_util_get_last_error 
(); - EXPECT_EQ (status, ML_ERROR_NONE); - - output1 = ml_single_inference (single, input, output2); - EXPECT_TRUE (output1 != NULL); - EXPECT_TRUE (output1 == output2); - - status = ml_util_get_last_error (); + status = ml_single_inference (single, input, &output); EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (output != NULL); for (i = 0; i < 10; i++) { int16_t i16 = (int16_t) (i + 1); float f32 = (float) (i + .1); - EXPECT_EQ (((int16_t *) output1->tensors[0].tensor)[i], i16); - EXPECT_FLOAT_EQ (((float *) output1->tensors[1].tensor)[i], f32); + status = ml_util_get_tensor_data (output, 0, &data_ptr, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (((int16_t *) data_ptr)[i], i16); + + status = ml_util_get_tensor_data (output, 1, &data_ptr, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_FLOAT_EQ (((float *) data_ptr)[i], f32); } - ml_util_free_tensors_data (&output2); - ml_util_free_tensors_data (&input); + ml_util_destroy_tensors_data (output); + ml_util_destroy_tensors_data (input); status = ml_single_close (single); EXPECT_EQ (status, ML_ERROR_NONE); @@ -1350,13 +1301,15 @@ TEST (nnstreamer_capi_singleshot, invoke_04) ml_single_h single; ml_tensors_info_h in_info, out_info; ml_tensors_info_h in_res, out_res; - ml_tensors_data_s *input, *output; + ml_tensors_data_h input, output; ml_tensor_dimension in_dim, out_dim, res_dim; ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN; unsigned int count = 0; char *name = NULL; int status, max_score_index; float score, max_score; + void *data_ptr; + size_t data_size; const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH"); gchar *test_model, *test_file; @@ -1449,28 +1402,31 @@ TEST (nnstreamer_capi_singleshot, invoke_04) EXPECT_TRUE (out_dim[2] == res_dim[2]); EXPECT_TRUE (out_dim[3] == res_dim[3]); + input = output = NULL; + /* generate input data */ - input = ml_util_allocate_tensors_data (in_info); + status = ml_util_allocate_tensors_data (in_info, &input); + EXPECT_EQ (status, ML_ERROR_NONE); EXPECT_TRUE (input != NULL); - status = ml_util_get_last_error (); + status = ml_util_copy_tensor_data (input, 0, contents, len); EXPECT_EQ (status, ML_ERROR_NONE); - memcpy (input->tensors[0].tensor, contents, len); - - output = ml_single_inference (single, input, NULL); - EXPECT_TRUE (output != NULL); - - status = ml_util_get_last_error (); + status = ml_single_inference (single, input, &output); EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (output != NULL); /* check result (max score index is 2) */ - EXPECT_EQ (output->num_tensors, 1U); + status = ml_util_get_tensor_data (output, 1, &data_ptr, &data_size); + EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_util_get_tensor_data (output, 0, &data_ptr, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); max_score = .0; max_score_index = 0; for (gint i = 0; i < 12; i++) { - score = ((float *) output->tensors[0].tensor)[i]; + score = ((float *) data_ptr)[i]; if (score > max_score) { max_score = score; max_score_index = i; @@ -1479,8 +1435,8 @@ TEST (nnstreamer_capi_singleshot, invoke_04) EXPECT_EQ (max_score_index, 2); - ml_util_free_tensors_data (&output); - ml_util_free_tensors_data (&input); + ml_util_destroy_tensors_data (output); + ml_util_destroy_tensors_data (input); status = ml_single_close (single); EXPECT_EQ (status, ML_ERROR_NONE); -- 2.7.4
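Below is a minimal usage sketch of the handle-based data API this patch introduces, mirroring the flow exercised by the updated unit tests. It is illustrative only and not part of the patch: the function name push_one_frame, the pipeline description, and the 4-byte payload are assumptions made for the example, error handling is abbreviated, and the include path may differ per installation.

#include <stdint.h>
#include <nnstreamer.h> /* assumed install name of api/capi/include/nnstreamer.h */

static int
push_one_frame (void)
{
  ml_pipeline_h pipe;
  ml_pipeline_src_h src;
  ml_tensors_info_h info = NULL;
  ml_tensors_data_h data = NULL;
  uint8_t payload[4] = { 1, 2, 3, 4 }; /* illustrative input frame */
  int status;

  status = ml_pipeline_construct ("appsrc name=srcx ! "
      "other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,"
      "framerate=(fraction)0/1 ! tensor_sink", &pipe);
  if (status != ML_ERROR_NONE)
    return status;

  status = ml_pipeline_src_get_handle (pipe, "srcx", &src);
  if (status != ML_ERROR_NONE)
    goto done;

  /* Allocate a data handle matching the src pad, fill tensor 0, then push. */
  status = ml_pipeline_src_get_tensors_info (src, &info);
  if (status == ML_ERROR_NONE)
    status = ml_util_allocate_tensors_data (info, &data);
  if (status == ML_ERROR_NONE)
    status = ml_util_copy_tensor_data (data, 0, payload, sizeof (payload));
  if (status == ML_ERROR_NONE)
    status = ml_pipeline_src_input_data (src, data,
        ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);

  /* The unit tests wait briefly here before releasing the buffer, since the
   * pushed memory is consumed asynchronously. */
  ml_pipeline_src_put_handle (src);

done:
  if (data)
    ml_util_destroy_tensors_data (data);
  if (info)
    ml_util_destroy_tensors_info (info);
  ml_pipeline_destroy (pipe);
  return status;
}

With ML_PIPELINE_BUF_POLICY_DO_NOT_FREE the data handle stays owned by the caller, hence the explicit ml_util_destroy_tensors_data(). Single-shot callers follow the same pattern, except that ml_single_inference() now allocates the output handle itself and reports a status code instead of returning a buffer pointer.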