From: Jaeyun
Date: Wed, 3 Jul 2019 11:06:54 +0000 (+0900)
Subject: [C-Api] change function name
X-Git-Tag: accepted/tizen/unified/20190717.115101~12
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2cca39a86fac015cb9ba2356543982bdfbf036bb;p=platform%2Fupstream%2Fnnstreamer.git

[C-Api] change function name

Change function name rule.
(remove _util and add prefix _info/_data)

Signed-off-by: Jaeyun Jung
---
diff --git a/api/capi/include/nnstreamer-capi-private.h b/api/capi/include/nnstreamer-capi-private.h
index 36e6b0b..cf615aa 100644
--- a/api/capi/include/nnstreamer-capi-private.h
+++ b/api/capi/include/nnstreamer-capi-private.h
@@ -204,7 +204,7 @@ typedef struct _ml_pipeline_valve {
 /**
  * @brief Gets the byte size of the given tensor info.
  */
-size_t ml_util_get_tensor_size (const ml_tensor_info_s *info);
+size_t ml_tensor_info_get_size (const ml_tensor_info_s *info);
 
 /**
  * @brief Initializes the tensors information with default value.
@@ -214,29 +214,29 @@ size_t ml_util_get_tensor_size (const ml_tensor_info_s *info);
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
  */
-int ml_util_initialize_tensors_info (ml_tensors_info_s *info);
+int ml_tensors_info_initialize (ml_tensors_info_s *info);
 
 /**
  * @brief Frees the tensors info pointer.
  * @since_tizen 5.5
  * @param[in] info The tensors info pointer to be freed.
  */
-void ml_util_free_tensors_info (ml_tensors_info_s *info);
+void ml_tensors_info_free (ml_tensors_info_s *info);
 
 /**
  * @brief Copies tensor metadata from gst tensors info.
  */
-void ml_util_copy_tensors_info_from_gst (ml_tensors_info_s *ml_info, const GstTensorsInfo *gst_info);
+void ml_tensors_info_copy_from_gst (ml_tensors_info_s *ml_info, const GstTensorsInfo *gst_info);
 
 /**
  * @brief Copies tensor metadata from ml tensors info.
  */
-void ml_util_copy_tensors_info_from_ml (GstTensorsInfo *gst_info, const ml_tensors_info_s *ml_info);
+void ml_tensors_info_copy_from_ml (GstTensorsInfo *gst_info, const ml_tensors_info_s *ml_info);
 
 /**
  * @brief Gets caps from tensors info.
  */
-GstCaps *ml_util_get_caps_from_tensors_info (const ml_tensors_info_s *info);
+GstCaps * ml_tensors_info_get_caps (const ml_tensors_info_s *info);
 
 #ifdef __cplusplus
 }
diff --git a/api/capi/include/nnstreamer-single.h b/api/capi/include/nnstreamer-single.h
index 0fe13db..6ebc92f 100644
--- a/api/capi/include/nnstreamer-single.h
+++ b/api/capi/include/nnstreamer-single.h
@@ -90,7 +90,7 @@ int ml_single_close (ml_single_h single);
  * @since_tizen 5.5
  * @param[in] single The model handle to be inferred.
  * @param[in] input The input data to be inferred.
- * @param[out] output The allocated output buffer. The caller is responsible for freeing the output buffer with ml_util_destroy_tensors_data().
+ * @param[out] output The allocated output buffer. The caller is responsible for freeing the output buffer with ml_tensors_data_destroy().
  * @return @c 0 on success. otherwise a negative error value.
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
@@ -112,7 +112,7 @@ int ml_single_inference (ml_single_h single, const ml_tensors_data_h input, ml_t
  * Besides, names of tensors may be not available while dimensions and types are available.
  * @since_tizen 5.5
  * @param[in] single The model handle.
- * @param[out] info The handle of input tensors information. The caller is responsible for freeing the information with ml_util_destroy_tensors_info().
+ * @param[out] info The handle of input tensors information. The caller is responsible for freeing the information with ml_tensors_info_destroy().
  * @return @c 0 on success. otherwise a negative error value.
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
@@ -125,7 +125,7 @@ int ml_single_get_input_info (ml_single_h single, ml_tensors_info_h *info);
  * Besides, names of tensors may be not available while dimensions and types are available.
  * @since_tizen 5.5
  * @param[in] single The model handle.
- * @param[out] info The handle of output tensors information. The caller is responsible for freeing the information with ml_util_destroy_tensors_info().
+ * @param[out] info The handle of output tensors information. The caller is responsible for freeing the information with ml_tensors_info_destroy().
  * @return @c 0 on success. otherwise a negative error value.
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
diff --git a/api/capi/include/nnstreamer.h b/api/capi/include/nnstreamer.h
index 5108e8e..0f287f0 100644
--- a/api/capi/include/nnstreamer.h
+++ b/api/capi/include/nnstreamer.h
@@ -213,7 +213,7 @@ typedef enum {
  * @since_tizen 5.5
  * @remarks The @a data can be used only in the callback. To use outside, make a copy.
  * @remarks The @a info can be used only in the callback. To use outside, make a copy.
- * @param[out] data The handle of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by ml_util_get_tensors_count() with the handle 'info'. Note that max num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
+ * @param[out] data The handle of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by ml_tensors_info_get_count() with the handle 'info'. Note that max num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
  * @param[out] info The handle of tensors information (cardinality, dimension, and type of given tensor/tensors).
  * @param[in,out] user_data User Application's Private Data.
  */
@@ -360,7 +360,7 @@ int ml_pipeline_src_input_data (ml_pipeline_src_h src_handle, ml_tensors_data_h
 /**
  * @brief Gets a handle for the tensors information of given src node.
  * @since_tizen 5.5
- * @remarks If the function succeeds, @a info handle must be released using ml_util_destroy_tensors_info().
+ * @remarks If the function succeeds, @a info handle must be released using ml_tensors_info_destroy().
  * @param[in] src_handle The source handle returned by ml_pipeline_src_get_handle().
  * @param[out] info The handle of tensors information.
  * @return 0 on success. Otherwise a negative error value.
@@ -456,14 +456,14 @@ int ml_pipeline_valve_set_open (ml_pipeline_valve_h valve_handle, bool open);
 ** NNStreamer Utilities **
 ****************************************************/
 /**
- * @brief Allocates a tensors information handle with default value.
+ * @brief Creates a tensors information handle with default value.
  * @since_tizen 5.5
  * @param[out] info The handle of tensors information.
  * @return @c 0 on success. Otherwise a negative error value.
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
  */
-int ml_util_allocate_tensors_info (ml_tensors_info_h *info);
+int ml_tensors_info_create (ml_tensors_info_h *info);
 
 /**
  * @brief Frees the given handle of a tensors information.
@@ -473,7 +473,7 @@ int ml_util_allocate_tensors_info (ml_tensors_info_h *info);
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
  */
-int ml_util_destroy_tensors_info (ml_tensors_info_h info);
+int ml_tensors_info_destroy (ml_tensors_info_h info);
 
 /**
  * @brief Validates the given tensors information.
@@ -485,7 +485,7 @@ int ml_util_destroy_tensors_info (ml_tensors_info_h info);
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
  */
-int ml_util_validate_tensors_info (const ml_tensors_info_h info, bool *valid);
+int ml_tensors_info_validate (const ml_tensors_info_h info, bool *valid);
 
 /**
  * @brief Copies the tensors information.
@@ -496,7 +496,7 @@ int ml_util_validate_tensors_info (const ml_tensors_info_h info, bool *valid);
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h src);
+int ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src);
 
 /**
  * @brief Sets the number of tensors with given handle of tensors information.
@@ -507,7 +507,7 @@ int ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h s
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_set_tensors_count (ml_tensors_info_h info, unsigned int count);
+int ml_tensors_info_set_count (ml_tensors_info_h info, unsigned int count);
 
 /**
  * @brief Gets the number of tensors with given handle of tensors information.
@@ -518,7 +518,7 @@ int ml_util_set_tensors_count (ml_tensors_info_h info, unsigned int count);
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_get_tensors_count (ml_tensors_info_h info, unsigned int *count);
+int ml_tensors_info_get_count (ml_tensors_info_h info, unsigned int *count);
 
 /**
  * @brief Sets the tensor name with given handle of tensors information.
@@ -530,7 +530,7 @@ int ml_util_get_tensors_count (ml_tensors_info_h info, unsigned int *count);
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_set_tensor_name (ml_tensors_info_h info, unsigned int index, const char *name);
+int ml_tensors_info_set_tensor_name (ml_tensors_info_h info, unsigned int index, const char *name);
 
 /**
  * @brief Gets the tensor name with given handle of tensors information.
@@ -542,7 +542,7 @@ int ml_util_set_tensor_name (ml_tensors_info_h info, unsigned int index, const c
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_get_tensor_name (ml_tensors_info_h info, unsigned int index, char **name);
+int ml_tensors_info_get_tensor_name (ml_tensors_info_h info, unsigned int index, char **name);
 
 /**
  * @brief Sets the tensor type with given handle of tensors information.
@@ -554,7 +554,7 @@ int ml_util_get_tensor_name (ml_tensors_info_h info, unsigned int index, char **
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_set_tensor_type (ml_tensors_info_h info, unsigned int index, const ml_tensor_type_e type);
+int ml_tensors_info_set_tensor_type (ml_tensors_info_h info, unsigned int index, const ml_tensor_type_e type);
 
 /**
  * @brief Gets the tensor type with given handle of tensors information.
@@ -566,7 +566,7 @@ int ml_util_set_tensor_type (ml_tensors_info_h info, unsigned int index, const m
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_get_tensor_type (ml_tensors_info_h info, unsigned int index, ml_tensor_type_e *type);
+int ml_tensors_info_get_tensor_type (ml_tensors_info_h info, unsigned int index, ml_tensor_type_e *type);
 
 /**
  * @brief Sets the tensor dimension with given handle of tensors information.
@@ -578,7 +578,7 @@ int ml_util_get_tensor_type (ml_tensors_info_h info, unsigned int index, ml_tens
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_set_tensor_dimension (ml_tensors_info_h info, unsigned int index, const ml_tensor_dimension dimension);
+int ml_tensors_info_set_tensor_dimension (ml_tensors_info_h info, unsigned int index, const ml_tensor_dimension dimension);
 
 /**
  * @brief Gets the tensor dimension with given handle of tensors information.
@@ -590,7 +590,7 @@ int ml_util_set_tensor_dimension (ml_tensors_info_h info, unsigned int index, co
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_get_tensor_dimension (ml_tensors_info_h info, unsigned int index, ml_tensor_dimension dimension);
+int ml_tensors_info_get_tensor_dimension (ml_tensors_info_h info, unsigned int index, ml_tensor_dimension dimension);
 
 /**
  * @brief Gets the byte size of the given tensors type.
@@ -598,19 +598,19 @@ int ml_util_get_tensor_dimension (ml_tensors_info_h info, unsigned int index, ml
  * @param[in] info The tensors' information handle.
  * @return @c >= 0 on success with byte size.
 */
-size_t ml_util_get_tensors_size (const ml_tensors_info_h info);
+size_t ml_tensors_info_get_size (const ml_tensors_info_h info);
 
 /**
- * @brief Allocates a tensor data frame with the given tensors information.
+ * @brief Creates a tensor data frame with the given tensors information.
  * @since_tizen 5.5
  * @param[in] info The handle of tensors information for the allocation.
- * @param[out] data The handle of tensors data. The caller is responsible for freeing the allocated data with ml_util_destroy_tensors_data().
+ * @param[out] data The handle of tensors data. The caller is responsible for freeing the allocated data with ml_tensors_data_destroy().
  * @return @c 0 on success. Otherwise a negative error value.
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
  * @retval #ML_ERROR_STREAMS_PIPE Failed to allocate new memory.
 */
-int ml_util_allocate_tensors_data (const ml_tensors_info_h info, ml_tensors_data_h *data);
+int ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h *data);
 
 /**
  * @brief Frees the given tensors' data handle.
@@ -620,7 +620,7 @@ int ml_util_allocate_tensors_data (const ml_tensors_info_h info, ml_tensors_data
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_destroy_tensors_data (ml_tensors_data_h data);
+int ml_tensors_data_destroy (ml_tensors_data_h data);
 
 /**
  * @brief Gets a tensor data of given handle.
@@ -633,7 +633,7 @@ int ml_util_destroy_tensors_data (ml_tensors_data_h data);
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_get_tensor_data (ml_tensors_data_h data, unsigned int index, void **raw_data, size_t *data_size);
+int ml_tensors_data_get_tensor_data (ml_tensors_data_h data, unsigned int index, void **raw_data, size_t *data_size);
 
 /**
  * @brief Copies a tensor data to given handle.
@@ -646,7 +646,7 @@ int ml_util_get_tensor_data (ml_tensors_data_h data, unsigned int index, void **
  * @retval #ML_ERROR_NONE Successful
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_copy_tensor_data (ml_tensors_data_h data, unsigned int index, const void *raw_data, const size_t data_size);
+int ml_tensors_data_set_tensor_data (ml_tensors_data_h data, unsigned int index, const void *raw_data, const size_t data_size);
 
 /**
  * @brief Checks the availability of the given execution environments.
@@ -661,7 +661,7 @@ int ml_util_copy_tensor_data (ml_tensors_data_h data, unsigned int index, const 
  * @retval #ML_ERROR_NONE Successful and the environments are available.
  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-int ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool *available);
+int ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool *available);
 
 /**
  * @}
diff --git a/api/capi/src/nnstreamer-capi-pipeline.c b/api/capi/src/nnstreamer-capi-pipeline.c
index cff4658..62a073f 100644
--- a/api/capi/src/nnstreamer-capi-pipeline.c
+++ b/api/capi/src/nnstreamer-capi-pipeline.c
@@ -81,7 +81,7 @@ construct_element (GstElement * e, ml_pipeline * p, const char *name,
   ret->handles = NULL;
   ret->src = NULL;
   ret->sink = NULL;
-  ml_util_initialize_tensors_info (&ret->tensors_info);
+  ml_tensors_info_initialize (&ret->tensors_info);
   ret->size = 0;
   ret->maxid = 0;
   ret->handle_id = 0;
@@ -100,7 +100,7 @@ get_tensors_info_from_caps (GstCaps * caps, ml_tensors_info_s * info)
   guint i, n_caps;
   gboolean found = FALSE;
 
-  ml_util_initialize_tensors_info (info);
+  ml_tensors_info_initialize (info);
   n_caps = gst_caps_get_size (caps);
 
   for (i = 0; i < n_caps; i++) {
@@ -108,7 +108,7 @@ get_tensors_info_from_caps (GstCaps * caps, ml_tensors_info_s * info)
     found = gst_tensors_config_from_structure (&config, s);
 
     if (found) {
-      ml_util_copy_tensors_info_from_gst (info, &config.info);
+      ml_tensors_info_copy_from_gst (info, &config.info);
       break;
     }
   }
@@ -188,7 +188,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data)
     }
 
     for (i = 0; i < elem->tensors_info.num_tensors; i++) {
-      size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
+      size_t sz = ml_tensor_info_get_size (&elem->tensors_info.info[i]);
 
       if (sz != data->tensors[i].size) {
         ml_loge
@@ -289,7 +289,7 @@ cleanup_node (gpointer data)
     g_list_free_full (e->handles, g_free);
   e->handles = NULL;
 
-  ml_util_free_tensors_info (&e->tensors_info);
+  ml_tensors_info_free (&e->tensors_info);
 
   g_mutex_unlock (&e->lock);
   g_mutex_clear (&e->lock);
@@ -732,7 +732,7 @@ ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem)
 
     if (found) {
       for (i = 0; i < elem->tensors_info.num_tensors; i++) {
-        sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
+        sz = ml_tensor_info_get_size (&elem->tensors_info.info[i]);
         elem->size += sz;
       }
     } else {
@@ -879,7 +879,7 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data,
   }
 
   for (i = 0; i < elem->tensors_info.num_tensors; i++) {
-    size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
+    size_t sz = ml_tensor_info_get_size (&elem->tensors_info.info[i]);
 
     if (sz != _data->tensors[i].size) {
       ml_loge
@@ -933,8 +933,8 @@ ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h,
   ret = ml_pipeline_src_parse_tensors_info (elem);
 
   if (ret == ML_ERROR_NONE) {
-    ml_util_allocate_tensors_info (info);
-    ml_util_copy_tensors_info (*info, &elem->tensors_info);
+    ml_tensors_info_create (info);
+    ml_tensors_info_clone (*info, &elem->tensors_info);
   }
 
   handle_exit (h);
diff --git a/api/capi/src/nnstreamer-capi-single.c b/api/capi/src/nnstreamer-capi-single.c
index ba0c5ed..2de9f14 100644
--- a/api/capi/src/nnstreamer-capi-single.c
+++ b/api/capi/src/nnstreamer-capi-single.c
@@ -78,7 +78,7 @@ ml_single_open (ml_single_h * single, const char *model,
 
   if (input_info) {
     /* Validate input tensor info. */
-    if (ml_util_validate_tensors_info (input_info, &valid) != ML_ERROR_NONE ||
+    if (ml_tensors_info_validate (input_info, &valid) != ML_ERROR_NONE ||
         valid == false) {
       ml_loge ("The given param, input tensor info is invalid.");
       return ML_ERROR_INVALID_PARAMETER;
@@ -87,7 +87,7 @@ ml_single_open (ml_single_h * single, const char *model,
 
   if (output_info) {
     /* Validate output tensor info. */
-    if (ml_util_validate_tensors_info (output_info, &valid) != ML_ERROR_NONE ||
+    if (ml_tensors_info_validate (output_info, &valid) != ML_ERROR_NONE ||
         valid == false) {
      ml_loge ("The given param, output tensor info is invalid.");
       return ML_ERROR_INVALID_PARAMETER;
@@ -145,7 +145,7 @@ ml_single_open (ml_single_h * single, const char *model,
 
   /* 2. Determine hw */
   /** @todo Now the param hw is ignored. (Supposed CPU only) Support others later. */
-  status = ml_util_check_nnfw_availability (nnfw, hw, &available);
+  status = ml_check_nnfw_availability (nnfw, hw, &available);
   if (status != ML_ERROR_NONE || !available) {
     ml_loge ("The given nnfw is not available.");
     return status;
@@ -173,8 +173,8 @@ ml_single_open (ml_single_h * single, const char *model,
     gchar *str_dim, *str_type, *str_name;
     gchar *in_option, *out_option;
 
-    ml_util_copy_tensors_info_from_ml (&in_info, in_tensors_info);
-    ml_util_copy_tensors_info_from_ml (&out_info, out_tensors_info);
+    ml_tensors_info_copy_from_ml (&in_info, in_tensors_info);
+    ml_tensors_info_copy_from_ml (&out_info, out_tensors_info);
 
     /* Set input option */
     str_dim = gst_tensors_info_get_dimensions_string (&in_info);
@@ -236,49 +236,49 @@ ml_single_open (ml_single_h * single, const char *model,
   single_h->src = appsrc;
   single_h->sink = appsink;
   single_h->filter = filter;
-  ml_util_initialize_tensors_info (&single_h->in_info);
-  ml_util_initialize_tensors_info (&single_h->out_info);
+  ml_tensors_info_initialize (&single_h->in_info);
+  ml_tensors_info_initialize (&single_h->out_info);
 
   /* 5. Set in/out caps and metadata */
   if (in_tensors_info) {
-    caps = ml_util_get_caps_from_tensors_info (in_tensors_info);
-    ml_util_copy_tensors_info (&single_h->in_info, in_tensors_info);
+    caps = ml_tensors_info_get_caps (in_tensors_info);
+    ml_tensors_info_clone (&single_h->in_info, in_tensors_info);
   } else {
     ml_tensors_info_h in_info;
 
     ml_single_get_input_info (single_h, &in_info);
-    ml_util_copy_tensors_info (&single_h->in_info, in_info);
-    ml_util_destroy_tensors_info (in_info);
+    ml_tensors_info_clone (&single_h->in_info, in_info);
+    ml_tensors_info_destroy (in_info);
 
-    status = ml_util_validate_tensors_info (&single_h->in_info, &valid);
+    status = ml_tensors_info_validate (&single_h->in_info, &valid);
     if (status != ML_ERROR_NONE || valid == false) {
       ml_loge ("Failed to get the input tensor info.");
       goto error;
     }
 
-    caps = ml_util_get_caps_from_tensors_info (&single_h->in_info);
+    caps = ml_tensors_info_get_caps (&single_h->in_info);
   }
 
   gst_app_src_set_caps (GST_APP_SRC (appsrc), caps);
   gst_caps_unref (caps);
 
   if (out_tensors_info) {
-    caps = ml_util_get_caps_from_tensors_info (out_tensors_info);
-    ml_util_copy_tensors_info (&single_h->out_info, out_tensors_info);
+    caps = ml_tensors_info_get_caps (out_tensors_info);
+    ml_tensors_info_clone (&single_h->out_info, out_tensors_info);
   } else {
     ml_tensors_info_h out_info;
 
     ml_single_get_output_info (single_h, &out_info);
-    ml_util_copy_tensors_info (&single_h->out_info, out_info);
-    ml_util_destroy_tensors_info (out_info);
+    ml_tensors_info_clone (&single_h->out_info, out_info);
+    ml_tensors_info_destroy (out_info);
 
-    status = ml_util_validate_tensors_info (&single_h->out_info, &valid);
+    status = ml_tensors_info_validate (&single_h->out_info, &valid);
     if (status != ML_ERROR_NONE || valid == false) {
       ml_loge ("Failed to get the output tensor info.");
       goto error;
     }
 
-    caps = ml_util_get_caps_from_tensors_info (&single_h->out_info);
+    caps = ml_tensors_info_get_caps (&single_h->out_info);
   }
 
   gst_app_sink_set_caps (GST_APP_SINK (appsink), caps);
@@ -330,8 +330,8 @@ ml_single_close (ml_single_h single)
     single_h->filter = NULL;
   }
 
-  ml_util_free_tensors_info (&single_h->in_info);
-  ml_util_free_tensors_info (&single_h->out_info);
+  ml_tensors_info_free (&single_h->in_info);
+  ml_tensors_info_free (&single_h->out_info);
 
   status = ml_pipeline_destroy (single_h->pipe);
   g_free (single_h);
@@ -363,7 +363,7 @@ ml_single_inference (ml_single_h single,
   in_data = (ml_tensors_data_s *) input;
 
   /* Allocate output buffer */
-  status = ml_util_allocate_tensors_data (&single_h->out_info, output);
+  status = ml_tensors_data_create (&single_h->out_info, output);
   if (status != ML_ERROR_NONE) {
     ml_loge ("Failed to allocate the memory block.");
     *output = NULL;
@@ -435,7 +435,7 @@ ml_single_get_input_info (ml_single_h single, ml_tensors_info_h * info)
   single_h = (ml_single *) single;
 
   /* allocate handle for tensors info */
-  ml_util_allocate_tensors_info (info);
+  ml_tensors_info_create (info);
   input_info = (ml_tensors_info_s *) (*info);
 
   gst_tensors_info_init (&gst_info);
@@ -463,7 +463,7 @@ ml_single_get_input_info (ml_single_h single, ml_tensors_info_h * info)
     ml_logw ("Invalid state, input tensor name is mismatched in filter.");
   }
 
-  ml_util_copy_tensors_info_from_gst (input_info, &gst_info);
+  ml_tensors_info_copy_from_gst (input_info, &gst_info);
   gst_tensors_info_free (&gst_info);
   return ML_ERROR_NONE;
 }
@@ -486,7 +486,7 @@ ml_single_get_output_info (ml_single_h single, ml_tensors_info_h * info)
   single_h = (ml_single *) single;
 
   /* allocate handle for tensors info */
-  ml_util_allocate_tensors_info (info);
+  ml_tensors_info_create (info);
   output_info = (ml_tensors_info_s *) (*info);
 
   gst_tensors_info_init (&gst_info);
@@ -514,7 +514,7 @@ ml_single_get_output_info (ml_single_h single, ml_tensors_info_h * info)
     ml_logw ("Invalid state, output tensor name is mismatched in filter.");
   }
 
-  ml_util_copy_tensors_info_from_gst (output_info, &gst_info);
+  ml_tensors_info_copy_from_gst (output_info, &gst_info);
   gst_tensors_info_free (&gst_info);
   return ML_ERROR_NONE;
 }
diff --git a/api/capi/src/nnstreamer-capi-util.c b/api/capi/src/nnstreamer-capi-util.c
index 8f8e361..4a93312 100644
--- a/api/capi/src/nnstreamer-capi-util.c
+++ b/api/capi/src/nnstreamer-capi-util.c
@@ -31,7 +31,7 @@
  * @brief Allocates a tensors information handle with default value.
 */
 int
-ml_util_allocate_tensors_info (ml_tensors_info_h * info)
+ml_tensors_info_create (ml_tensors_info_h * info)
 {
   ml_tensors_info_s *tensors_info;
 
@@ -39,7 +39,7 @@ ml_util_allocate_tensors_info (ml_tensors_info_h * info)
     return ML_ERROR_INVALID_PARAMETER;
 
   *info = tensors_info = g_new0 (ml_tensors_info_s, 1);
-  ml_util_initialize_tensors_info (tensors_info);
+  ml_tensors_info_initialize (tensors_info);
 
   return ML_ERROR_NONE;
 }
@@ -48,7 +48,7 @@ ml_util_allocate_tensors_info (ml_tensors_info_h * info)
  * @brief Frees the given handle of a tensors information.
 */
 int
-ml_util_destroy_tensors_info (ml_tensors_info_h info)
+ml_tensors_info_destroy (ml_tensors_info_h info)
 {
   ml_tensors_info_s *tensors_info;
 
@@ -57,7 +57,7 @@ ml_util_destroy_tensors_info (ml_tensors_info_h info)
   if (!tensors_info)
     return ML_ERROR_INVALID_PARAMETER;
 
-  ml_util_free_tensors_info (tensors_info);
+  ml_tensors_info_free (tensors_info);
   g_free (tensors_info);
 
   return ML_ERROR_NONE;
@@ -67,7 +67,7 @@ ml_util_destroy_tensors_info (ml_tensors_info_h info)
  * @brief Initializes the tensors information with default value.
 */
 int
-ml_util_initialize_tensors_info (ml_tensors_info_s * info)
+ml_tensors_info_initialize (ml_tensors_info_s * info)
 {
   guint i, j;
 
@@ -92,7 +92,7 @@ ml_util_initialize_tensors_info (ml_tensors_info_s * info)
  * @brief Validates the given tensor info is valid.
 */
 static int
-ml_util_validate_tensor_info (const ml_tensor_info_s * info)
+ml_tensor_info_validate (const ml_tensor_info_s * info)
 {
   guint i;
 
@@ -114,7 +114,7 @@ ml_util_validate_tensor_info (const ml_tensor_info_s * info)
  * @brief Validates the given tensors info is valid.
 */
 int
-ml_util_validate_tensors_info (const ml_tensors_info_h info, bool * valid)
+ml_tensors_info_validate (const ml_tensors_info_h info, bool * valid)
 {
   ml_tensors_info_s *tensors_info;
   guint i;
@@ -132,7 +132,7 @@ ml_util_validate_tensors_info (const ml_tensors_info_h info, bool * valid)
 
   for (i = 0; i < tensors_info->num_tensors; i++) {
     /* Failed if returned value is not 0 (ML_ERROR_NONE) */
-    if (ml_util_validate_tensor_info (&tensors_info->info[i]) != ML_ERROR_NONE)
+    if (ml_tensor_info_validate (&tensors_info->info[i]) != ML_ERROR_NONE)
       goto done;
   }
 
@@ -146,7 +146,7 @@ done:
  * @brief Sets the number of tensors with given handle of tensors information.
 */
 int
-ml_util_set_tensors_count (ml_tensors_info_h info, unsigned int count)
+ml_tensors_info_set_count (ml_tensors_info_h info, unsigned int count)
 {
   ml_tensors_info_s *tensors_info;
 
@@ -163,7 +163,7 @@ ml_util_set_tensors_count (ml_tensors_info_h info, unsigned int count)
  * @brief Gets the number of tensors with given handle of tensors information.
 */
 int
-ml_util_get_tensors_count (ml_tensors_info_h info, unsigned int *count)
+ml_tensors_info_get_count (ml_tensors_info_h info, unsigned int *count)
 {
   ml_tensors_info_s *tensors_info;
 
@@ -180,7 +180,7 @@ ml_util_get_tensors_count (ml_tensors_info_h info, unsigned int *count)
  * @brief Sets the tensor name with given handle of tensors information.
 */
 int
-ml_util_set_tensor_name (ml_tensors_info_h info,
+ml_tensors_info_set_tensor_name (ml_tensors_info_h info,
     unsigned int index, const char *name)
 {
   ml_tensors_info_s *tensors_info;
@@ -208,7 +208,7 @@ ml_util_set_tensor_name (ml_tensors_info_h info,
  * @brief Gets the tensor name with given handle of tensors information.
 */
 int
-ml_util_get_tensor_name (ml_tensors_info_h info,
+ml_tensors_info_get_tensor_name (ml_tensors_info_h info,
     unsigned int index, char **name)
 {
   ml_tensors_info_s *tensors_info;
@@ -230,7 +230,7 @@ ml_util_get_tensor_name (ml_tensors_info_h info,
  * @brief Sets the tensor type with given handle of tensors information.
 */
 int
-ml_util_set_tensor_type (ml_tensors_info_h info,
+ml_tensors_info_set_tensor_type (ml_tensors_info_h info,
     unsigned int index, const ml_tensor_type_e type)
 {
   ml_tensors_info_s *tensors_info;
@@ -252,7 +252,7 @@ ml_util_set_tensor_type (ml_tensors_info_h info,
  * @brief Gets the tensor type with given handle of tensors information.
 */
 int
-ml_util_get_tensor_type (ml_tensors_info_h info,
+ml_tensors_info_get_tensor_type (ml_tensors_info_h info,
     unsigned int index, ml_tensor_type_e * type)
 {
   ml_tensors_info_s *tensors_info;
@@ -274,7 +274,7 @@ ml_util_get_tensor_type (ml_tensors_info_h info,
  * @brief Sets the tensor dimension with given handle of tensors information.
 */
 int
-ml_util_set_tensor_dimension (ml_tensors_info_h info,
+ml_tensors_info_set_tensor_dimension (ml_tensors_info_h info,
     unsigned int index, const ml_tensor_dimension dimension)
 {
   ml_tensors_info_s *tensors_info;
@@ -299,7 +299,7 @@ ml_util_set_tensor_dimension (ml_tensors_info_h info,
  * @brief Gets the tensor dimension with given handle of tensors information.
 */
 int
-ml_util_get_tensor_dimension (ml_tensors_info_h info,
+ml_tensors_info_get_tensor_dimension (ml_tensors_info_h info,
     unsigned int index, ml_tensor_dimension dimension)
 {
   ml_tensors_info_s *tensors_info;
@@ -324,7 +324,7 @@ ml_util_get_tensor_dimension (ml_tensors_info_h info,
  * @brief Gets the byte size of the given tensor info.
 */
 size_t
-ml_util_get_tensor_size (const ml_tensor_info_s * info)
+ml_tensor_info_get_size (const ml_tensor_info_s * info)
 {
   size_t tensor_size;
   gint i;
@@ -367,7 +367,7 @@ ml_util_get_tensor_size (const ml_tensor_info_s * info)
  * @brief Gets the byte size of the given tensors info.
 */
 size_t
-ml_util_get_tensors_size (const ml_tensors_info_h info)
+ml_tensors_info_get_size (const ml_tensors_info_h info)
 {
   ml_tensors_info_s *tensors_info;
   size_t tensor_size;
@@ -380,7 +380,7 @@ ml_util_get_tensors_size (const ml_tensors_info_h info)
   tensor_size = 0;
 
   for (i = 0; i < tensors_info->num_tensors; i++) {
-    tensor_size += ml_util_get_tensor_size (&tensors_info->info[i]);
+    tensor_size += ml_tensor_info_get_size (&tensors_info->info[i]);
   }
 
   return tensor_size;
@@ -390,7 +390,7 @@ ml_util_get_tensors_size (const ml_tensors_info_h info)
 * @brief Frees the tensors info pointer.
 */
 void
-ml_util_free_tensors_info (ml_tensors_info_s * info)
+ml_tensors_info_free (ml_tensors_info_s * info)
 {
   gint i;
 
@@ -404,14 +404,14 @@ ml_util_free_tensors_info (ml_tensors_info_s * info)
     }
   }
 
-  ml_util_initialize_tensors_info (info);
+  ml_tensors_info_initialize (info);
 }
 
 /**
  * @brief Frees the tensors data pointer.
 */
 int
-ml_util_destroy_tensors_data (ml_tensors_data_h data)
+ml_tensors_data_destroy (ml_tensors_data_h data)
 {
   ml_tensors_data_s *_data;
   guint i;
@@ -436,7 +436,7 @@ ml_util_destroy_tensors_data (ml_tensors_data_h data)
  * @brief Allocates a tensor data frame with the given tensors info. (more info in nnstreamer.h)
 */
 int
-ml_util_allocate_tensors_data (const ml_tensors_info_h info,
+ml_tensors_data_create (const ml_tensors_info_h info,
     ml_tensors_data_h * data)
 {
   ml_tensors_data_s *_data;
@@ -457,7 +457,7 @@ ml_util_allocate_tensors_data (const ml_tensors_info_h info,
   _data->num_tensors = tensors_info->num_tensors;
 
   for (i = 0; i < _data->num_tensors; i++) {
-    _data->tensors[i].size = ml_util_get_tensor_size (&tensors_info->info[i]);
+    _data->tensors[i].size = ml_tensor_info_get_size (&tensors_info->info[i]);
     _data->tensors[i].tensor = g_malloc0 (_data->tensors[i].size);
     if (_data->tensors[i].tensor == NULL)
       goto failed;
@@ -481,7 +481,7 @@ failed:
  * @brief Gets a tensor data of given handle.
 */
 int
-ml_util_get_tensor_data (ml_tensors_data_h data, unsigned int index,
+ml_tensors_data_get_tensor_data (ml_tensors_data_h data, unsigned int index,
     void **raw_data, size_t * data_size)
 {
   ml_tensors_data_s *_data;
@@ -504,7 +504,7 @@ ml_util_get_tensor_data (ml_tensors_data_h data, unsigned int index,
  * @brief Copies a tensor data to given handle.
 */
 int
-ml_util_copy_tensor_data (ml_tensors_data_h data, unsigned int index,
+ml_tensors_data_set_tensor_data (ml_tensors_data_h data, unsigned int index,
    const void *raw_data, const size_t data_size)
 {
   ml_tensors_data_s *_data;
@@ -528,7 +528,7 @@ ml_util_copy_tensor_data (ml_tensors_data_h data, unsigned int index,
  * @brief Copies tensor meta info.
 */
 int
-ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h src)
+ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src)
 {
   ml_tensors_info_s *dest_info, *src_info;
   guint i, j;
@@ -539,7 +539,7 @@ ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h src)
   if (!dest_info || !src_info)
     return ML_ERROR_INVALID_PARAMETER;
 
-  ml_util_initialize_tensors_info (dest_info);
+  ml_tensors_info_initialize (dest_info);
 
   dest_info->num_tensors = src_info->num_tensors;
 
@@ -559,7 +559,7 @@ ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h src)
  * @brief Copies tensor meta info from gst tensors info.
 */
 void
-ml_util_copy_tensors_info_from_gst (ml_tensors_info_s * ml_info,
+ml_tensors_info_copy_from_gst (ml_tensors_info_s * ml_info,
     const GstTensorsInfo * gst_info)
 {
   guint i, j;
@@ -568,7 +568,7 @@ ml_util_copy_tensors_info_from_gst (ml_tensors_info_s * ml_info,
   if (!ml_info || !gst_info)
     return;
 
-  ml_util_initialize_tensors_info (ml_info);
+  ml_tensors_info_initialize (ml_info);
 
   max_dim = MIN (ML_TENSOR_RANK_LIMIT, NNS_TENSOR_RANK_LIMIT);
   ml_info->num_tensors = gst_info->num_tensors;
@@ -631,7 +631,7 @@ ml_util_copy_tensors_info_from_gst (ml_tensors_info_s * ml_info,
  * @brief Copies tensor meta info from gst tensors info.
 */
 void
-ml_util_copy_tensors_info_from_ml (GstTensorsInfo * gst_info,
+ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info,
     const ml_tensors_info_s * ml_info)
 {
   guint i, j;
@@ -703,7 +703,7 @@ ml_util_copy_tensors_info_from_ml (GstTensorsInfo * gst_info,
  * @brief Gets caps from tensors info.
 */
 GstCaps *
-ml_util_get_caps_from_tensors_info (const ml_tensors_info_s * info)
+ml_tensors_info_get_caps (const ml_tensors_info_s * info)
 {
   GstCaps *caps;
   GstTensorsConfig config;
@@ -711,7 +711,7 @@ ml_util_get_caps_from_tensors_info (const ml_tensors_info_s * info)
   if (!info)
     return NULL;
 
-  ml_util_copy_tensors_info_from_ml (&config.info, info);
+  ml_tensors_info_copy_from_ml (&config.info, info);
 
   /* set framerate 0/1 */
   config.rate_n = 0;
@@ -739,7 +739,7 @@ ml_util_get_caps_from_tensors_info (const ml_tensors_info_s * info)
  * @brief Checks the availability of the given execution environments.
 */
 int
-ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
+ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
     bool * available)
 {
   if (!available)
diff --git a/tests/tizen_capi/unittest_tizen_capi.cpp b/tests/tizen_capi/unittest_tizen_capi.cpp
index 7c384b5..4a60c3c 100644
--- a/tests/tizen_capi/unittest_tizen_capi.cpp
+++ b/tests/tizen_capi/unittest_tizen_capi.cpp
@@ -295,10 +295,10 @@ test_sink_callback_dm01 (const ml_tensors_data_h data,
   if (fp == NULL)
     return;
 
-  ml_util_get_tensors_count (info, &num);
+  ml_tensors_info_get_count (info, &num);
 
   for (i = 0; i < num; i++) {
-    status = ml_util_get_tensor_data (data, i, &data_ptr, &data_size);
+    status = ml_tensors_data_get_tensor_data (data, i, &data_ptr, &data_size);
     if (status == ML_ERROR_NONE)
       fwrite (data_ptr, data_size, 1, fp);
   }
@@ -592,24 +592,24 @@ TEST (nnstreamer_capi_src, dummy_01)
   status = ml_pipeline_src_get_tensors_info (srchandle, &info);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  ml_util_get_tensors_count (info, &count);
+  ml_tensors_info_get_count (info, &count);
   EXPECT_EQ (count, 1U);
 
-  ml_util_get_tensor_type (info, 0, &type);
+  ml_tensors_info_get_tensor_type (info, 0, &type);
   EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
 
-  ml_util_get_tensor_dimension (info, 0, dim);
+  ml_tensors_info_get_tensor_dimension (info, 0, dim);
   EXPECT_EQ (dim[0], 4U);
   EXPECT_EQ (dim[1], 1U);
   EXPECT_EQ (dim[2], 1U);
   EXPECT_EQ (dim[3], 1U);
 
-  status = ml_util_allocate_tensors_data (info, &data1);
+  status = ml_tensors_data_create (info, &data1);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  ml_util_destroy_tensors_info (info);
+  ml_tensors_info_destroy (info);
 
-  status = ml_util_copy_tensor_data (data1, 0, uintarray1[0], 4);
+  status = ml_tensors_data_set_tensor_data (data1, 0, uintarray1[0], 4);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
   status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
@@ -629,29 +629,29 @@ TEST (nnstreamer_capi_src, dummy_01)
   status = ml_pipeline_src_get_tensors_info (srchandle, &info);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  ml_util_get_tensors_count (info, &count);
+  ml_tensors_info_get_count (info, &count);
   EXPECT_EQ (count, 1U);
 
-  ml_util_get_tensor_type (info, 0, &type);
+  ml_tensors_info_get_tensor_type (info, 0, &type);
   EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
 
-  ml_util_get_tensor_dimension (info, 0, dim);
+  ml_tensors_info_get_tensor_dimension (info, 0, dim);
   EXPECT_EQ (dim[0], 4U);
   EXPECT_EQ (dim[1], 1U);
   EXPECT_EQ (dim[2], 1U);
   EXPECT_EQ (dim[3], 1U);
 
   for (i = 0; i < 10; i++) {
-    status = ml_util_copy_tensor_data (data1, 0, uintarray1[i], 4);
+    status = ml_tensors_data_set_tensor_data (data1, 0, uintarray1[i], 4);
     EXPECT_EQ (status, ML_ERROR_NONE);
 
     status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
    EXPECT_EQ (status, ML_ERROR_NONE);
 
-    status = ml_util_allocate_tensors_data (info, &data2);
+    status = ml_tensors_data_create (info, &data2);
     EXPECT_EQ (status, ML_ERROR_NONE);
 
-    status = ml_util_copy_tensor_data (data2, 0, uintarray2[i], 4);
+    status = ml_tensors_data_set_tensor_data (data2, 0, uintarray2[i], 4);
     EXPECT_EQ (status, ML_ERROR_NONE);
 
     status = ml_pipeline_src_input_data (srchandle, data2, ML_PIPELINE_BUF_POLICY_AUTO_FREE);
@@ -685,8 +685,8 @@ TEST (nnstreamer_capi_src, dummy_01)
   }
   g_free (content);
 
-  ml_util_destroy_tensors_info (info);
-  ml_util_destroy_tensors_data (data1);
+  ml_tensors_info_destroy (info);
+  ml_tensors_data_destroy (data1);
 }
 
 /**
@@ -763,7 +763,7 @@ TEST (nnstreamer_capi_src, failure_03)
   status = ml_pipeline_src_get_tensors_info (srchandle, &info);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  status = ml_util_allocate_tensors_data (info, &data);
+  status = ml_tensors_data_create (info, &data);
 
   /* null data */
   status = ml_pipeline_src_input_data (srchandle, NULL, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
@@ -778,7 +778,7 @@ TEST (nnstreamer_capi_src, failure_03)
   status = ml_pipeline_destroy (handle);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  status = ml_util_destroy_tensors_data (data);
+  status = ml_tensors_data_destroy (data);
   EXPECT_EQ (status, ML_ERROR_NONE);
 }
@@ -1033,26 +1033,26 @@ TEST (nnstreamer_capi_singleshot, invoke_01)
       "mobilenet_v1_1.0_224_quant.tflite", NULL);
   ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
 
-  ml_util_allocate_tensors_info (&in_info);
-  ml_util_allocate_tensors_info (&out_info);
-  ml_util_allocate_tensors_info (&in_res);
-  ml_util_allocate_tensors_info (&out_res);
+  ml_tensors_info_create (&in_info);
+  ml_tensors_info_create (&out_info);
+  ml_tensors_info_create (&in_res);
+  ml_tensors_info_create (&out_res);
 
   in_dim[0] = 3;
   in_dim[1] = 224;
   in_dim[2] = 224;
   in_dim[3] = 1;
-  ml_util_set_tensors_count (in_info, 1);
-  ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
-  ml_util_set_tensor_dimension (in_info, 0, in_dim);
+  ml_tensors_info_set_count (in_info, 1);
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
 
   out_dim[0] = 1001;
   out_dim[1] = 1;
   out_dim[2] = 1;
   out_dim[3] = 1;
-  ml_util_set_tensors_count (out_info, 1);
-  ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
-  ml_util_set_tensor_dimension (out_info, 0, out_dim);
+  ml_tensors_info_set_count (out_info, 1);
+  ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+  ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim);
 
   status = ml_single_open (&single, test_model, in_info, out_info,
       ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
@@ -1062,19 +1062,19 @@ TEST (nnstreamer_capi_singleshot, invoke_01)
   status = ml_single_get_input_info (single, &in_res);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  status = ml_util_get_tensors_count (in_res, &count);
+  status = ml_tensors_info_get_count (in_res, &count);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_EQ (count, 1U);
 
-  status = ml_util_get_tensor_name (in_res, 0, &name);
+  status = ml_tensors_info_get_tensor_name (in_res, 0, &name);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (name == NULL);
 
-  status = ml_util_get_tensor_type (in_res, 0, &type);
+  status = ml_tensors_info_get_tensor_type (in_res, 0, &type);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
 
-  ml_util_get_tensor_dimension (in_res, 0, res_dim);
+  ml_tensors_info_get_tensor_dimension (in_res, 0, res_dim);
   EXPECT_TRUE (in_dim[0] == res_dim[0]);
   EXPECT_TRUE (in_dim[1] == res_dim[1]);
   EXPECT_TRUE (in_dim[2] == res_dim[2]);
@@ -1084,19 +1084,19 @@ TEST (nnstreamer_capi_singleshot, invoke_01)
   status = ml_single_get_output_info (single, &out_res);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  status = ml_util_get_tensors_count (out_res, &count);
+  status = ml_tensors_info_get_count (out_res, &count);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_EQ (count, 1U);
 
-  status = ml_util_get_tensor_name (out_res, 0, &name);
+  status = ml_tensors_info_get_tensor_name (out_res, 0, &name);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (name == NULL);
 
-  status = ml_util_get_tensor_type (out_res, 0, &type);
+  status = ml_tensors_info_get_tensor_type (out_res, 0, &type);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
 
-  ml_util_get_tensor_dimension (out_res, 0, res_dim);
+  ml_tensors_info_get_tensor_dimension (out_res, 0, res_dim);
   EXPECT_TRUE (out_dim[0] == res_dim[0]);
   EXPECT_TRUE (out_dim[1] == res_dim[1]);
   EXPECT_TRUE (out_dim[2] == res_dim[2]);
@@ -1105,7 +1105,7 @@
   input = output = NULL;
 
   /* generate dummy data */
-  status = ml_util_allocate_tensors_data (in_info, &input);
+  status = ml_tensors_data_create (in_info, &input);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (input != NULL);
 
@@ -1113,17 +1113,17 @@
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (output != NULL);
 
-  ml_util_destroy_tensors_data (output);
-  ml_util_destroy_tensors_data (input);
+  ml_tensors_data_destroy (output);
+  ml_tensors_data_destroy (input);
 
   status = ml_single_close (single);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
   g_free (test_model);
-  ml_util_destroy_tensors_info (in_info);
-  ml_util_destroy_tensors_info (out_info);
-  ml_util_destroy_tensors_info (in_res);
-  ml_util_destroy_tensors_info (out_res);
+  ml_tensors_info_destroy (in_info);
+  ml_tensors_info_destroy (out_info);
+  ml_tensors_info_destroy (in_res);
+  ml_tensors_info_destroy (out_res);
 }
 
 /**
@@ -1149,24 +1149,24 @@ TEST (nnstreamer_capi_singleshot, invoke_02)
      "mobilenet_v1_1.0_224_quant.tflite", NULL);
   ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
 
-  ml_util_allocate_tensors_info (&in_info);
-  ml_util_allocate_tensors_info (&out_info);
+  ml_tensors_info_create (&in_info);
+  ml_tensors_info_create (&out_info);
 
   in_dim[0] = 3;
   in_dim[1] = 224;
   in_dim[2] = 224;
   in_dim[3] = 1;
-  ml_util_set_tensors_count (in_info, 1);
-  ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
-  ml_util_set_tensor_dimension (in_info, 0, in_dim);
+  ml_tensors_info_set_count (in_info, 1);
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
 
   out_dim[0] = 1001;
   out_dim[1] = 1;
   out_dim[2] = 1;
   out_dim[3] = 1;
-  ml_util_set_tensors_count (out_info, 1);
-  ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
-  ml_util_set_tensor_dimension (out_info, 0, out_dim);
+  ml_tensors_info_set_count (out_info, 1);
+  ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+  ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim);
 
   status = ml_single_open (&single, test_model, NULL, NULL,
       ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
@@ -1175,7 +1175,7 @@
   input = output = NULL;
 
   /* generate dummy data */
-  status = ml_util_allocate_tensors_data (in_info, &input);
+  status = ml_tensors_data_create (in_info, &input);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (input != NULL);
 
@@ -1183,15 +1183,15 @@
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (output != NULL);
 
-  ml_util_destroy_tensors_data (output);
-  ml_util_destroy_tensors_data (input);
+  ml_tensors_data_destroy (output);
+  ml_tensors_data_destroy (input);
 
   status = ml_single_close (single);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
   g_free (test_model);
-  ml_util_destroy_tensors_info (in_info);
-  ml_util_destroy_tensors_info (out_info);
+  ml_tensors_info_destroy (in_info);
+  ml_tensors_info_destroy (out_info);
 }
 
 #endif /* ENABLE_TENSORFLOW_LITE */
@@ -1221,23 +1221,23 @@ TEST (nnstreamer_capi_singleshot, invoke_03)
      "libnnstreamer_customfilter_passthrough_variable.so", NULL);
   ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
 
-  ml_util_allocate_tensors_info (&in_info);
-  ml_util_allocate_tensors_info (&out_info);
+  ml_tensors_info_create (&in_info);
+  ml_tensors_info_create (&out_info);
 
-  ml_util_set_tensors_count (in_info, 2);
+  ml_tensors_info_set_count (in_info, 2);
 
   in_dim[0] = 10;
   in_dim[1] = 1;
   in_dim[2] = 1;
   in_dim[3] = 1;
 
-  ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
-  ml_util_set_tensor_dimension (in_info, 0, in_dim);
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
 
-  ml_util_set_tensor_type (in_info, 1, ML_TENSOR_TYPE_FLOAT32);
-  ml_util_set_tensor_dimension (in_info, 1, in_dim);
+  ml_tensors_info_set_tensor_type (in_info, 1, ML_TENSOR_TYPE_FLOAT32);
+  ml_tensors_info_set_tensor_dimension (in_info, 1, in_dim);
 
-  ml_util_copy_tensors_info (out_info, in_info);
+  ml_tensors_info_clone (out_info, in_info);
 
   status = ml_single_open (&single, test_model, in_info, out_info,
      ML_NNFW_TYPE_CUSTOM_FILTER, ML_NNFW_HW_ANY);
@@ -1246,7 +1246,7 @@
   input = output = NULL;
 
   /* generate input data */
-  status = ml_util_allocate_tensors_data (in_info, &input);
+  status = ml_tensors_data_create (in_info, &input);
   EXPECT_EQ (status, ML_ERROR_NONE);
   ASSERT_TRUE (input != NULL);
 
@@ -1254,11 +1254,11 @@
     int16_t i16 = (int16_t) (i + 1);
     float f32 = (float) (i + .1);
 
-    status = ml_util_get_tensor_data (input, 0, &data_ptr, &data_size);
+    status = ml_tensors_data_get_tensor_data (input, 0, &data_ptr, &data_size);
     EXPECT_EQ (status, ML_ERROR_NONE);
     ((int16_t *) data_ptr)[i] = i16;
 
-    status = ml_util_get_tensor_data (input, 1, &data_ptr, &data_size);
+    status = ml_tensors_data_get_tensor_data (input, 1, &data_ptr, &data_size);
     EXPECT_EQ (status, ML_ERROR_NONE);
     ((float *) data_ptr)[i] = f32;
   }
@@ -1271,24 +1271,24 @@
     int16_t i16 = (int16_t) (i + 1);
     float f32 = (float) (i + .1);
 
-    status = ml_util_get_tensor_data (output, 0, &data_ptr, &data_size);
+    status = ml_tensors_data_get_tensor_data (output, 0, &data_ptr, &data_size);
    EXPECT_EQ (status, ML_ERROR_NONE);
     EXPECT_EQ (((int16_t *) data_ptr)[i], i16);
 
-    status = ml_util_get_tensor_data (output, 1, &data_ptr, &data_size);
+    status = ml_tensors_data_get_tensor_data (output, 1, &data_ptr, &data_size);
     EXPECT_EQ (status, ML_ERROR_NONE);
     EXPECT_FLOAT_EQ (((float *) data_ptr)[i], f32);
   }
 
-  ml_util_destroy_tensors_data (output);
-  ml_util_destroy_tensors_data (input);
+  ml_tensors_data_destroy (output);
+  ml_tensors_data_destroy (input);
 
   status = ml_single_close (single);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
   g_free (test_model);
-  ml_util_destroy_tensors_info (in_info);
-  ml_util_destroy_tensors_info (out_info);
+  ml_tensors_info_destroy (in_info);
+  ml_tensors_info_destroy (out_info);
 }
 
 #ifdef ENABLE_TENSORFLOW
@@ -1328,31 +1328,31 @@ TEST (nnstreamer_capi_singleshot, invoke_04)
      "yes.wav", NULL);
   ASSERT_TRUE (g_file_test (test_file, G_FILE_TEST_EXISTS));
 
-  ml_util_allocate_tensors_info (&in_info);
-  ml_util_allocate_tensors_info (&out_info);
-  ml_util_allocate_tensors_info (&in_res);
-  ml_util_allocate_tensors_info (&out_res);
+  ml_tensors_info_create (&in_info);
+  ml_tensors_info_create (&out_info);
+  ml_tensors_info_create (&in_res);
+  ml_tensors_info_create (&out_res);
 
   in_dim[0] = 1;
   in_dim[1] = 16022;
   in_dim[2] = 1;
   in_dim[3] = 1;
-  ml_util_set_tensors_count (in_info, 1);
-  ml_util_set_tensor_name (in_info, 0, "wav_data");
-  ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
-  ml_util_set_tensor_dimension (in_info, 0, in_dim);
+  ml_tensors_info_set_count (in_info, 1);
+  ml_tensors_info_set_tensor_name (in_info, 0, "wav_data");
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
 
   out_dim[0] = 12;
   out_dim[1] = 1;
   out_dim[2] = 1;
   out_dim[3] = 1;
-  ml_util_set_tensors_count (out_info, 1);
-  ml_util_set_tensor_name (out_info, 0, "labels_softmax");
-  ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_FLOAT32);
-  ml_util_set_tensor_dimension (out_info, 0, out_dim);
+  ml_tensors_info_set_count (out_info, 1);
+  ml_tensors_info_set_tensor_name (out_info, 0, "labels_softmax");
+  ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_FLOAT32);
+  ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim);
 
   ASSERT_TRUE (g_file_get_contents (test_file, &contents, &len, NULL));
-  ASSERT_TRUE (len == ml_util_get_tensors_size (in_info));
+  ASSERT_TRUE (len == ml_tensors_info_get_size (in_info));
 
   status = ml_single_open (&single, test_model, in_info, out_info,
       ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_ANY);
@@ -1362,19 +1362,19 @@
   status = ml_single_get_input_info (single, &in_res);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  status = ml_util_get_tensors_count (in_res, &count);
+  status = ml_tensors_info_get_count (in_res, &count);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_EQ (count, 1U);
 
-  status = ml_util_get_tensor_name (in_res, 0, &name);
+  status = ml_tensors_info_get_tensor_name (in_res, 0, &name);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (g_str_equal (name, "wav_data"));
 
-  status = ml_util_get_tensor_type (in_res, 0, &type);
+  status = ml_tensors_info_get_tensor_type (in_res, 0, &type);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_EQ (type, ML_TENSOR_TYPE_INT16);
 
-  ml_util_get_tensor_dimension (in_res, 0, res_dim);
+  ml_tensors_info_get_tensor_dimension (in_res, 0, res_dim);
   EXPECT_TRUE (in_dim[0] == res_dim[0]);
   EXPECT_TRUE (in_dim[1] == res_dim[1]);
   EXPECT_TRUE (in_dim[2] == res_dim[2]);
@@ -1384,19 +1384,19 @@
   status = ml_single_get_output_info (single, &out_res);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
-  status = ml_util_get_tensors_count (out_res, &count);
+  status = ml_tensors_info_get_count (out_res, &count);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_EQ (count, 1U);
 
-  status = ml_util_get_tensor_name (out_res, 0, &name);
+  status = ml_tensors_info_get_tensor_name (out_res, 0, &name);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (g_str_equal (name, "labels_softmax"));
 
-  status = ml_util_get_tensor_type (out_res, 0, &type);
+  status = ml_tensors_info_get_tensor_type (out_res, 0, &type);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_EQ (type, ML_TENSOR_TYPE_FLOAT32);
 
-  ml_util_get_tensor_dimension (out_res, 0, res_dim);
+  ml_tensors_info_get_tensor_dimension (out_res, 0, res_dim);
   EXPECT_TRUE (out_dim[0] == res_dim[0]);
   EXPECT_TRUE (out_dim[1] == res_dim[1]);
   EXPECT_TRUE (out_dim[2] == res_dim[2]);
@@ -1405,11 +1405,11 @@
   input = output = NULL;
 
   /* generate input data */
-  status = ml_util_allocate_tensors_data (in_info, &input);
+  status = ml_tensors_data_create (in_info, &input);
   EXPECT_EQ (status, ML_ERROR_NONE);
   EXPECT_TRUE (input != NULL);
 
-  status = ml_util_copy_tensor_data (input, 0, contents, len);
+  status = ml_tensors_data_set_tensor_data (input, 0, contents, len);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
   status = ml_single_inference (single, input, &output);
@@ -1417,10 +1417,10 @@
   EXPECT_TRUE (output != NULL);
 
   /* check result (max score index is 2) */
-  status = ml_util_get_tensor_data (output, 1, &data_ptr, &data_size);
+  status = ml_tensors_data_get_tensor_data (output, 1, &data_ptr, &data_size);
   EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
 
-  status = ml_util_get_tensor_data (output, 0, &data_ptr, &data_size);
+  status = ml_tensors_data_get_tensor_data (output, 0, &data_ptr, &data_size);
   EXPECT_EQ (status, ML_ERROR_NONE);
 
   max_score = .0;
@@ -1435,8 +1435,8 @@
 
   EXPECT_EQ (max_score_index, 2);
 
-  ml_util_destroy_tensors_data (output);
-  ml_util_destroy_tensors_data (input);
+  ml_tensors_data_destroy (output);
+  ml_tensors_data_destroy (input);
 
   status = ml_single_close (single);
   EXPECT_EQ (status, ML_ERROR_NONE);
@@ -1444,10 +1444,10 @@
   g_free (test_model);
   g_free (test_file);
   g_free (contents);
-  ml_util_destroy_tensors_info (in_info);
-  ml_util_destroy_tensors_info (out_info);
-  ml_util_destroy_tensors_info (in_res);
-  ml_util_destroy_tensors_info (out_res);
+  ml_tensors_info_destroy (in_info);
+  ml_tensors_info_destroy (out_info);
+  ml_tensors_info_destroy (in_res);
+  ml_tensors_info_destroy (out_res);
 }
 
 #endif /* ENABLE_TENSORFLOW */
@@ -1474,8 +1474,8 @@ TEST (nnstreamer_capi_singleshot, failure_01)
      "mobilenet_v1_1.0_224_quant.tflite", NULL);
   ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
 
-  ml_util_allocate_tensors_info (&in_info);
-  ml_util_allocate_tensors_info (&out_info);
+  ml_tensors_info_create (&in_info);
+  ml_tensors_info_create (&out_info);
 
   /* invalid file path */
   status = ml_single_open (&single, "wrong_file_name", in_info, out_info,
@@ -1501,9 +1501,9 @@
   in_dim[1] = 224;
   in_dim[2] = 224;
   in_dim[3] = 1;
-  ml_util_set_tensors_count (in_info, 1);
-  ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
-  ml_util_set_tensor_dimension (in_info, 0, in_dim);
+  ml_tensors_info_set_count (in_info, 1);
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
 
   /* invalid output tensor info */
   status = ml_single_open (&single, test_model, in_info, out_info,
@@ -1514,9 +1514,9 @@
   out_dim[1] = 1;
   out_dim[2] = 1;
   out_dim[3] = 1;
-  ml_util_set_tensors_count (out_info, 1);
-  ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
-  ml_util_set_tensor_dimension (out_info, 0, out_dim);
+  ml_tensors_info_set_count (out_info, 1);
+  ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+  ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim);
 
   /* invalid file extension */
   status = ml_single_open (&single, test_model, in_info, out_info,
@@ -1536,8 +1536,8 @@
   EXPECT_EQ (status, ML_ERROR_NONE);
 
   g_free (test_model);
-  ml_util_destroy_tensors_info (in_info);
-  ml_util_destroy_tensors_info (out_info);
+  ml_tensors_info_destroy (in_info);
+  ml_tensors_info_destroy (out_info);
 }
 
 #endif /* ENABLE_TENSORFLOW_LITE */
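
---

A minimal caller-side sketch of the renamed single-shot API, put together only from the prototypes declared in this patch. The tensor shape, NNFW type, and the model path argument are illustrative placeholders, and error handling is abbreviated; the old names are noted in comments as a migration aid.

#include <nnstreamer.h>
#include <nnstreamer-single.h>

static int
invoke_once (const char *model_path)
{
  ml_single_h single = NULL;
  ml_tensors_info_h in_info = NULL;
  ml_tensors_data_h input = NULL, output = NULL;
  ml_tensor_dimension in_dim;
  void *raw = NULL;
  size_t raw_size = 0;
  int status;

  /* was ml_util_allocate_tensors_info () */
  ml_tensors_info_create (&in_info);
  /* was ml_util_set_tensors_count () */
  ml_tensors_info_set_count (in_info, 1);
  /* was ml_util_set_tensor_type () */
  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);

  in_dim[0] = 3;
  in_dim[1] = 224;
  in_dim[2] = 224;
  in_dim[3] = 1;
  /* was ml_util_set_tensor_dimension () */
  ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);

  /* Passing NULL for the output info lets the model's own metadata be used. */
  status = ml_single_open (&single, model_path, in_info, NULL,
      ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
  if (status != ML_ERROR_NONE)
    goto done;

  /* was ml_util_allocate_tensors_data () */
  status = ml_tensors_data_create (in_info, &input);
  if (status != ML_ERROR_NONE)
    goto done;

  /* Fill the input frame from an application buffer here:
   * was ml_util_copy_tensor_data (), now ml_tensors_data_set_tensor_data (). */

  /* The output frame is allocated by the call; release it with
   * ml_tensors_data_destroy () (was ml_util_destroy_tensors_data ()). */
  status = ml_single_inference (single, input, &output);
  if (status == ML_ERROR_NONE) {
    /* was ml_util_get_tensor_data () */
    ml_tensors_data_get_tensor_data (output, 0, &raw, &raw_size);
    /* ... consume raw / raw_size ... */
    ml_tensors_data_destroy (output);
  }

done:
  if (input)
    ml_tensors_data_destroy (input);
  if (single)
    ml_single_close (single);
  /* was ml_util_destroy_tensors_info () */
  ml_tensors_info_destroy (in_info);

  return status;
}

The same renaming applies on the pipeline side: a handle obtained with ml_pipeline_src_get_tensors_info() is now released with ml_tensors_info_destroy(), and sink callbacks read frames through ml_tensors_data_get_tensor_data().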