* @since_tizen 5.5
* @remarks The @a data can be used only in the callback. To use outside, make a copy.
* @remarks The @a info can be used only in the callback. To use outside, make a copy.
- * @param[out] data The handle of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by ml_util_get_tensors_count() with the handle 'info'. Note that max num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
+ * @param[out] data The handle of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by ml_tensors_info_get_count() with the handle 'info'. Note that max num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
* @param[out] info The handle of tensors information (cardinality, dimension, and type of given tensor/tensors).
* @param[in,out] user_data The user's private data, passed to the callback.
*/
/**
* @brief Gets a handle for the tensors information of given src node.
* @since_tizen 5.5
- * @remarks If the function succeeds, @a info handle must be released using ml_util_destroy_tensors_info().
+ * @remarks If the function succeeds, @a info handle must be released using ml_tensors_info_destroy().
* @param[in] src_handle The source handle returned by ml_pipeline_src_get_handle().
* @param[out] info The handle of tensors information.
* @return 0 on success. Otherwise a negative error value.
** NNStreamer Utilities **
****************************************************/
/**
- * @brief Allocates a tensors information handle with default value.
+ * @brief Creates a tensors information handle with default value.
* @since_tizen 5.5
* @param[out] info The handle of tensors information.
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_allocate_tensors_info (ml_tensors_info_h *info);
+int ml_tensors_info_create (ml_tensors_info_h *info);
/**
* @brief Frees the given handle of a tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_destroy_tensors_info (ml_tensors_info_h info);
+int ml_tensors_info_destroy (ml_tensors_info_h info);
/**
* @brief Validates the given tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_validate_tensors_info (const ml_tensors_info_h info, bool *valid);
+int ml_tensors_info_validate (const ml_tensors_info_h info, bool *valid);
/**
* @brief Copies the tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h src);
+int ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src);
/**
* @brief Sets the number of tensors with given handle of tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_set_tensors_count (ml_tensors_info_h info, unsigned int count);
+int ml_tensors_info_set_count (ml_tensors_info_h info, unsigned int count);
/**
* @brief Gets the number of tensors with given handle of tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_get_tensors_count (ml_tensors_info_h info, unsigned int *count);
+int ml_tensors_info_get_count (ml_tensors_info_h info, unsigned int *count);
/**
* @brief Sets the tensor name with given handle of tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_set_tensor_name (ml_tensors_info_h info, unsigned int index, const char *name);
+int ml_tensors_info_set_tensor_name (ml_tensors_info_h info, unsigned int index, const char *name);
/**
* @brief Gets the tensor name with given handle of tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_get_tensor_name (ml_tensors_info_h info, unsigned int index, char **name);
+int ml_tensors_info_get_tensor_name (ml_tensors_info_h info, unsigned int index, char **name);
/**
* @brief Sets the tensor type with given handle of tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_set_tensor_type (ml_tensors_info_h info, unsigned int index, const ml_tensor_type_e type);
+int ml_tensors_info_set_tensor_type (ml_tensors_info_h info, unsigned int index, const ml_tensor_type_e type);
/**
* @brief Gets the tensor type with given handle of tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_get_tensor_type (ml_tensors_info_h info, unsigned int index, ml_tensor_type_e *type);
+int ml_tensors_info_get_tensor_type (ml_tensors_info_h info, unsigned int index, ml_tensor_type_e *type);
/**
* @brief Sets the tensor dimension with given handle of tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_set_tensor_dimension (ml_tensors_info_h info, unsigned int index, const ml_tensor_dimension dimension);
+int ml_tensors_info_set_tensor_dimension (ml_tensors_info_h info, unsigned int index, const ml_tensor_dimension dimension);
/**
* @brief Gets the tensor dimension with given handle of tensors information.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_get_tensor_dimension (ml_tensors_info_h info, unsigned int index, ml_tensor_dimension dimension);
+int ml_tensors_info_get_tensor_dimension (ml_tensors_info_h info, unsigned int index, ml_tensor_dimension dimension);
/**
* @brief Gets the byte size of the given tensors information.
* @param[in] info The tensors' information handle.
* @return @c >= 0 on success with byte size.
*/
-size_t ml_util_get_tensors_size (const ml_tensors_info_h info);
+size_t ml_tensors_info_get_size (const ml_tensors_info_h info);
/**
- * @brief Allocates a tensor data frame with the given tensors information.
+ * @brief Creates a tensor data frame with the given tensors information.
* @since_tizen 5.5
* @param[in] info The handle of tensors information for the allocation.
- * @param[out] data The handle of tensors data. The caller is responsible for freeing the allocated data with ml_util_destroy_tensors_data().
+ * @param[out] data The handle of tensors data. The caller is responsible for freeing the allocated data with ml_tensors_data_destroy().
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
* @retval #ML_ERROR_STREAMS_PIPE Failed to allocate new memory.
*/
-int ml_util_allocate_tensors_data (const ml_tensors_info_h info, ml_tensors_data_h *data);
+int ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h *data);
/**
* @brief Frees the given tensors' data handle.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_destroy_tensors_data (ml_tensors_data_h data);
+int ml_tensors_data_destroy (ml_tensors_data_h data);
/**
* @brief Gets a tensor data of given handle.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_get_tensor_data (ml_tensors_data_h data, unsigned int index, void **raw_data, size_t *data_size);
+int ml_tensors_data_get_tensor_data (ml_tensors_data_h data, unsigned int index, void **raw_data, size_t *data_size);
/**
* @brief Copies a tensor data to given handle.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_copy_tensor_data (ml_tensors_data_h data, unsigned int index, const void *raw_data, const size_t data_size);
+int ml_tensors_data_set_tensor_data (ml_tensors_data_h data, unsigned int index, const void *raw_data, const size_t data_size);
/**
* @brief Checks the availability of the given execution environments.
* @retval #ML_ERROR_NONE Successful and the environments are available.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool *available);
+int ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool *available);
/**
* @}
if (input_info) {
/* Validate input tensor info. */
- if (ml_util_validate_tensors_info (input_info, &valid) != ML_ERROR_NONE ||
+ if (ml_tensors_info_validate (input_info, &valid) != ML_ERROR_NONE ||
valid == false) {
ml_loge ("The given param, input tensor info is invalid.");
return ML_ERROR_INVALID_PARAMETER;
if (output_info) {
/* Validate output tensor info. */
- if (ml_util_validate_tensors_info (output_info, &valid) != ML_ERROR_NONE ||
+ if (ml_tensors_info_validate (output_info, &valid) != ML_ERROR_NONE ||
valid == false) {
ml_loge ("The given param, output tensor info is invalid.");
return ML_ERROR_INVALID_PARAMETER;
/* 2. Determine hw */
/** @todo Now the param hw is ignored. (Supposed CPU only) Support others later. */
- status = ml_util_check_nnfw_availability (nnfw, hw, &available);
+ status = ml_check_nnfw_availability (nnfw, hw, &available);
if (status != ML_ERROR_NONE || !available) {
ml_loge ("The given nnfw is not available.");
return status;
gchar *str_dim, *str_type, *str_name;
gchar *in_option, *out_option;
- ml_util_copy_tensors_info_from_ml (&in_info, in_tensors_info);
- ml_util_copy_tensors_info_from_ml (&out_info, out_tensors_info);
+ ml_tensors_info_copy_from_ml (&in_info, in_tensors_info);
+ ml_tensors_info_copy_from_ml (&out_info, out_tensors_info);
/* Set input option */
str_dim = gst_tensors_info_get_dimensions_string (&in_info);
single_h->src = appsrc;
single_h->sink = appsink;
single_h->filter = filter;
- ml_util_initialize_tensors_info (&single_h->in_info);
- ml_util_initialize_tensors_info (&single_h->out_info);
+ ml_tensors_info_initialize (&single_h->in_info);
+ ml_tensors_info_initialize (&single_h->out_info);
/* 5. Set in/out caps and metadata */
if (in_tensors_info) {
- caps = ml_util_get_caps_from_tensors_info (in_tensors_info);
- ml_util_copy_tensors_info (&single_h->in_info, in_tensors_info);
+ caps = ml_tensors_info_get_caps (in_tensors_info);
+ ml_tensors_info_clone (&single_h->in_info, in_tensors_info);
} else {
ml_tensors_info_h in_info;
ml_single_get_input_info (single_h, &in_info);
- ml_util_copy_tensors_info (&single_h->in_info, in_info);
- ml_util_destroy_tensors_info (in_info);
+ ml_tensors_info_clone (&single_h->in_info, in_info);
+ ml_tensors_info_destroy (in_info);
- status = ml_util_validate_tensors_info (&single_h->in_info, &valid);
+ status = ml_tensors_info_validate (&single_h->in_info, &valid);
if (status != ML_ERROR_NONE || valid == false) {
ml_loge ("Failed to get the input tensor info.");
goto error;
}
- caps = ml_util_get_caps_from_tensors_info (&single_h->in_info);
+ caps = ml_tensors_info_get_caps (&single_h->in_info);
}
gst_app_src_set_caps (GST_APP_SRC (appsrc), caps);
gst_caps_unref (caps);
if (out_tensors_info) {
- caps = ml_util_get_caps_from_tensors_info (out_tensors_info);
- ml_util_copy_tensors_info (&single_h->out_info, out_tensors_info);
+ caps = ml_tensors_info_get_caps (out_tensors_info);
+ ml_tensors_info_clone (&single_h->out_info, out_tensors_info);
} else {
ml_tensors_info_h out_info;
ml_single_get_output_info (single_h, &out_info);
- ml_util_copy_tensors_info (&single_h->out_info, out_info);
- ml_util_destroy_tensors_info (out_info);
+ ml_tensors_info_clone (&single_h->out_info, out_info);
+ ml_tensors_info_destroy (out_info);
- status = ml_util_validate_tensors_info (&single_h->out_info, &valid);
+ status = ml_tensors_info_validate (&single_h->out_info, &valid);
if (status != ML_ERROR_NONE || valid == false) {
ml_loge ("Failed to get the output tensor info.");
goto error;
}
- caps = ml_util_get_caps_from_tensors_info (&single_h->out_info);
+ caps = ml_tensors_info_get_caps (&single_h->out_info);
}
gst_app_sink_set_caps (GST_APP_SINK (appsink), caps);
single_h->filter = NULL;
}
- ml_util_free_tensors_info (&single_h->in_info);
- ml_util_free_tensors_info (&single_h->out_info);
+ ml_tensors_info_free (&single_h->in_info);
+ ml_tensors_info_free (&single_h->out_info);
status = ml_pipeline_destroy (single_h->pipe);
g_free (single_h);
in_data = (ml_tensors_data_s *) input;
/* Allocate output buffer */
- status = ml_util_allocate_tensors_data (&single_h->out_info, output);
+ status = ml_tensors_data_create (&single_h->out_info, output);
if (status != ML_ERROR_NONE) {
ml_loge ("Failed to allocate the memory block.");
*output = NULL;
single_h = (ml_single *) single;
/* allocate handle for tensors info */
- ml_util_allocate_tensors_info (info);
+ ml_tensors_info_create (info);
input_info = (ml_tensors_info_s *) (*info);
gst_tensors_info_init (&gst_info);
ml_logw ("Invalid state, input tensor name is mismatched in filter.");
}
- ml_util_copy_tensors_info_from_gst (input_info, &gst_info);
+ ml_tensors_info_copy_from_gst (input_info, &gst_info);
gst_tensors_info_free (&gst_info);
return ML_ERROR_NONE;
}
single_h = (ml_single *) single;
/* allocate handle for tensors info */
- ml_util_allocate_tensors_info (info);
+ ml_tensors_info_create (info);
output_info = (ml_tensors_info_s *) (*info);
gst_tensors_info_init (&gst_info);
ml_logw ("Invalid state, output tensor name is mismatched in filter.");
}
- ml_util_copy_tensors_info_from_gst (output_info, &gst_info);
+ ml_tensors_info_copy_from_gst (output_info, &gst_info);
gst_tensors_info_free (&gst_info);
return ML_ERROR_NONE;
}
* @brief Creates a tensors information handle with default value.
*/
int
-ml_util_allocate_tensors_info (ml_tensors_info_h * info)
+ml_tensors_info_create (ml_tensors_info_h * info)
{
ml_tensors_info_s *tensors_info;
return ML_ERROR_INVALID_PARAMETER;
*info = tensors_info = g_new0 (ml_tensors_info_s, 1);
- ml_util_initialize_tensors_info (tensors_info);
+ ml_tensors_info_initialize (tensors_info);
return ML_ERROR_NONE;
}
* @brief Frees the given handle of a tensors information.
*/
int
-ml_util_destroy_tensors_info (ml_tensors_info_h info)
+ml_tensors_info_destroy (ml_tensors_info_h info)
{
ml_tensors_info_s *tensors_info;
if (!tensors_info)
return ML_ERROR_INVALID_PARAMETER;
- ml_util_free_tensors_info (tensors_info);
+ ml_tensors_info_free (tensors_info);
g_free (tensors_info);
return ML_ERROR_NONE;
* @brief Initializes the tensors information with default value.
*/
int
-ml_util_initialize_tensors_info (ml_tensors_info_s * info)
+ml_tensors_info_initialize (ml_tensors_info_s * info)
{
guint i, j;
* @brief Validates the given tensor info is valid.
*/
static int
-ml_util_validate_tensor_info (const ml_tensor_info_s * info)
+ml_tensor_info_validate (const ml_tensor_info_s * info)
{
guint i;
* @brief Validates the given tensors info is valid.
*/
int
-ml_util_validate_tensors_info (const ml_tensors_info_h info, bool * valid)
+ml_tensors_info_validate (const ml_tensors_info_h info, bool * valid)
{
ml_tensors_info_s *tensors_info;
guint i;
for (i = 0; i < tensors_info->num_tensors; i++) {
/* Failed if returned value is not 0 (ML_ERROR_NONE) */
- if (ml_util_validate_tensor_info (&tensors_info->info[i]) != ML_ERROR_NONE)
+ if (ml_tensor_info_validate (&tensors_info->info[i]) != ML_ERROR_NONE)
goto done;
}
* @brief Sets the number of tensors with given handle of tensors information.
*/
int
-ml_util_set_tensors_count (ml_tensors_info_h info, unsigned int count)
+ml_tensors_info_set_count (ml_tensors_info_h info, unsigned int count)
{
ml_tensors_info_s *tensors_info;
* @brief Gets the number of tensors with given handle of tensors information.
*/
int
-ml_util_get_tensors_count (ml_tensors_info_h info, unsigned int *count)
+ml_tensors_info_get_count (ml_tensors_info_h info, unsigned int *count)
{
ml_tensors_info_s *tensors_info;
* @brief Sets the tensor name with given handle of tensors information.
*/
int
-ml_util_set_tensor_name (ml_tensors_info_h info,
+ml_tensors_info_set_tensor_name (ml_tensors_info_h info,
unsigned int index, const char *name)
{
ml_tensors_info_s *tensors_info;
* @brief Gets the tensor name with given handle of tensors information.
*/
int
-ml_util_get_tensor_name (ml_tensors_info_h info,
+ml_tensors_info_get_tensor_name (ml_tensors_info_h info,
unsigned int index, char **name)
{
ml_tensors_info_s *tensors_info;
* @brief Sets the tensor type with given handle of tensors information.
*/
int
-ml_util_set_tensor_type (ml_tensors_info_h info,
+ml_tensors_info_set_tensor_type (ml_tensors_info_h info,
unsigned int index, const ml_tensor_type_e type)
{
ml_tensors_info_s *tensors_info;
* @brief Gets the tensor type with given handle of tensors information.
*/
int
-ml_util_get_tensor_type (ml_tensors_info_h info,
+ml_tensors_info_get_tensor_type (ml_tensors_info_h info,
unsigned int index, ml_tensor_type_e * type)
{
ml_tensors_info_s *tensors_info;
* @brief Sets the tensor dimension with given handle of tensors information.
*/
int
-ml_util_set_tensor_dimension (ml_tensors_info_h info,
+ml_tensors_info_set_tensor_dimension (ml_tensors_info_h info,
unsigned int index, const ml_tensor_dimension dimension)
{
ml_tensors_info_s *tensors_info;
* @brief Gets the tensor dimension with given handle of tensors information.
*/
int
-ml_util_get_tensor_dimension (ml_tensors_info_h info,
+ml_tensors_info_get_tensor_dimension (ml_tensors_info_h info,
unsigned int index, ml_tensor_dimension dimension)
{
ml_tensors_info_s *tensors_info;
* @brief Gets the byte size of the given tensor info.
*/
size_t
-ml_util_get_tensor_size (const ml_tensor_info_s * info)
+ml_tensor_info_get_size (const ml_tensor_info_s * info)
{
size_t tensor_size;
gint i;
* @brief Gets the byte size of the given tensors info.
*/
size_t
-ml_util_get_tensors_size (const ml_tensors_info_h info)
+ml_tensors_info_get_size (const ml_tensors_info_h info)
{
ml_tensors_info_s *tensors_info;
size_t tensor_size;
tensor_size = 0;
for (i = 0; i < tensors_info->num_tensors; i++) {
- tensor_size += ml_util_get_tensor_size (&tensors_info->info[i]);
+ tensor_size += ml_tensor_info_get_size (&tensors_info->info[i]);
}
return tensor_size;
* @brief Frees the tensors info pointer.
*/
void
-ml_util_free_tensors_info (ml_tensors_info_s * info)
+ml_tensors_info_free (ml_tensors_info_s * info)
{
gint i;
}
}
- ml_util_initialize_tensors_info (info);
+ ml_tensors_info_initialize (info);
}
/**
* @brief Frees the tensors data pointer.
*/
int
-ml_util_destroy_tensors_data (ml_tensors_data_h data)
+ml_tensors_data_destroy (ml_tensors_data_h data)
{
ml_tensors_data_s *_data;
guint i;
* @brief Creates a tensor data frame with the given tensors info. (more info in nnstreamer.h)
*/
int
-ml_util_allocate_tensors_data (const ml_tensors_info_h info,
+ml_tensors_data_create (const ml_tensors_info_h info,
ml_tensors_data_h * data)
{
ml_tensors_data_s *_data;
_data->num_tensors = tensors_info->num_tensors;
for (i = 0; i < _data->num_tensors; i++) {
- _data->tensors[i].size = ml_util_get_tensor_size (&tensors_info->info[i]);
+ _data->tensors[i].size = ml_tensor_info_get_size (&tensors_info->info[i]);
_data->tensors[i].tensor = g_malloc0 (_data->tensors[i].size);
if (_data->tensors[i].tensor == NULL)
goto failed;
* @brief Gets a tensor data of given handle.
*/
int
-ml_util_get_tensor_data (ml_tensors_data_h data, unsigned int index,
+ml_tensors_data_get_tensor_data (ml_tensors_data_h data, unsigned int index,
void **raw_data, size_t * data_size)
{
ml_tensors_data_s *_data;
* @brief Copies a tensor data to given handle.
*/
int
-ml_util_copy_tensor_data (ml_tensors_data_h data, unsigned int index,
+ml_tensors_data_set_tensor_data (ml_tensors_data_h data, unsigned int index,
const void *raw_data, const size_t data_size)
{
ml_tensors_data_s *_data;
* @brief Copies tensor meta info.
*/
int
-ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h src)
+ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src)
{
ml_tensors_info_s *dest_info, *src_info;
guint i, j;
if (!dest_info || !src_info)
return ML_ERROR_INVALID_PARAMETER;
- ml_util_initialize_tensors_info (dest_info);
+ ml_tensors_info_initialize (dest_info);
dest_info->num_tensors = src_info->num_tensors;
* @brief Copies tensor meta info from gst tensors info.
*/
void
-ml_util_copy_tensors_info_from_gst (ml_tensors_info_s * ml_info,
+ml_tensors_info_copy_from_gst (ml_tensors_info_s * ml_info,
const GstTensorsInfo * gst_info)
{
guint i, j;
if (!ml_info || !gst_info)
return;
- ml_util_initialize_tensors_info (ml_info);
+ ml_tensors_info_initialize (ml_info);
max_dim = MIN (ML_TENSOR_RANK_LIMIT, NNS_TENSOR_RANK_LIMIT);
ml_info->num_tensors = gst_info->num_tensors;
* @brief Copies tensor meta info from ml tensors info into gst tensors info.
*/
void
-ml_util_copy_tensors_info_from_ml (GstTensorsInfo * gst_info,
+ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info,
const ml_tensors_info_s * ml_info)
{
guint i, j;
* @brief Gets caps from tensors info.
*/
GstCaps *
-ml_util_get_caps_from_tensors_info (const ml_tensors_info_s * info)
+ml_tensors_info_get_caps (const ml_tensors_info_s * info)
{
GstCaps *caps;
GstTensorsConfig config;
if (!info)
return NULL;
- ml_util_copy_tensors_info_from_ml (&config.info, info);
+ ml_tensors_info_copy_from_ml (&config.info, info);
/* set framerate 0/1 */
config.rate_n = 0;
* @brief Checks the availability of the given execution environments.
*/
int
-ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
+ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
bool * available)
{
if (!available)
if (fp == NULL)
return;
- ml_util_get_tensors_count (info, &num);
+ ml_tensors_info_get_count (info, &num);
for (i = 0; i < num; i++) {
- status = ml_util_get_tensor_data (data, i, &data_ptr, &data_size);
+ status = ml_tensors_data_get_tensor_data (data, i, &data_ptr, &data_size);
if (status == ML_ERROR_NONE)
fwrite (data_ptr, data_size, 1, fp);
}
status = ml_pipeline_src_get_tensors_info (srchandle, &info);
EXPECT_EQ (status, ML_ERROR_NONE);
- ml_util_get_tensors_count (info, &count);
+ ml_tensors_info_get_count (info, &count);
EXPECT_EQ (count, 1U);
- ml_util_get_tensor_type (info, 0, &type);
+ ml_tensors_info_get_tensor_type (info, 0, &type);
EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
- ml_util_get_tensor_dimension (info, 0, dim);
+ ml_tensors_info_get_tensor_dimension (info, 0, dim);
EXPECT_EQ (dim[0], 4U);
EXPECT_EQ (dim[1], 1U);
EXPECT_EQ (dim[2], 1U);
EXPECT_EQ (dim[3], 1U);
- status = ml_util_allocate_tensors_data (info, &data1);
+ status = ml_tensors_data_create (info, &data1);
EXPECT_EQ (status, ML_ERROR_NONE);
- ml_util_destroy_tensors_info (info);
+ ml_tensors_info_destroy (info);
- status = ml_util_copy_tensor_data (data1, 0, uintarray1[0], 4);
+ status = ml_tensors_data_set_tensor_data (data1, 0, uintarray1[0], 4);
EXPECT_EQ (status, ML_ERROR_NONE);
status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
status = ml_pipeline_src_get_tensors_info (srchandle, &info);
EXPECT_EQ (status, ML_ERROR_NONE);
- ml_util_get_tensors_count (info, &count);
+ ml_tensors_info_get_count (info, &count);
EXPECT_EQ (count, 1U);
- ml_util_get_tensor_type (info, 0, &type);
+ ml_tensors_info_get_tensor_type (info, 0, &type);
EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
- ml_util_get_tensor_dimension (info, 0, dim);
+ ml_tensors_info_get_tensor_dimension (info, 0, dim);
EXPECT_EQ (dim[0], 4U);
EXPECT_EQ (dim[1], 1U);
EXPECT_EQ (dim[2], 1U);
EXPECT_EQ (dim[3], 1U);
for (i = 0; i < 10; i++) {
- status = ml_util_copy_tensor_data (data1, 0, uintarray1[i], 4);
+ status = ml_tensors_data_set_tensor_data (data1, 0, uintarray1[i], 4);
EXPECT_EQ (status, ML_ERROR_NONE);
status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_util_allocate_tensors_data (info, &data2);
+ status = ml_tensors_data_create (info, &data2);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_util_copy_tensor_data (data2, 0, uintarray2[i], 4);
+ status = ml_tensors_data_set_tensor_data (data2, 0, uintarray2[i], 4);
EXPECT_EQ (status, ML_ERROR_NONE);
status = ml_pipeline_src_input_data (srchandle, data2, ML_PIPELINE_BUF_POLICY_AUTO_FREE);
}
g_free (content);
- ml_util_destroy_tensors_info (info);
- ml_util_destroy_tensors_data (data1);
+ ml_tensors_info_destroy (info);
+ ml_tensors_data_destroy (data1);
}
/**
status = ml_pipeline_src_get_tensors_info (srchandle, &info);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_util_allocate_tensors_data (info, &data);
+ status = ml_tensors_data_create (info, &data);
/* null data */
status = ml_pipeline_src_input_data (srchandle, NULL, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
status = ml_pipeline_destroy (handle);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_util_destroy_tensors_data (data);
+ status = ml_tensors_data_destroy (data);
EXPECT_EQ (status, ML_ERROR_NONE);
}
"mobilenet_v1_1.0_224_quant.tflite", NULL);
ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
- ml_util_allocate_tensors_info (&in_info);
- ml_util_allocate_tensors_info (&out_info);
- ml_util_allocate_tensors_info (&in_res);
- ml_util_allocate_tensors_info (&out_res);
+ ml_tensors_info_create (&in_info);
+ ml_tensors_info_create (&out_info);
+ ml_tensors_info_create (&in_res);
+ ml_tensors_info_create (&out_res);
in_dim[0] = 3;
in_dim[1] = 224;
in_dim[2] = 224;
in_dim[3] = 1;
- ml_util_set_tensors_count (in_info, 1);
- ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
- ml_util_set_tensor_dimension (in_info, 0, in_dim);
+ ml_tensors_info_set_count (in_info, 1);
+ ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
out_dim[0] = 1001;
out_dim[1] = 1;
out_dim[2] = 1;
out_dim[3] = 1;
- ml_util_set_tensors_count (out_info, 1);
- ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
- ml_util_set_tensor_dimension (out_info, 0, out_dim);
+ ml_tensors_info_set_count (out_info, 1);
+ ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim);
status = ml_single_open (&single, test_model, in_info, out_info,
ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
status = ml_single_get_input_info (single, &in_res);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_util_get_tensors_count (in_res, &count);
+ status = ml_tensors_info_get_count (in_res, &count);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (count, 1U);
- status = ml_util_get_tensor_name (in_res, 0, &name);
+ status = ml_tensors_info_get_tensor_name (in_res, 0, &name);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (name == NULL);
- status = ml_util_get_tensor_type (in_res, 0, &type);
+ status = ml_tensors_info_get_tensor_type (in_res, 0, &type);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
- ml_util_get_tensor_dimension (in_res, 0, res_dim);
+ ml_tensors_info_get_tensor_dimension (in_res, 0, res_dim);
EXPECT_TRUE (in_dim[0] == res_dim[0]);
EXPECT_TRUE (in_dim[1] == res_dim[1]);
EXPECT_TRUE (in_dim[2] == res_dim[2]);
status = ml_single_get_output_info (single, &out_res);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_util_get_tensors_count (out_res, &count);
+ status = ml_tensors_info_get_count (out_res, &count);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (count, 1U);
- status = ml_util_get_tensor_name (out_res, 0, &name);
+ status = ml_tensors_info_get_tensor_name (out_res, 0, &name);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (name == NULL);
- status = ml_util_get_tensor_type (out_res, 0, &type);
+ status = ml_tensors_info_get_tensor_type (out_res, 0, &type);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
- ml_util_get_tensor_dimension (out_res, 0, res_dim);
+ ml_tensors_info_get_tensor_dimension (out_res, 0, res_dim);
EXPECT_TRUE (out_dim[0] == res_dim[0]);
EXPECT_TRUE (out_dim[1] == res_dim[1]);
EXPECT_TRUE (out_dim[2] == res_dim[2]);
input = output = NULL;
/* generate dummy data */
- status = ml_util_allocate_tensors_data (in_info, &input);
+ status = ml_tensors_data_create (in_info, &input);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (input != NULL);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (output != NULL);
- ml_util_destroy_tensors_data (output);
- ml_util_destroy_tensors_data (input);
+ ml_tensors_data_destroy (output);
+ ml_tensors_data_destroy (input);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
- ml_util_destroy_tensors_info (in_info);
- ml_util_destroy_tensors_info (out_info);
- ml_util_destroy_tensors_info (in_res);
- ml_util_destroy_tensors_info (out_res);
+ ml_tensors_info_destroy (in_info);
+ ml_tensors_info_destroy (out_info);
+ ml_tensors_info_destroy (in_res);
+ ml_tensors_info_destroy (out_res);
}
/**
"mobilenet_v1_1.0_224_quant.tflite", NULL);
ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
- ml_util_allocate_tensors_info (&in_info);
- ml_util_allocate_tensors_info (&out_info);
+ ml_tensors_info_create (&in_info);
+ ml_tensors_info_create (&out_info);
in_dim[0] = 3;
in_dim[1] = 224;
in_dim[2] = 224;
in_dim[3] = 1;
- ml_util_set_tensors_count (in_info, 1);
- ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
- ml_util_set_tensor_dimension (in_info, 0, in_dim);
+ ml_tensors_info_set_count (in_info, 1);
+ ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
out_dim[0] = 1001;
out_dim[1] = 1;
out_dim[2] = 1;
out_dim[3] = 1;
- ml_util_set_tensors_count (out_info, 1);
- ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
- ml_util_set_tensor_dimension (out_info, 0, out_dim);
+ ml_tensors_info_set_count (out_info, 1);
+ ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim);
status = ml_single_open (&single, test_model, NULL, NULL,
ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
input = output = NULL;
/* generate dummy data */
- status = ml_util_allocate_tensors_data (in_info, &input);
+ status = ml_tensors_data_create (in_info, &input);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (input != NULL);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (output != NULL);
- ml_util_destroy_tensors_data (output);
- ml_util_destroy_tensors_data (input);
+ ml_tensors_data_destroy (output);
+ ml_tensors_data_destroy (input);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
- ml_util_destroy_tensors_info (in_info);
- ml_util_destroy_tensors_info (out_info);
+ ml_tensors_info_destroy (in_info);
+ ml_tensors_info_destroy (out_info);
}
#endif /* ENABLE_TENSORFLOW_LITE */
"libnnstreamer_customfilter_passthrough_variable.so", NULL);
ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
- ml_util_allocate_tensors_info (&in_info);
- ml_util_allocate_tensors_info (&out_info);
+ ml_tensors_info_create (&in_info);
+ ml_tensors_info_create (&out_info);
- ml_util_set_tensors_count (in_info, 2);
+ ml_tensors_info_set_count (in_info, 2);
in_dim[0] = 10;
in_dim[1] = 1;
in_dim[2] = 1;
in_dim[3] = 1;
- ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
- ml_util_set_tensor_dimension (in_info, 0, in_dim);
+ ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
+ ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
- ml_util_set_tensor_type (in_info, 1, ML_TENSOR_TYPE_FLOAT32);
- ml_util_set_tensor_dimension (in_info, 1, in_dim);
+ ml_tensors_info_set_tensor_type (in_info, 1, ML_TENSOR_TYPE_FLOAT32);
+ ml_tensors_info_set_tensor_dimension (in_info, 1, in_dim);
- ml_util_copy_tensors_info (out_info, in_info);
+ ml_tensors_info_clone (out_info, in_info);
status = ml_single_open (&single, test_model, in_info, out_info,
ML_NNFW_TYPE_CUSTOM_FILTER, ML_NNFW_HW_ANY);
input = output = NULL;
/* generate input data */
- status = ml_util_allocate_tensors_data (in_info, &input);
+ status = ml_tensors_data_create (in_info, &input);
EXPECT_EQ (status, ML_ERROR_NONE);
ASSERT_TRUE (input != NULL);
int16_t i16 = (int16_t) (i + 1);
float f32 = (float) (i + .1);
- status = ml_util_get_tensor_data (input, 0, &data_ptr, &data_size);
+ status = ml_tensors_data_get_tensor_data (input, 0, &data_ptr, &data_size);
EXPECT_EQ (status, ML_ERROR_NONE);
((int16_t *) data_ptr)[i] = i16;
- status = ml_util_get_tensor_data (input, 1, &data_ptr, &data_size);
+ status = ml_tensors_data_get_tensor_data (input, 1, &data_ptr, &data_size);
EXPECT_EQ (status, ML_ERROR_NONE);
((float *) data_ptr)[i] = f32;
}
int16_t i16 = (int16_t) (i + 1);
float f32 = (float) (i + .1);
- status = ml_util_get_tensor_data (output, 0, &data_ptr, &data_size);
+ status = ml_tensors_data_get_tensor_data (output, 0, &data_ptr, &data_size);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (((int16_t *) data_ptr)[i], i16);
- status = ml_util_get_tensor_data (output, 1, &data_ptr, &data_size);
+ status = ml_tensors_data_get_tensor_data (output, 1, &data_ptr, &data_size);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_FLOAT_EQ (((float *) data_ptr)[i], f32);
}
- ml_util_destroy_tensors_data (output);
- ml_util_destroy_tensors_data (input);
+ ml_tensors_data_destroy (output);
+ ml_tensors_data_destroy (input);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
- ml_util_destroy_tensors_info (in_info);
- ml_util_destroy_tensors_info (out_info);
+ ml_tensors_info_destroy (in_info);
+ ml_tensors_info_destroy (out_info);
}
#ifdef ENABLE_TENSORFLOW
"yes.wav", NULL);
ASSERT_TRUE (g_file_test (test_file, G_FILE_TEST_EXISTS));
- ml_util_allocate_tensors_info (&in_info);
- ml_util_allocate_tensors_info (&out_info);
- ml_util_allocate_tensors_info (&in_res);
- ml_util_allocate_tensors_info (&out_res);
+ ml_tensors_info_create (&in_info);
+ ml_tensors_info_create (&out_info);
+ ml_tensors_info_create (&in_res);
+ ml_tensors_info_create (&out_res);
in_dim[0] = 1;
in_dim[1] = 16022;
in_dim[2] = 1;
in_dim[3] = 1;
- ml_util_set_tensors_count (in_info, 1);
- ml_util_set_tensor_name (in_info, 0, "wav_data");
- ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
- ml_util_set_tensor_dimension (in_info, 0, in_dim);
+ ml_tensors_info_set_count (in_info, 1);
+ ml_tensors_info_set_tensor_name (in_info, 0, "wav_data");
+ ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
+ ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
out_dim[0] = 12;
out_dim[1] = 1;
out_dim[2] = 1;
out_dim[3] = 1;
- ml_util_set_tensors_count (out_info, 1);
- ml_util_set_tensor_name (out_info, 0, "labels_softmax");
- ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_FLOAT32);
- ml_util_set_tensor_dimension (out_info, 0, out_dim);
+ ml_tensors_info_set_count (out_info, 1);
+ ml_tensors_info_set_tensor_name (out_info, 0, "labels_softmax");
+ ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_FLOAT32);
+ ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim);
ASSERT_TRUE (g_file_get_contents (test_file, &contents, &len, NULL));
- ASSERT_TRUE (len == ml_util_get_tensors_size (in_info));
+ ASSERT_TRUE (len == ml_tensors_info_get_size (in_info));
status = ml_single_open (&single, test_model, in_info, out_info,
ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_ANY);
status = ml_single_get_input_info (single, &in_res);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_util_get_tensors_count (in_res, &count);
+ status = ml_tensors_info_get_count (in_res, &count);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (count, 1U);
- status = ml_util_get_tensor_name (in_res, 0, &name);
+ status = ml_tensors_info_get_tensor_name (in_res, 0, &name);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (g_str_equal (name, "wav_data"));
- status = ml_util_get_tensor_type (in_res, 0, &type);
+ status = ml_tensors_info_get_tensor_type (in_res, 0, &type);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (type, ML_TENSOR_TYPE_INT16);
- ml_util_get_tensor_dimension (in_res, 0, res_dim);
+ ml_tensors_info_get_tensor_dimension (in_res, 0, res_dim);
EXPECT_TRUE (in_dim[0] == res_dim[0]);
EXPECT_TRUE (in_dim[1] == res_dim[1]);
EXPECT_TRUE (in_dim[2] == res_dim[2]);
status = ml_single_get_output_info (single, &out_res);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_util_get_tensors_count (out_res, &count);
+ status = ml_tensors_info_get_count (out_res, &count);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (count, 1U);
- status = ml_util_get_tensor_name (out_res, 0, &name);
+ status = ml_tensors_info_get_tensor_name (out_res, 0, &name);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (g_str_equal (name, "labels_softmax"));
- status = ml_util_get_tensor_type (out_res, 0, &type);
+ status = ml_tensors_info_get_tensor_type (out_res, 0, &type);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (type, ML_TENSOR_TYPE_FLOAT32);
- ml_util_get_tensor_dimension (out_res, 0, res_dim);
+ ml_tensors_info_get_tensor_dimension (out_res, 0, res_dim);
EXPECT_TRUE (out_dim[0] == res_dim[0]);
EXPECT_TRUE (out_dim[1] == res_dim[1]);
EXPECT_TRUE (out_dim[2] == res_dim[2]);
input = output = NULL;
/* generate input data */
- status = ml_util_allocate_tensors_data (in_info, &input);
+ status = ml_tensors_data_create (in_info, &input);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (input != NULL);
- status = ml_util_copy_tensor_data (input, 0, contents, len);
+ status = ml_tensors_data_set_tensor_data (input, 0, contents, len);
EXPECT_EQ (status, ML_ERROR_NONE);
status = ml_single_inference (single, input, &output);
EXPECT_TRUE (output != NULL);
/* check result (max score index is 2) */
- status = ml_util_get_tensor_data (output, 1, &data_ptr, &data_size);
+ status = ml_tensors_data_get_tensor_data (output, 1, &data_ptr, &data_size);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
- status = ml_util_get_tensor_data (output, 0, &data_ptr, &data_size);
+ status = ml_tensors_data_get_tensor_data (output, 0, &data_ptr, &data_size);
EXPECT_EQ (status, ML_ERROR_NONE);
max_score = .0;
EXPECT_EQ (max_score_index, 2);
- ml_util_destroy_tensors_data (output);
- ml_util_destroy_tensors_data (input);
+ ml_tensors_data_destroy (output);
+ ml_tensors_data_destroy (input);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
g_free (test_file);
g_free (contents);
- ml_util_destroy_tensors_info (in_info);
- ml_util_destroy_tensors_info (out_info);
- ml_util_destroy_tensors_info (in_res);
- ml_util_destroy_tensors_info (out_res);
+ ml_tensors_info_destroy (in_info);
+ ml_tensors_info_destroy (out_info);
+ ml_tensors_info_destroy (in_res);
+ ml_tensors_info_destroy (out_res);
}
#endif /* ENABLE_TENSORFLOW */
"mobilenet_v1_1.0_224_quant.tflite", NULL);
ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
- ml_util_allocate_tensors_info (&in_info);
- ml_util_allocate_tensors_info (&out_info);
+ ml_tensors_info_create (&in_info);
+ ml_tensors_info_create (&out_info);
/* invalid file path */
status = ml_single_open (&single, "wrong_file_name", in_info, out_info,
in_dim[1] = 224;
in_dim[2] = 224;
in_dim[3] = 1;
- ml_util_set_tensors_count (in_info, 1);
- ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
- ml_util_set_tensor_dimension (in_info, 0, in_dim);
+ ml_tensors_info_set_count (in_info, 1);
+ ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
/* invalid output tensor info */
status = ml_single_open (&single, test_model, in_info, out_info,
out_dim[1] = 1;
out_dim[2] = 1;
out_dim[3] = 1;
- ml_util_set_tensors_count (out_info, 1);
- ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
- ml_util_set_tensor_dimension (out_info, 0, out_dim);
+ ml_tensors_info_set_count (out_info, 1);
+ ml_tensors_info_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim);
/* invalid file extension */
status = ml_single_open (&single, test_model, in_info, out_info,
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
- ml_util_destroy_tensors_info (in_info);
- ml_util_destroy_tensors_info (out_info);
+ ml_tensors_info_destroy (in_info);
+ ml_tensors_info_destroy (out_info);
}
#endif /* ENABLE_TENSORFLOW_LITE */