typedef void *ml_tensors_info_h;
/**
+ * @brief A handle of input or output frames. #ml_tensors_info_h is the handle for tensors metadata.
+ * @since_tizen 5.5
+ */
+typedef void *ml_tensors_data_h;
+
+/**
* @brief A handle of an NNStreamer pipeline.
* @since_tizen 5.5
*/
} ml_pipeline_switch_e;
/**
- * @brief An instance of a single input or output frame.
- * @since_tizen 5.5
- */
-typedef struct {
- void *tensor; /**< The instance of tensor data. */
- size_t size; /**< The size of tensor. */
-} ml_tensor_data_s;
-
-/**
- * @brief An instance of input or output frames. #ml_tensors_info_h is the handle for tensors metadata.
- * @since_tizen 5.5
- */
-typedef struct {
- unsigned int num_tensors; /**< The number of tensors. */
- ml_tensor_data_s tensors[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor data. NULL for unused tensors. */
-} ml_tensors_data_s;
-
-/**
* @brief Callback for sink element of NNStreamer pipelines (pipeline's output)
* @detail If an application wants to accept data outputs of an NNStreamer stream, use this callback to get data from the stream. Note that the buffer may be deallocated after the return and this is synchronously called. Thus, if you need the data afterwards, copy the data to another buffer and return fast. Do not hold too much time in the callback. It is recommended to use very small tensors at sinks.
* @since_tizen 5.5
* @remarks The @a data can be used only in the callback. To use outside, make a copy.
* @remarks The @a info can be used only in the callback. To use outside, make a copy.
- * @param[out] data The contents of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by data->num_tensors. Note that max num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
+ * @param[out] data The handle of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by ml_util_get_tensors_count() with the handle 'info'. Note that max num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
* @param[out] info The handle of tensors information (cardinality, dimension, and type of given tensor/tensors).
* @param[in,out] user_data User Application's Private Data.
*/
-typedef void (*ml_pipeline_sink_cb) (const ml_tensors_data_s *data, const ml_tensors_info_h info, void *user_data);
+typedef void (*ml_pipeline_sink_cb) (const ml_tensors_data_h data, const ml_tensors_info_h info, void *user_data);
/****************************************************
** NNStreamer Pipeline Construction (gst-parse) **
/**
* @brief Puts an input data frame.
* @param[in] h The source handle returned by ml_pipeline_src_get_handle().
- * @param[in] data The input tensors, in the format of tensors info given by ml_pipeline_src_get_tensors_info().
+ * @param[in] data The handle of input tensors, in the format of tensors info given by ml_pipeline_src_get_tensors_info().
* @param[in] policy The policy of buf deallocation.
* @return 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_STREAMS_PIPE The pipeline has inconsistent padcaps. Not negotiated?
* @retval #ML_ERROR_TRY_AGAIN The pipeline is not ready yet.
*/
-int ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s *data, ml_pipeline_buf_policy_e policy);
+int ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_h data, ml_pipeline_buf_policy_e policy);
/**
* @brief Gets a handle for the tensors information of given src node.
size_t ml_util_get_tensors_size (const ml_tensors_info_h info);
/**
- * @brief Frees the tensors data pointer.
+ * @brief Allocates a tensor data frame with the given tensors information.
* @since_tizen 5.5
- * @param[in] data The tensors data pointer to be freed.
+ * @param[in] info The handle of tensors information for the allocation.
+ * @param[out] data The handle of tensors data allocated. The caller is responsible for freeing the allocated data with ml_util_destroy_tensors_data().
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ * @retval #ML_ERROR_STREAMS_PIPE Failed to allocate new memory.
*/
-void ml_util_free_tensors_data (ml_tensors_data_s **data);
+int ml_util_allocate_tensors_data (const ml_tensors_info_h info, ml_tensors_data_h *data);
/**
- * @brief Allocates a tensor data frame with the given tensors information.
+ * @brief Frees the given handle of a tensors data.
* @since_tizen 5.5
- * @param[in] info The handle of tensors information for the allocation.
- * @return @c Tensors data pointer allocated. Null if error. Caller is responsible to free the allocated data with ml_util_free_tensors_data().
- * @retval NULL There is an error. Call ml_util_get_last_error() to get specific error code.
+ * @param[in] data The handle of tensors data.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_destroy_tensors_data (ml_tensors_data_h data);
+
+/**
+ * @brief Gets a tensor data of given handle.
+ * @since_tizen 5.5
+ * @param[in] data The handle of tensors data.
+ * @param[in] index The index of tensor in tensors data.
+ * @param[out] raw_data Pointer to the raw tensor data held by the handle (internal buffer, not a copy; do not free).
+ * @param[out] data_size Byte size of tensor data.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_get_tensor_data (ml_tensors_data_h data, const unsigned int index, void **raw_data, size_t *data_size);
+
+/**
+ * @brief Copies a tensor data to given handle.
+ * @since_tizen 5.5
+ * @param[in] data The handle of tensors data.
+ * @param[in] index The index of tensor in tensors data.
+ * @param[in] raw_data Raw tensor data to be copied.
+ * @param[in] data_size Byte size of raw data.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-ml_tensors_data_s *ml_util_allocate_tensors_data (const ml_tensors_info_h info);
+int ml_util_copy_tensor_data (ml_tensors_data_h data, const unsigned int index, const void *raw_data, const size_t data_size);
/**
* @brief Checks the availability of the given execution environments.
int ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool *available);
/**
- * @brief Gets the last error code.
- * @since_tizen 5.5
- * @return @c 0 on success. Otherwise a negative error value.
- */
-int ml_util_get_last_error (void);
-
-/**
* @}
*/
#ifdef __cplusplus
guint i;
guint num_mems;
GList *l;
- ml_tensors_data_s tensors_data;
+ ml_tensors_data_s *data = NULL;
size_t total_size = 0;
num_mems = gst_buffer_n_memory (b);
}
/* set tensor data */
- memset (&tensors_data, 0, sizeof (ml_tensors_data_s));
+ data = g_new0 (ml_tensors_data_s, 1);
+ g_assert (data);
- tensors_data.num_tensors = num_mems;
+ data->num_tensors = num_mems;
for (i = 0; i < num_mems; i++) {
mem[i] = gst_buffer_peek_memory (b, i);
gst_memory_map (mem[i], &info[i], GST_MAP_READ);
- tensors_data.tensors[i].tensor = info[i].data;
- tensors_data.tensors[i].size = info[i].size;
+ data->tensors[i].tensor = info[i].data;
+ data->tensors[i].size = info[i].size;
total_size += info[i].size;
}
for (i = 0; i < elem->tensors_info.num_tensors; i++) {
size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
- if (sz != tensors_data.tensors[i].size) {
+ if (sz != data->tensors[i].size) {
ml_loge
("The sink event of [%s] cannot be handled because the tensor dimension mismatches.",
elem->name);
ml_pipeline_sink *sink = l->data;
ml_pipeline_sink_cb callback = sink->cb;
- callback (&tensors_data, &elem->tensors_info, sink->pdata);
+ callback (data, &elem->tensors_info, sink->pdata);
/** @todo Measure time. Warn if it takes long. Kill if it takes too long. */
}
gst_memory_unmap (mem[i], &info[i]);
}
+ if (data) {
+ g_free (data);
+ data = NULL;
+ }
return;
}
* @brief Push a data frame to a src (more info in nnstreamer.h)
*/
int
-ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s * data,
+ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_h data,
ml_pipeline_buf_policy_e policy)
{
/** @todo NYI */
GstBuffer *buffer;
+ GstMemory *mem;
GstFlowReturn gret;
+ ml_tensors_data_s *_data;
unsigned int i;
handle_init (src, src, h);
- if (!data) {
+ _data = (ml_tensors_data_s *) data;
+ if (!_data) {
ml_loge ("The given param data is invalid.");
ret = ML_ERROR_INVALID_PARAMETER;
goto unlock_return;
}
- if (data->num_tensors < 1 || data->num_tensors > ML_TENSOR_SIZE_LIMIT) {
+ if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) {
ml_loge ("The tensor size is invalid. It should be 1 ~ %u; where it is %u",
- ML_TENSOR_SIZE_LIMIT, data->num_tensors);
+ ML_TENSOR_SIZE_LIMIT, _data->num_tensors);
ret = ML_ERROR_INVALID_PARAMETER;
goto unlock_return;
}
goto unlock_return;
}
- if (elem->tensors_info.num_tensors != data->num_tensors) {
+ if (elem->tensors_info.num_tensors != _data->num_tensors) {
ml_loge
("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u",
- elem->name, elem->tensors_info.num_tensors, data->num_tensors);
+ elem->name, elem->tensors_info.num_tensors, _data->num_tensors);
ret = ML_ERROR_INVALID_PARAMETER;
goto unlock_return;
for (i = 0; i < elem->tensors_info.num_tensors; i++) {
size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
- if (sz != data->tensors[i].size) {
+ if (sz != _data->tensors[i].size) {
ml_loge
("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)",
- i, data->tensors[i].size, sz);
+ i, _data->tensors[i].size, sz);
ret = ML_ERROR_INVALID_PARAMETER;
goto unlock_return;
/* Create buffer to be pushed from buf[] */
buffer = gst_buffer_new ();
- for (i = 0; i < data->num_tensors; i++) {
- GstBuffer *addbuffer =
- gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY,
- data->tensors[i].tensor, data->tensors[i].size, 0,
- data->tensors[i].size, data->tensors[i].tensor, ml_buf_policy[policy]);
- buffer = gst_buffer_append (buffer, addbuffer);
+ for (i = 0; i < _data->num_tensors; i++) {
+ mem = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
+ _data->tensors[i].tensor, _data->tensors[i].size, 0,
+ _data->tensors[i].size, _data->tensors[i].tensor, ml_buf_policy[policy]);
+ gst_buffer_append_memory (buffer, mem);
/** @todo Verify that gst_buffer_append lists tensors/gstmem in the correct order */
}
* @bug No known bugs except for NYI items
*/
+#include <string.h>
#include <nnstreamer/nnstreamer_plugin_api.h>
#include <nnstreamer/nnstreamer_plugin_api_filter.h>
#include "nnstreamer.h"
#include "nnstreamer-capi-private.h"
-static int ml_internal_error_code = ML_ERROR_NONE;
-
-/**
- * @brief Gets the last error code.
- */
-int
-ml_util_get_last_error (void)
-{
- return ml_internal_error_code;
-}
-
-/**
- * @brief Sets the last error code.
- */
-void
-ml_util_set_error (int error_code)
-{
- ml_internal_error_code = error_code;
-}
-
/**
* @brief Allocates a tensors information handle with default value.
*/
/**
* @brief Frees the tensors data pointer.
*/
-void
-ml_util_free_tensors_data (ml_tensors_data_s ** data)
+int
+ml_util_destroy_tensors_data (ml_tensors_data_h data)
{
- gint i;
+ ml_tensors_data_s *_data;
+ guint i;
- if (data == NULL || (*data) == NULL)
- return;
+ if (!data)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ _data = (ml_tensors_data_s *) data;
- for (i = 0; i < (*data)->num_tensors; i++) {
- if ((*data)->tensors[i].tensor) {
- g_free ((*data)->tensors[i].tensor);
- (*data)->tensors[i].tensor = NULL;
+ for (i = 0; i < _data->num_tensors; i++) {
+ if (_data->tensors[i].tensor) {
+ g_free (_data->tensors[i].tensor);
+ _data->tensors[i].tensor = NULL;
}
}
- g_free (*data);
- *data = NULL;
+ g_free (_data);
+ return ML_ERROR_NONE;
}
/**
* @brief Allocates a tensor data frame with the given tensors info. (more info in nnstreamer.h)
*/
-ml_tensors_data_s *
-ml_util_allocate_tensors_data (const ml_tensors_info_h info)
+int
+ml_util_allocate_tensors_data (const ml_tensors_info_h info,
+ ml_tensors_data_h * data)
{
- ml_tensors_data_s *data;
+ ml_tensors_data_s *_data;
ml_tensors_info_s *tensors_info;
gint i;
+ if (!info || !data)
+ return ML_ERROR_INVALID_PARAMETER;
+
tensors_info = (ml_tensors_info_s *) info;
+ *data = NULL;
- if (!tensors_info) {
- ml_util_set_error (ML_ERROR_INVALID_PARAMETER);
- return NULL;
+ _data = g_new0 (ml_tensors_data_s, 1);
+ if (!_data) {
+ ml_loge ("Failed to allocate the memory block.");
+ return ML_ERROR_STREAMS_PIPE;
}
- data = g_new0 (ml_tensors_data_s, 1);
- if (!data) {
- ml_loge ("Failed to allocate the memory block.");
- ml_util_set_error (ML_ERROR_STREAMS_PIPE);
- return NULL;
+ _data->num_tensors = tensors_info->num_tensors;
+ for (i = 0; i < _data->num_tensors; i++) {
+ _data->tensors[i].size = ml_util_get_tensor_size (&tensors_info->info[i]);
+ _data->tensors[i].tensor = g_malloc0 (_data->tensors[i].size);
+ if (_data->tensors[i].tensor == NULL)
+ goto failed;
}
- data->num_tensors = tensors_info->num_tensors;
- for (i = 0; i < data->num_tensors; i++) {
- data->tensors[i].size = ml_util_get_tensor_size (&tensors_info->info[i]);
- data->tensors[i].tensor = g_malloc0 (data->tensors[i].size);
+ *data = _data;
+ return ML_ERROR_NONE;
+
+failed:
+ if (_data) {
+ for (i = 0; i < _data->num_tensors; i++) {
+ g_free (_data->tensors[i].tensor);
+ }
}
- ml_util_set_error (ML_ERROR_NONE);
- return data;
+ ml_loge ("Failed to allocate the memory block.");
+ return ML_ERROR_STREAMS_PIPE;
+}
+
+/**
+ * @brief Gets a tensor data of given handle.
+ * @note Hands out a pointer to the internal buffer (no copy); the caller must not free it.
+ */
+int
+ml_util_get_tensor_data (ml_tensors_data_h data, const unsigned int index,
+    void **raw_data, size_t * data_size)
+{
+  ml_tensors_data_s *_data;
+
+  /* Validate the handle and both out-parameters before dereferencing them. */
+  if (!data || !raw_data || !data_size)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  _data = (ml_tensors_data_s *) data;
+
+  if (_data->num_tensors <= index)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  *raw_data = _data->tensors[index].tensor;
+  *data_size = _data->tensors[index].size;
+
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Copies a tensor data to given handle.
+ * @note The destination tensor must be at least @a data_size bytes (allocated via
+ *       ml_util_allocate_tensors_data() with matching tensors information).
+ */
+int
+ml_util_copy_tensor_data (ml_tensors_data_h data, const unsigned int index,
+    const void *raw_data, const size_t data_size)
+{
+  ml_tensors_data_s *_data;
+
+  /* Reject a NULL handle and a NULL source buffer: memcpy from NULL is undefined behavior. */
+  if (!data || !raw_data)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  _data = (ml_tensors_data_s *) data;
+
+  if (_data->num_tensors <= index)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  /* data_size is unsigned (size_t); test emptiness with == 0, and never overrun the destination. */
+  if (data_size == 0 || _data->tensors[index].size < data_size)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  memcpy (_data->tensors[index].tensor, raw_data, data_size);
+  return ML_ERROR_NONE;
+}
/**
*/
int
ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
- bool *available)
+ bool * available)
{
if (!available)
return ML_ERROR_INVALID_PARAMETER;
* @brief A tensor-sink callback for sink handle in a pipeline
*/
static void
-test_sink_callback_dm01 (const ml_tensors_data_s * data,
+test_sink_callback_dm01 (const ml_tensors_data_h data,
const ml_tensors_info_h info, void *user_data)
{
gchar *filepath = (gchar *) user_data;
unsigned int i, num = 0;
+ void *data_ptr;
+ size_t data_size;
+ int status;
FILE *fp = g_fopen (filepath, "a");
if (fp == NULL)
ml_util_get_tensors_count (info, &num);
for (i = 0; i < num; i++) {
- fwrite (data->tensors[i].tensor, data->tensors[i].size, 1, fp);
+ status = ml_util_get_tensor_data (data, i, &data_ptr, &data_size);
+ if (status == ML_ERROR_NONE)
+ fwrite (data_ptr, data_size, 1, fp);
}
fclose (fp);
* @brief A tensor-sink callback for sink handle in a pipeline
*/
static void
-test_sink_callback_count (const ml_tensors_data_s * data,
+test_sink_callback_count (const ml_tensors_data_h data,
const ml_tensors_info_h info, void *user_data)
{
guint *count = (guint *) user_data;
g_free (count_sink);
}
-static char uintarray[10][4];
-static char *uia_index[10];
-
/**
* @brief Test NNStreamer pipeline src
*/
ml_pipeline_src_h srchandle;
int status;
ml_tensors_info_h info;
- ml_tensors_data_s data1, data2;
+ ml_tensors_data_h data1, data2;
unsigned int count = 0;
ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
ml_tensor_dimension dim = { 0, };
int i;
- char *uintarray2[10];
+ uint8_t *uintarray1[10];
+ uint8_t *uintarray2[10];
uint8_t *content;
- gboolean r;
gsize len;
status = ml_pipeline_construct (pipeline, &handle);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (dir != NULL);
for (i = 0; i < 10; i++) {
- uia_index[i] = &uintarray[i][0];
+ uintarray1[i] = (uint8_t *) g_malloc (4);
+ uintarray1[i][0] = i + 4;
+ uintarray1[i][1] = i + 1;
+ uintarray1[i][2] = i + 3;
+ uintarray1[i][3] = i + 2;
- uintarray[i][0] = i;
- uintarray[i][1] = i + 1;
- uintarray[i][2] = i + 3;
- uintarray[i][3] = i + 2;
-
- uintarray2[i] = (char *) g_malloc (4);
+ uintarray2[i] = (uint8_t *) g_malloc (4);
uintarray2[i][0] = i + 3;
uintarray2[i][1] = i + 2;
uintarray2[i][2] = i + 1;
- uintarray2[i][3] = i;
+ uintarray2[i][3] = i + 4;
/* These will be free'ed by gstreamer (ML_PIPELINE_BUF_POLICY_AUTO_FREE) */
/** @todo Check whether gstreamer really deallocates this */
}
EXPECT_EQ (dim[2], 1U);
EXPECT_EQ (dim[3], 1U);
+ status = ml_util_allocate_tensors_data (info, &data1);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
ml_util_destroy_tensors_info (info);
- data1.num_tensors = 1;
- data1.tensors[0].tensor = uia_index[0];
- data1.tensors[0].size = 4;
+ status = ml_util_copy_tensor_data (data1, 0, uintarray1[0], 4);
+ EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_pipeline_src_input_data (srchandle, &data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
+ status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
EXPECT_EQ (status, ML_ERROR_NONE);
+ g_usleep (50000); /* 50ms. Wait a bit. */
- status = ml_pipeline_src_input_data (srchandle, &data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
+ status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
EXPECT_EQ (status, ML_ERROR_NONE);
+ g_usleep (50000); /* 50ms. Wait a bit. */
status = ml_pipeline_src_put_handle (srchandle);
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (dim[3], 1U);
for (i = 0; i < 10; i++) {
- data1.num_tensors = 1;
- data1.tensors[0].tensor = uia_index[i];
- data1.tensors[0].size = 4;
- status = ml_pipeline_src_input_data (srchandle, &data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
+ status = ml_util_copy_tensor_data (data1, 0, uintarray1[i], 4);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_pipeline_src_input_data (srchandle, data1, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_util_allocate_tensors_data (info, &data2);
EXPECT_EQ (status, ML_ERROR_NONE);
- data2.num_tensors = 1;
- data2.tensors[0].tensor = uintarray2[i];
- data2.tensors[0].size = 4;
- status = ml_pipeline_src_input_data (srchandle, &data2, ML_PIPELINE_BUF_POLICY_AUTO_FREE);
+ status = ml_util_copy_tensor_data (data2, 0, uintarray2[i], 4);
EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_pipeline_src_input_data (srchandle, data2, ML_PIPELINE_BUF_POLICY_AUTO_FREE);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ g_usleep (50000); /* 50ms. Wait a bit. */
}
status = ml_pipeline_src_put_handle (srchandle);
g_free (pipeline);
- r = g_file_get_contents (file1, (gchar **) &content, &len, NULL);
- EXPECT_EQ (r, TRUE);
-
+ EXPECT_TRUE (g_file_get_contents (file1, (gchar **) &content, &len, NULL));
EXPECT_EQ (len, 8U * 11);
for (i = 0; i < 10; i++) {
- EXPECT_EQ (content[i * 8 + 0 + 8], i);
+ EXPECT_EQ (content[i * 8 + 0 + 8], i + 4);
EXPECT_EQ (content[i * 8 + 1 + 8], i + 1);
EXPECT_EQ (content[i * 8 + 2 + 8], i + 3);
EXPECT_EQ (content[i * 8 + 3 + 8], i + 2);
EXPECT_EQ (content[i * 8 + 4 + 8], i + 3);
EXPECT_EQ (content[i * 8 + 5 + 8], i + 2);
EXPECT_EQ (content[i * 8 + 6 + 8], i + 1);
- EXPECT_EQ (content[i * 8 + 7 + 8], i);
+ EXPECT_EQ (content[i * 8 + 7 + 8], i + 4);
}
g_free (content);
+ ml_util_destroy_tensors_info (info);
+ ml_util_destroy_tensors_data (data1);
}
/**
*/
TEST (nnstreamer_capi_src, failure_03)
{
- const int num_tensors = ML_TENSOR_SIZE_LIMIT + 1;
- const int num_dims = 4;
-
const char *pipeline = "appsrc name=srcx ! other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! tensor_sink";
ml_pipeline_h handle;
ml_pipeline_src_h srchandle;
- ml_tensors_data_s data;
-
- for (int i = 0; i < ML_TENSOR_SIZE_LIMIT; ++i) {
- data.tensors[i].tensor = g_malloc0 (sizeof (char) * num_dims);
- data.tensors[i].size = num_dims;
- }
+ ml_tensors_data_h data;
+ ml_tensors_info_h info;
int status = ml_pipeline_construct (pipeline, &handle);
EXPECT_EQ (status, ML_ERROR_NONE);
status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle);
EXPECT_EQ (status, ML_ERROR_NONE);
- /* null data */
- status = ml_pipeline_src_input_data (srchandle, NULL, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
- EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+ status = ml_pipeline_src_get_tensors_info (srchandle, &info);
+ EXPECT_EQ (status, ML_ERROR_NONE);
- /* invalid number of tensors (max size) */
- data.num_tensors = num_tensors;
- status = ml_pipeline_src_input_data (srchandle, &data, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
- EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+ status = ml_util_allocate_tensors_data (info, &data);
- /* invalid number of tensors (size is 0) */
- data.num_tensors = 0;
- status = ml_pipeline_src_input_data (srchandle, &data, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
+ /* null data */
+ status = ml_pipeline_src_input_data (srchandle, NULL, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
status = ml_pipeline_src_put_handle (srchandle);
status = ml_pipeline_destroy (handle);
EXPECT_EQ (status, ML_ERROR_NONE);
- for (int i = 0; i < ML_TENSOR_SIZE_LIMIT; ++i)
- g_free (data.tensors[i].tensor);
+ status = ml_util_destroy_tensors_data (data);
+ EXPECT_EQ (status, ML_ERROR_NONE);
}
/**
ml_single_h single;
ml_tensors_info_h in_info, out_info;
ml_tensors_info_h in_res, out_res;
- ml_tensors_data_s *input, *output1, *output2;
+ ml_tensors_data_h input, output;
ml_tensor_dimension in_dim, out_dim, res_dim;
ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
unsigned int count = 0;
EXPECT_TRUE (out_dim[2] == res_dim[2]);
EXPECT_TRUE (out_dim[3] == res_dim[3]);
- /* generate dummy data */
- input = ml_util_allocate_tensors_data (in_info);
- EXPECT_TRUE (input != NULL);
-
- status = ml_util_get_last_error ();
- EXPECT_EQ (status, ML_ERROR_NONE);
-
- output1 = ml_single_inference (single, input, NULL);
- EXPECT_TRUE (output1 != NULL);
-
- status = ml_util_get_last_error ();
- EXPECT_EQ (status, ML_ERROR_NONE);
-
- ml_util_free_tensors_data (&output1);
-
- output2 = ml_util_allocate_tensors_data (out_info);
- EXPECT_TRUE (output2 != NULL);
+ input = output = NULL;
- status = ml_util_get_last_error ();
+ /* generate dummy data */
+ status = ml_util_allocate_tensors_data (in_info, &input);
EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (input != NULL);
- output1 = ml_single_inference (single, input, output2);
- EXPECT_TRUE (output1 != NULL);
- EXPECT_TRUE (output1 == output2);
-
- status = ml_util_get_last_error ();
+ status = ml_single_inference (single, input, &output);
EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (output != NULL);
- ml_util_free_tensors_data (&output2);
- ml_util_free_tensors_data (&input);
+ ml_util_destroy_tensors_data (output);
+ ml_util_destroy_tensors_data (input);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);
{
ml_single_h single;
ml_tensors_info_h in_info, out_info;
- ml_tensors_data_s *input, *output1, *output2;
+ ml_tensors_data_h input, output;
ml_tensor_dimension in_dim, out_dim;
int status;
ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_NONE);
- /* generate dummy data */
- input = ml_util_allocate_tensors_data (in_info);
- EXPECT_TRUE (input != NULL);
-
- status = ml_util_get_last_error ();
- EXPECT_EQ (status, ML_ERROR_NONE);
-
- output1 = ml_single_inference (single, input, NULL);
- EXPECT_TRUE (output1 != NULL);
-
- status = ml_util_get_last_error ();
- EXPECT_EQ (status, ML_ERROR_NONE);
-
- ml_util_free_tensors_data (&output1);
+ input = output = NULL;
- output2 = ml_util_allocate_tensors_data (out_info);
- EXPECT_TRUE (output2 != NULL);
-
- status = ml_util_get_last_error ();
+ /* generate dummy data */
+ status = ml_util_allocate_tensors_data (in_info, &input);
EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (input != NULL);
- output1 = ml_single_inference (single, input, output2);
- EXPECT_TRUE (output1 != NULL);
- EXPECT_TRUE (output1 == output2);
-
- status = ml_util_get_last_error ();
+ status = ml_single_inference (single, input, &output);
EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (output != NULL);
- ml_util_free_tensors_data (&output2);
- ml_util_free_tensors_data (&input);
+ ml_util_destroy_tensors_data (output);
+ ml_util_destroy_tensors_data (input);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);
{
ml_single_h single;
ml_tensors_info_h in_info, out_info;
- ml_tensors_data_s *input, *output1, *output2;
+ ml_tensors_data_h input, output;
ml_tensor_dimension in_dim;
- int i, status;
+ int status;
+ unsigned int i;
+ void *data_ptr;
+ size_t data_size;
const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
gchar *test_model;
ML_NNFW_TYPE_CUSTOM_FILTER, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_NONE);
+ input = output = NULL;
+
/* generate input data */
- input = ml_util_allocate_tensors_data (in_info);
+ status = ml_util_allocate_tensors_data (in_info, &input);
+ EXPECT_EQ (status, ML_ERROR_NONE);
ASSERT_TRUE (input != NULL);
- EXPECT_TRUE (input->num_tensors == 2U);
for (i = 0; i < 10; i++) {
int16_t i16 = (int16_t) (i + 1);
float f32 = (float) (i + .1);
- ((int16_t *) input->tensors[0].tensor)[i] = i16;
- ((float *) input->tensors[1].tensor)[i] = f32;
- }
-
- status = ml_util_get_last_error ();
- EXPECT_EQ (status, ML_ERROR_NONE);
-
- output1 = ml_single_inference (single, input, NULL);
- EXPECT_TRUE (output1 != NULL);
-
- status = ml_util_get_last_error ();
- EXPECT_EQ (status, ML_ERROR_NONE);
-
- for (i = 0; i < 10; i++) {
- int16_t i16 = (int16_t) (i + 1);
- float f32 = (float) (i + .1);
+ status = ml_util_get_tensor_data (input, 0, &data_ptr, &data_size);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ ((int16_t *) data_ptr)[i] = i16;
- EXPECT_EQ (((int16_t *) output1->tensors[0].tensor)[i], i16);
- EXPECT_FLOAT_EQ (((float *) output1->tensors[1].tensor)[i], f32);
+ status = ml_util_get_tensor_data (input, 1, &data_ptr, &data_size);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ ((float *) data_ptr)[i] = f32;
}
- ml_util_free_tensors_data (&output1);
-
- output2 = ml_util_allocate_tensors_data (out_info);
- EXPECT_TRUE (output2 != NULL);
-
- status = ml_util_get_last_error ();
- EXPECT_EQ (status, ML_ERROR_NONE);
-
- output1 = ml_single_inference (single, input, output2);
- EXPECT_TRUE (output1 != NULL);
- EXPECT_TRUE (output1 == output2);
-
- status = ml_util_get_last_error ();
+ status = ml_single_inference (single, input, &output);
EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (output != NULL);
for (i = 0; i < 10; i++) {
int16_t i16 = (int16_t) (i + 1);
float f32 = (float) (i + .1);
- EXPECT_EQ (((int16_t *) output1->tensors[0].tensor)[i], i16);
- EXPECT_FLOAT_EQ (((float *) output1->tensors[1].tensor)[i], f32);
+ status = ml_util_get_tensor_data (output, 0, &data_ptr, &data_size);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (((int16_t *) data_ptr)[i], i16);
+
+ status = ml_util_get_tensor_data (output, 1, &data_ptr, &data_size);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_FLOAT_EQ (((float *) data_ptr)[i], f32);
}
- ml_util_free_tensors_data (&output2);
- ml_util_free_tensors_data (&input);
+ ml_util_destroy_tensors_data (output);
+ ml_util_destroy_tensors_data (input);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);
ml_single_h single;
ml_tensors_info_h in_info, out_info;
ml_tensors_info_h in_res, out_res;
- ml_tensors_data_s *input, *output;
+ ml_tensors_data_h input, output;
ml_tensor_dimension in_dim, out_dim, res_dim;
ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
unsigned int count = 0;
char *name = NULL;
int status, max_score_index;
float score, max_score;
+ void *data_ptr;
+ size_t data_size;
const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
gchar *test_model, *test_file;
EXPECT_TRUE (out_dim[2] == res_dim[2]);
EXPECT_TRUE (out_dim[3] == res_dim[3]);
+ input = output = NULL;
+
/* generate input data */
- input = ml_util_allocate_tensors_data (in_info);
+ status = ml_util_allocate_tensors_data (in_info, &input);
+ EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_TRUE (input != NULL);
- status = ml_util_get_last_error ();
+ status = ml_util_copy_tensor_data (input, 0, contents, len);
EXPECT_EQ (status, ML_ERROR_NONE);
- memcpy (input->tensors[0].tensor, contents, len);
-
- output = ml_single_inference (single, input, NULL);
- EXPECT_TRUE (output != NULL);
-
- status = ml_util_get_last_error ();
+ status = ml_single_inference (single, input, &output);
EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (output != NULL);
/* check result (max score index is 2) */
- EXPECT_EQ (output->num_tensors, 1U);
+ status = ml_util_get_tensor_data (output, 1, &data_ptr, &data_size);
+ EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
+
+ status = ml_util_get_tensor_data (output, 0, &data_ptr, &data_size);
+ EXPECT_EQ (status, ML_ERROR_NONE);
max_score = .0;
max_score_index = 0;
for (gint i = 0; i < 12; i++) {
- score = ((float *) output->tensors[0].tensor)[i];
+ score = ((float *) data_ptr)[i];
if (score > max_score) {
max_score = score;
max_score_index = i;
EXPECT_EQ (max_score_index, 2);
- ml_util_free_tensors_data (&output);
- ml_util_free_tensors_data (&input);
+ ml_util_destroy_tensors_data (output);
+ ml_util_destroy_tensors_data (input);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);