}
/**
+ * @brief Allocates the extra tensor-info array of the given tensors_info.
+ * @details No-op (returns TRUE) if the extra array already exists.
+ * @return TRUE if the extra array is available, FALSE on allocation failure.
+ */
+gboolean
+_ml_tensors_info_create_extra (ml_tensors_info_s * ml_info)
+{
+ ml_tensor_info_s *extra;
+ guint idx;
+
+ if (!ml_info)
+ _ml_error_report_return (FALSE, "The parameter, ml_info, is NULL.");
+
+ /* Lazily allocated; nothing to do if it is already there. */
+ if (ml_info->extra) {
+ return TRUE;
+ }
+
+ extra = g_try_new0 (ml_tensor_info_s, ML_TENSOR_SIZE_EXTRA_LIMIT);
+ if (!extra) {
+ _ml_loge ("Failed to allocate memory for extra tensors info.");
+ return FALSE;
+ }
+
+ for (idx = 0; idx < ML_TENSOR_SIZE_EXTRA_LIMIT; idx++) {
+ if (_ml_tensor_info_initialize (&extra[idx]) != ML_ERROR_NONE) {
+ _ml_loge ("Failed to initialize extra tensors info.");
+ g_free (extra);
+ return FALSE;
+ }
+ }
+
+ ml_info->extra = extra;
+
+ return TRUE;
+}
+
+/**
+ * @brief Resets the given tensor_info to its default (empty) state.
+ * @return ML_ERROR_NONE on success, ML_ERROR_INVALID_PARAMETER when info is NULL.
+ */
+int
+_ml_tensor_info_initialize (ml_tensor_info_s * info)
+{
+ guint d;
+
+ if (!info)
+ _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+ "The parameter, info, is NULL. Provide a valid pointer.");
+
+ /* No name, unknown type, and a zeroed dimension mean "not configured". */
+ info->name = NULL;
+ info->type = ML_TENSOR_TYPE_UNKNOWN;
+ for (d = 0; d < ML_TENSOR_RANK_LIMIT; d++)
+ info->dimension[d] = 0;
+
+ return ML_ERROR_NONE;
+}
+
+/**
 * @brief Initializes the tensors information with default value.
 */
int
_ml_tensors_info_initialize (ml_tensors_info_s * info)
{
- guint i, j;
+ guint i;
 if (!info)
 _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
 info->num_tensors = 0;
- for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) {
- info->info[i].name = NULL;
- info->info[i].type = ML_TENSOR_TYPE_UNKNOWN;
-
- for (j = 0; j < ML_TENSOR_RANK_LIMIT; j++) {
- info->info[i].dimension[j] = 0;
- }
+ /* Reset only the static array; extra infos beyond it are allocated on demand. */
+ for (i = 0; i < ML_TENSOR_SIZE_LIMIT_STATIC; i++) {
+ _ml_tensor_info_initialize (&info->info[i]);
 }
+ info->extra = NULL;
+
 return ML_ERROR_NONE;
}
/**
+ * @brief Get the pointer of nth tensor info, allocating the extra array on demand.
+ * @return Pointer to the nth info, or NULL when info is NULL, nth is out of
+ * range, or the extra array cannot be allocated.
+ */
+ml_tensor_info_s *
+ml_tensors_info_get_nth_info (ml_tensors_info_s * info, guint nth)
+{
+ ml_tensor_info_s *result = NULL;
+
+ if (!info)
+ return NULL;
+
+ if (nth >= ML_TENSOR_SIZE_LIMIT) {
+ _ml_loge ("The given nth is out of range. It should be less than %d.",
+ ML_TENSOR_SIZE_LIMIT);
+ return NULL;
+ }
+
+ if (nth < ML_TENSOR_SIZE_LIMIT_STATIC) {
+ /* The first ML_TENSOR_SIZE_LIMIT_STATIC entries live in the static array. */
+ result = &info->info[nth];
+ } else if (_ml_tensors_info_create_extra (info)) {
+ /* Indices beyond the static part are served from the lazily-made extra array. */
+ result = &info->extra[nth - ML_TENSOR_SIZE_LIMIT_STATIC];
+ }
+
+ return result;
+}
+
+/**
* @brief Compares the given tensor info.
*/
static gboolean
}
for (i = 0; i < info->num_tensors; i++) {
- if (!ml_tensor_info_validate (&info->info[i], info->is_extended))
+ ml_tensor_info_s *tensor_info =
+ ml_tensors_info_get_nth_info ((ml_tensors_info_s *) info, i);
+ if (!ml_tensor_info_validate (tensor_info, info->is_extended))
goto done;
}
goto done;
for (i = 0; i < i1->num_tensors; i++) {
- if (!ml_tensor_info_compare (&i1->info[i], &i2->info[i], i1->is_extended))
+ ml_tensor_info_s *ti1 = ml_tensors_info_get_nth_info (i1, i);
+ ml_tensor_info_s *ti2 = ml_tensors_info_get_nth_info (i2, i);
+ if (!ml_tensor_info_compare (ti1, ti2, i1->is_extended))
goto done;
}
"The parameter, info, is NULL. It should be a valid ml_tensors_info_h handle, which is usually created by ml_tensors_info_create().");
if (count > ML_TENSOR_SIZE_LIMIT || count == 0)
_ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
- "The parameter, count, is the number of tensors, which should be between 1 and 16. The given count is %u.",
- count);
+ "The parameter, count, is the number of tensors, which should be between 1 and %d. The given count is %u.",
+ ML_TENSOR_SIZE_LIMIT, count);
tensors_info = (ml_tensors_info_s *) info;
unsigned int index, const char *name)
{
ml_tensors_info_s *tensors_info;
+ ml_tensor_info_s *_tensor_info;
check_feature_state (ML_FEATURE);
tensors_info->num_tensors, index);
}
- if (tensors_info->info[index].name) {
- g_free (tensors_info->info[index].name);
- tensors_info->info[index].name = NULL;
+ _tensor_info = ml_tensors_info_get_nth_info (tensors_info, index);
+ if (!_tensor_info) {
+ G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
+ if (_tensor_info->name) {
+ g_free (_tensor_info->name);
+ _tensor_info->name = NULL;
}
if (name)
- tensors_info->info[index].name = g_strdup (name);
+ _tensor_info->name = g_strdup (name);
G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
return ML_ERROR_NONE;
unsigned int index, char **name)
{
ml_tensors_info_s *tensors_info;
+ ml_tensor_info_s *_tensor_info;
check_feature_state (ML_FEATURE);
tensors_info->num_tensors, index);
}
- *name = g_strdup (tensors_info->info[index].name);
+ _tensor_info = ml_tensors_info_get_nth_info (tensors_info, index);
+ if (!_tensor_info) {
+ G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
+ *name = g_strdup (_tensor_info->name);
+
G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
return ML_ERROR_NONE;
unsigned int index, const ml_tensor_type_e type)
{
ml_tensors_info_s *tensors_info;
+ ml_tensor_info_s *_tensor_info;
check_feature_state (ML_FEATURE);
return ML_ERROR_INVALID_PARAMETER;
}
- tensors_info->info[index].type = type;
+ _tensor_info = ml_tensors_info_get_nth_info (tensors_info, index);
+ if (!_tensor_info) {
+ G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
+ _tensor_info->type = type;
G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
return ML_ERROR_NONE;
unsigned int index, ml_tensor_type_e * type)
{
ml_tensors_info_s *tensors_info;
+ ml_tensor_info_s *_tensor_info;
check_feature_state (ML_FEATURE);
return ML_ERROR_INVALID_PARAMETER;
}
- *type = tensors_info->info[index].type;
+ _tensor_info = ml_tensors_info_get_nth_info (tensors_info, index);
+ if (!_tensor_info) {
+ G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+ *type = _tensor_info->type;
G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
return ML_ERROR_NONE;
unsigned int index, const ml_tensor_dimension dimension)
{
ml_tensors_info_s *tensors_info;
+ ml_tensor_info_s *_tensor_info;
guint i;
check_feature_state (ML_FEATURE);
tensors_info->num_tensors, index, index);
}
+ _tensor_info = ml_tensors_info_get_nth_info (tensors_info, index);
+ if (!_tensor_info) {
+ G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
for (i = 0; i < ML_TENSOR_RANK_LIMIT_PREV; i++) {
- tensors_info->info[index].dimension[i] = dimension[i];
+ _tensor_info->dimension[i] = dimension[i];
}
for (i = ML_TENSOR_RANK_LIMIT_PREV; i < ML_TENSOR_RANK_LIMIT; i++) {
- tensors_info->info[index].dimension[i] =
- (tensors_info->is_extended ? dimension[i] : 1);
+ _tensor_info->dimension[i] = (tensors_info->is_extended ? dimension[i] : 1);
}
G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
unsigned int index, ml_tensor_dimension dimension)
{
ml_tensors_info_s *tensors_info;
+ ml_tensor_info_s *_tensor_info;
guint i, valid_rank = ML_TENSOR_RANK_LIMIT;
check_feature_state (ML_FEATURE);
return ML_ERROR_INVALID_PARAMETER;
}
+ _tensor_info = ml_tensors_info_get_nth_info (tensors_info, index);
+ if (!_tensor_info) {
+ G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
if (!tensors_info->is_extended)
valid_rank = ML_TENSOR_RANK_LIMIT_PREV;
for (i = 0; i < valid_rank; i++) {
- dimension[i] = tensors_info->info[index].dimension[i];
+ dimension[i] = _tensor_info->dimension[i];
}
G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
int index, size_t *data_size)
{
ml_tensors_info_s *tensors_info;
+ ml_tensor_info_s *_tensor_info;
check_feature_state (ML_FEATURE);
/* get total byte size */
for (i = 0; i < tensors_info->num_tensors; i++) {
+ _tensor_info = ml_tensors_info_get_nth_info (tensors_info, i);
+ if (!_tensor_info) {
+ G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
*data_size +=
- _ml_tensor_info_get_size (&tensors_info->info[i],
- tensors_info->is_extended);
+ _ml_tensor_info_get_size (_tensor_info, tensors_info->is_extended);
}
} else {
+ _tensor_info = ml_tensors_info_get_nth_info (tensors_info, index);
+ if (!_tensor_info) {
+ G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
+ return ML_ERROR_INVALID_PARAMETER;
+ }
if (tensors_info->num_tensors <= index) {
G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
_ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
}
*data_size =
- _ml_tensor_info_get_size (&tensors_info->info[index],
- tensors_info->is_extended);
+ _ml_tensor_info_get_size (_tensor_info, tensors_info->is_extended);
}
G_UNLOCK_UNLESS_NOLOCK (*tensors_info);
if (!info)
return;
- for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) {
+ for (i = 0; i < ML_TENSOR_SIZE_LIMIT_STATIC; i++) {
if (info->info[i].name) {
g_free (info->info[i].name);
}
}
+ if (info->extra) {
+ for (i = 0; i < ML_TENSOR_SIZE_EXTRA_LIMIT; i++) {
+ if (info->extra[i].name) {
+ g_free (info->extra[i].name);
+ }
+ }
+ }
+
+ g_free (info->extra);
+
_ml_tensors_info_initialize (info);
}
G_LOCK_UNLESS_NOLOCK (*_info);
_data->num_tensors = _info->num_tensors;
for (i = 0; i < _data->num_tensors; i++) {
+ ml_tensor_info_s *_tensor_info = ml_tensors_info_get_nth_info (_info, i);
_data->tensors[i].size =
- _ml_tensor_info_get_size (&_info->info[i], _info->is_extended);
+ _ml_tensor_info_get_size (_tensor_info, _info->is_extended);
_data->tensors[i].tensor = NULL;
}
G_UNLOCK_UNLESS_NOLOCK (*_info);
}
if (!valid) {
_ml_error_report
- ("The parameter, src, is a ml_tensors_info_h handle without valid data. Every tensor-info of tensors-info should have a valid type and dimension information and the number of tensors should be between 1 and 16.");
+ ("The parameter, src, is a ml_tensors_info_h handle without valid data. Every tensor-info of tensors-info should have a valid type and dimension information and the number of tensors should be between 1 and %d.",
+ ML_TENSOR_SIZE_LIMIT);
status = ML_ERROR_INVALID_PARAMETER;
goto done;
}
dest_info->is_extended = src_info->is_extended;
for (i = 0; i < dest_info->num_tensors; i++) {
- dest_info->info[i].name =
- (src_info->info[i].name) ? g_strdup (src_info->info[i].name) : NULL;
- dest_info->info[i].type = src_info->info[i].type;
+ ml_tensor_info_s *dest_tensor_info =
+ ml_tensors_info_get_nth_info (dest_info, i);
+ ml_tensor_info_s *src_tensor_info =
+ ml_tensors_info_get_nth_info (src_info, i);
+
+ if (!dest_tensor_info || !src_tensor_info) {
+ _ml_error_report
+ ("Cannot get the %u'th tensor info from src or dest. Maybe src or dest is not valid or its internal data is not consistent.",
+ i);
+ status = ML_ERROR_INVALID_PARAMETER;
+ goto done;
+ }
+
+ dest_tensor_info->name =
+ (src_tensor_info->name) ? g_strdup (src_tensor_info->name) : NULL;
+ dest_tensor_info->type = src_tensor_info->type;
for (j = 0; j < ML_TENSOR_RANK_LIMIT; j++) {
- dest_info->info[i].dimension[j] = src_info->info[i].dimension[j];
+ dest_tensor_info->dimension[j] = src_tensor_info->dimension[j];
}
}
{
int i, j;
for (i = 0; i < gst_info->num_tensors; i++) {
+ GstTensorInfo *_gst_tensor_info =
+ gst_tensors_info_get_nth_info ((GstTensorsInfo *) gst_info, i);
for (j = ML_TENSOR_RANK_LIMIT_PREV; j < NNS_TENSOR_RANK_LIMIT; j++) {
- if (gst_info->info[i].dimension[j] != 1)
+ if (_gst_tensor_info->dimension[j] != 1)
return TRUE;
}
}
ml_info->num_tensors = gst_info->num_tensors;
ml_info->is_extended = gst_info_is_extended (gst_info);
+ if (gst_info->extra) {
+ /* create ml_info_extra in ml_tensors_info_s */
+ _ml_tensors_info_create_extra (ml_info);
+ }
for (i = 0; i < gst_info->num_tensors; i++) {
- /* Copy name string */
- if (gst_info->info[i].name) {
- ml_info->info[i].name = g_strdup (gst_info->info[i].name);
+ GstTensorInfo *_gst_tensor_info =
+ gst_tensors_info_get_nth_info ((GstTensorsInfo *) gst_info, i);
+ ml_tensor_info_s *_ml_tensor_info =
+ ml_tensors_info_get_nth_info (ml_info, i);
+ if (!_gst_tensor_info) {
+ _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+ "The parameter, gst_info, is invalid. It should be a valid GstTensorsInfo instance. This is probably an internal bug of ML API.");
}
- ml_info->info[i].type =
- convert_ml_tensor_type_from (gst_info->info[i].type);
+ if (!_ml_tensor_info) {
+ _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+ "The parameter, ml_info, is invalid. It should be a valid ml_tensors_info_s instance, usually created by ml_tensors_info_create(). This is probably an internal bug of ML API.");
+ }
+
+ if (_gst_tensor_info->name) {
+ _ml_tensor_info->name = g_strdup (_gst_tensor_info->name);
+ }
+
+ _ml_tensor_info->type =
+ convert_ml_tensor_type_from (_gst_tensor_info->type);
/* Set dimension */
for (j = 0; j < max_dim; j++) {
- ml_info->info[i].dimension[j] = gst_info->info[i].dimension[j];
+ _ml_tensor_info->dimension[j] = _gst_tensor_info->dimension[j];
}
for (; j < ML_TENSOR_RANK_LIMIT; j++) {
- ml_info->info[i].dimension[j] = 1;
+ _ml_tensor_info->dimension[j] = 1U;
}
if (!ml_info->is_extended) {
for (j = ML_TENSOR_RANK_LIMIT_PREV; j < ML_TENSOR_RANK_LIMIT; j++) {
- ml_info->info[i].dimension[j] = 1;
+ _ml_tensor_info->dimension[j] = 1U;
}
}
}
gst_info->num_tensors = ml_info->num_tensors;
for (i = 0; i < ml_info->num_tensors; i++) {
+ ml_tensor_info_s *_ml_tensor_info =
+ ml_tensors_info_get_nth_info ((ml_tensors_info_s *) ml_info, i);
+ GstTensorInfo *_gst_tensor_info =
+ gst_tensors_info_get_nth_info (gst_info, i);
+
/* Copy name string */
- if (ml_info->info[i].name) {
- gst_info->info[i].name = g_strdup (ml_info->info[i].name);
+ if (_ml_tensor_info->name) {
+ _gst_tensor_info->name = g_strdup (_ml_tensor_info->name);
}
- gst_info->info[i].type = convert_tensor_type_from (ml_info->info[i].type);
+ /* Copy type */
+ _gst_tensor_info->type = convert_tensor_type_from (_ml_tensor_info->type);
/* Set dimension */
for (j = 0; j < max_dim; j++) {
- gst_info->info[i].dimension[j] = ml_info->info[i].dimension[j];
+ _gst_tensor_info->dimension[j] = _ml_tensor_info->dimension[j];
}
for (; j < NNS_TENSOR_RANK_LIMIT; j++) {
- gst_info->info[i].dimension[j] = 1;
+ _gst_tensor_info->dimension[j] = 1;
}
if (!ml_info->is_extended) {
for (j = ML_TENSOR_RANK_LIMIT_PREV; j < NNS_TENSOR_RANK_LIMIT; j++) {
- gst_info->info[i].dimension[j] = 1;
+ _gst_tensor_info->dimension[j] = 1;
}
}
}