Code cleanup: use the GstTensorMemory struct defined in nnstreamer instead of the duplicated ml_tensor_data_s.
Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
status);
} else {
for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) {
- if (_data->tensors[i].tensor) {
- g_free (_data->tensors[i].tensor);
- _data->tensors[i].tensor = NULL;
+ if (_data->tensors[i].data) {
+ g_free (_data->tensors[i].data);
+ _data->tensors[i].data = NULL;
}
}
}
ml_tensor_info_s *_tensor_info = ml_tensors_info_get_nth_info (_info, i);
_data->tensors[i].size =
_ml_tensor_info_get_size (_tensor_info, _info->is_extended);
- _data->tensors[i].tensor = NULL;
+ _data->tensors[i].data = NULL;
}
G_UNLOCK_UNLESS_NOLOCK (*_info);
}
_data->num_tensors = data_src->num_tensors;
memcpy (_data->tensors, data_src->tensors,
- sizeof (ml_tensor_data_s) * data_src->num_tensors);
+ sizeof (GstTensorMemory) * data_src->num_tensors);
*data = _data;
G_UNLOCK_UNLESS_NOLOCK (*_data);
_out = (ml_tensors_data_s *) (*out);
for (i = 0; i < _out->num_tensors; ++i) {
- memcpy (_out->tensors[i].tensor, _in->tensors[i].tensor,
- _in->tensors[i].size);
+ memcpy (_out->tensors[i].data, _in->tensors[i].data, _in->tensors[i].size);
}
error:
}
for (i = 0; i < _data->num_tensors; i++) {
- _data->tensors[i].tensor = g_malloc0 (_data->tensors[i].size);
- if (_data->tensors[i].tensor == NULL) {
+ _data->tensors[i].data = g_malloc0 (_data->tensors[i].size);
+ if (_data->tensors[i].data == NULL) {
goto failed_oom;
}
}
goto report;
}
- *raw_data = _data->tensors[index].tensor;
+ *raw_data = _data->tensors[index].data;
*data_size = _data->tensors[index].size;
report:
goto report;
}
- if (_data->tensors[index].tensor != raw_data)
- memcpy (_data->tensors[index].tensor, raw_data, data_size);
+ if (_data->tensors[index].data != raw_data)
+ memcpy (_data->tensors[index].data, raw_data, data_size);
report:
G_UNLOCK_UNLESS_NOLOCK (*_data);
goto error;
}
- _data->tensors[i].tensor = map[i].data;
+ _data->tensors[i].data = map[i].data;
_data->tensors[i].size = map[i].size;
}
gst_tensor_meta_info_convert (&meta,
gst_tensors_info_get_nth_info (&gst_info, i));
- _data->tensors[i].tensor = map[i].data + hsize;
+ _data->tensors[i].data = map[i].data + hsize;
_data->tensors[i].size = map[i].size - hsize;
}
} else {
for (i = 0; i < _data->num_tensors; i++) {
GstTensorInfo *_gst_tensor_info =
gst_tensors_info_get_nth_info (&gst_info, i);
- mem_data = _data->tensors[i].tensor;
+ mem_data = _data->tensors[i].data;
mem_size = _data->tensors[i].size;
mem = tmp = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
_data = (ml_tensors_data_s *) in_data;
for (i = 0; i < _data->num_tensors; i++)
- _data->tensors[i].tensor = in[i].data;
+ _data->tensors[i].data = in[i].data;
status = _ml_tensors_data_create_no_alloc (c->out_info, &out_data);
if (status != ML_ERROR_NONE) {
_data = (ml_tensors_data_s *) out_data;
for (i = 0; i < _data->num_tensors; i++)
- _data->tensors[i].tensor = out[i].data;
+ _data->tensors[i].data = out[i].data;
/* call invoke callback */
status = c->cb (in_data, out_data, c->pdata);
_data = (ml_tensors_data_s *) in_data;
for (i = 0; i < _data->num_tensors; i++)
- _data->tensors[i].tensor = input[i].data;
+ _data->tensors[i].data = input[i].data;
/* call invoke callback */
g_mutex_lock (&c->lock);
in_tensors->num_tensors = single_h->in_info.num_tensors;
for (i = 0; i < in_tensors->num_tensors; i++) {
/** memory will be allocated by tensor_filter_single */
- in_tensors->tensors[i].tensor = NULL;
+ in_tensors->tensors[i].data = NULL;
in_tensors->tensors[i].size =
gst_tensors_info_get_size (&single_h->in_info, i);
}
out_tensors->num_tensors = single_h->out_info.num_tensors;
for (i = 0; i < out_tensors->num_tensors; i++) {
/** memory will be allocated by tensor_filter_single */
- out_tensors->tensors[i].tensor = NULL;
+ out_tensors->tensors[i].data = NULL;
out_tensors->tensors[i].size =
gst_tensors_info_get_size (&single_h->out_info, i);
}
if (G_LIKELY (single_h->filter)) {
if (single_h->klass->allocate_in_invoke (single_h->filter)) {
- single_h->klass->destroy_notify (single_h->filter,
- (GstTensorMemory *) data->tensors);
+ single_h->klass->destroy_notify (single_h->filter, data->tensors);
}
}
{
ml_tensors_data_s *in_data, *out_data;
int status = ML_ERROR_NONE;
- GstTensorMemory *in_tensors, *out_tensors;
in_data = (ml_tensors_data_s *) in;
out_data = (ml_tensors_data_s *) out;
return ML_ERROR_STREAMS_PIPE;
}
- in_tensors = (GstTensorMemory *) in_data->tensors;
- out_tensors = (GstTensorMemory *) out_data->tensors;
-
/* Invoke the thread. */
- if (!single_h->klass->invoke (single_h->filter, in_tensors, out_tensors,
- single_h->free_output)) {
+ if (!single_h->klass->invoke (single_h->filter, in_data->tensors,
+ out_data->tensors, single_h->free_output)) {
const char *fw_name = _ml_get_nnfw_subplugin_name (single_h->nnfw);
_ml_error_report
("Failed to invoke the tensors. The invoke callback of the tensor-filter subplugin '%s' has failed. Please contact the author of tensor-filter-%s (nnstreamer-%s) or review its source code. Note that this usually happens when the designated framework does not support the given model (e.g., trying to run tf-lite 2.6 model with tf-lite 1.13).",
_model->num_tensors);
for (i = 0; i < _data->num_tensors; i++) {
- if (G_UNLIKELY (!_data->tensors[i].tensor))
+ if (G_UNLIKELY (!_data->tensors[i].data))
_ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
"The %d-th input tensor is not valid. There is no valid dimension metadata for this tensor.",
i);
#include <glib.h>
#include <ml-api-common.h>
+#include <tensor_typedef.h>
#ifdef __cplusplus
extern "C" {
typedef int (*ml_handle_destroy_cb) (void *handle, void *user_data);
/**
- * @brief An instance of a single input or output frame.
- * @since_tizen 5.5
- */
-typedef struct {
- void *tensor; /**< The instance of tensor data. */
- size_t size; /**< The size of tensor. */
-} ml_tensor_data_s;
-
-/**
* @brief An instance of input or output frames. #ml_tensors_info_h is the handle for tensors metadata.
* @since_tizen 5.5
*/
typedef struct {
unsigned int num_tensors; /**< The number of tensors. */
- ml_tensor_data_s tensors[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor data. NULL for unused tensors. */
+ GstTensorMemory tensors[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor data. NULL for unused tensors. */
/* private */
ml_tensors_info_h info;
_copied_data_s = (ml_tensors_data_s *) copied_data;
for (i = 0; i < count; ++i) {
- memcpy (_copied_data_s->tensors[i].tensor, data_s->tensors[i].tensor,
+ memcpy (_copied_data_s->tensors[i].data, data_s->tensors[i].data,
data_s->tensors[i].size);
}
jobject tensor = (*env)->GetObjectArrayElement (env, data_arr, i);
gpointer data_ptr = (*env)->GetDirectBufferAddress (env, tensor);
- memcpy (data_ptr, data->tensors[i].tensor, data->tensors[i].size);
+ memcpy (data_ptr, data->tensors[i].data, data->tensors[i].size);
(*env)->DeleteLocalRef (env, tensor);
}
gpointer data_ptr = (*env)->GetDirectBufferAddress (env, tensor);
if (clone) {
- if (data->tensors[i].tensor == NULL)
- data->tensors[i].tensor = g_malloc (data_size);
+ if (data->tensors[i].data == NULL)
+ data->tensors[i].data = g_malloc (data_size);
- memcpy (data->tensors[i].tensor, data_ptr, data_size);
+ memcpy (data->tensors[i].data, data_ptr, data_size);
} else {
- data->tensors[i].tensor = data_ptr;
+ data->tensors[i].data = data_ptr;
}
data->tensors[i].size = data_size;