Fix svace issue, prevent index error case.

Access tensor info through gst_tensors_info_get_nth_info () and
gst_tensors_info_get_size () instead of indexing the info array
directly, so an out-of-range index cannot be dereferenced. Also remove
the unnecessary GstTensorsConfig copy in the protobuf decoder and use
the proper bound macros (NNS_TENSOR_MEMORY_MAX, MIN_RANK) in the
affected code paths.
Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
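
For reviewers, a minimal sketch (not part of the patch) of the accessor pattern the change moves toward. Only gst_tensors_info_get_nth_info () and gst_tensor_info_get_size () are taken from the diff; the helper name sum_tensor_sizes and its body are hypothetical, and the required plugin-API header is assumed to be included by the caller.

/* Illustrative sketch only, not part of this patch: the accessor pattern
 * used throughout the change instead of touching config->info.info[i].
 * Assumes the nnstreamer plugin-API header declaring GstTensorsConfig,
 * gst_tensors_info_get_nth_info () and gst_tensor_info_get_size () is
 * already included. */
static gsize
sum_tensor_sizes (const GstTensorsConfig * config)
{
  GstTensorInfo *_info;
  gsize total = 0;
  guint i;

  for (i = 0; i < config->info.num_tensors; i++) {
    /* Validated lookup instead of a raw array index. */
    _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &config->info, i);
    if (_info == NULL)
      break;
    total += gst_tensor_info_get_size (_info);
  }
  return total;
}
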
guint num_tensors;
gboolean is_flexible;
GstTensorMetaInfo meta;
- GstTensorsConfig pbd_config;
+ GstTensorInfo *_info;
if (!config || !input || !outbuf) {
ml_loge ("NULL parameter is passed to tensor_decoder::protobuf");
return GST_FLOW_ERROR;
}
- gst_tensors_config_copy (&pbd_config, config);
- is_flexible = gst_tensors_config_is_flexible (&pbd_config);
- num_tensors = pbd_config.info.num_tensors;
+ is_flexible = gst_tensors_config_is_flexible (config);
+
+ num_tensors = config->info.num_tensors;
if (num_tensors <= 0 || num_tensors > NNS_TENSOR_SIZE_LIMIT) {
ml_loge ("The number of input tenosrs "
"exceeds more than NNS_TENSOR_SIZE_LIMIT, %s",
return GST_FLOW_ERROR;
}
- fr->set_rate_n (pbd_config.rate_n);
- fr->set_rate_d (pbd_config.rate_d);
+ fr->set_rate_n (config->rate_n);
+ fr->set_rate_d (config->rate_d);
tensors.set_format (
- (nnstreamer::protobuf::Tensors::Tensor_format) pbd_config.info.format);
+ (nnstreamer::protobuf::Tensors::Tensor_format) config->info.format);
for (unsigned int i = 0; i < num_tensors; ++i) {
nnstreamer::protobuf::Tensor *tensor = tensors.add_tensor ();
- gchar *name = NULL;
+
+ _info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) &config->info, i);
if (is_flexible) {
gst_tensor_meta_info_parse_header (&meta, input[i].data);
- gst_tensor_meta_info_convert (&meta, &pbd_config.info.info[i]);
- }
- name = pbd_config.info.info[i].name;
-
- if (name == NULL) {
- tensor->set_name ("");
- } else {
- tensor->set_name (name);
+ gst_tensor_meta_info_convert (&meta, _info);
}
- tensor->set_type (
- (nnstreamer::protobuf::Tensor::Tensor_type) pbd_config.info.info[i].type);
+ tensor->set_name (_info->name ? _info->name : "");
+ tensor->set_type ((nnstreamer::protobuf::Tensor::Tensor_type) _info->type);
for (int j = 0; j < NNS_TENSOR_RANK_LIMIT; ++j) {
- tensor->add_dimension (pbd_config.info.info[i].dimension[j]);
+ tensor->add_dimension (_info->dimension[j]);
}
tensor->set_data (input[i].data, (int) input[i].size);
_info->dimension[j] = dim[j].AsInt32 ();
}
flexbuffers::Blob tensor_data = tensor[3].AsBlob ();
- mem_size = gst_tensor_info_get_size (&config->info.info[i]);
+ mem_size = gst_tensor_info_get_size (_info);
if (gst_tensors_config_is_flexible (config)) {
GstTensorMetaInfo meta;
gst_tensor_meta_info_parse_header (&meta, (gpointer) tensor_data.data ());
GstTensorMetaInfo meta;
gpointer h = map.data + offset;
- if (num >= NNS_TENSOR_SIZE_LIMIT - 1) {
+ if (num >= NNS_TENSOR_MEMORY_MAX - 1) {
/* Suppose the remaining memory may include extra tensors. */
mem_size[num++] = total - offset;
break;
TEST (commonTensorInfo, size01_p)
{
GstTensorsInfo info1, info2;
+ GstTensorInfo *_info;
gsize size1, size2;
guint i;
size1 = 0;
for (i = 0; i < info2.num_tensors; i++) {
- size1 += gst_tensor_info_get_size (&info2.info[i]);
+ _info = gst_tensors_info_get_nth_info (&info2, i);
+ size1 += gst_tensor_info_get_size (_info);
}
size2 = gst_tensors_info_get_size (&info2, -1);
guint mem_size;
gpointer mem_data;
guint *received = (guint *) data;
+ GstTensorInfo *_info;
if (!in_buf || !config)
return NULL;
for (guint i = 0; i < config->info.num_tensors; i++) {
gchar *tensor_key = g_strdup_printf ("tensor_%d", i);
flexbuffers::Vector tensor = tensors[tensor_key].AsVector ();
- config->info.info[i].name = g_strdup (tensor[0].AsString ().c_str ());
- config->info.info[i].type = (tensor_type) tensor[1].AsInt32 ();
+ flexbuffers::String _name = tensor[0].AsString ();
+ const gchar *name = _name.c_str ();
+
+ _info = gst_tensors_info_get_nth_info (&config->info, i);
+
+ _info->name = (name && strlen (name) > 0) ? g_strdup (name) : NULL;
+ _info->type = (tensor_type) tensor[1].AsInt32 ();
flexbuffers::TypedVector dim = tensor[2].AsTypedVector ();
for (guint j = 0; j < NNS_TENSOR_RANK_LIMIT; j++) {
- config->info.info[i].dimension[j] = dim[j].AsInt32 ();
+ _info->dimension[j] = dim[j].AsInt32 ();
}
flexbuffers::Blob tensor_data = tensor[3].AsBlob ();
mem_size = tensor_data.size ();
const GstTensorsInfo * in_info, GstTensorsInfo * out_info)
{
unsigned int i, t;
+ GstTensorInfo *_in, *_out;
+
UNUSED (prop);
UNUSED (private_data);
out_info->num_tensors = in_info->num_tensors;
for (t = 0; t < in_info->num_tensors; t++) {
+ _in = gst_tensors_info_get_nth_info ((GstTensorsInfo *) in_info, t);
+ _out = gst_tensors_info_get_nth_info (out_info, t);
+
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
- out_info->info[t].dimension[i] = in_info->info[t].dimension[i];
+ _out->dimension[i] = _in->dimension[i];
}
- out_info->info[t].type = in_info->info[t].type;
+ _out->type = _in->type;
}
return 0;
assert (output);
for (t = 0; t < prop->output_meta.num_tensors; t++) {
- size = gst_tensor_info_get_size (&prop->output_meta.info[t]);
+ size = gst_tensors_info_get_size (&prop->output_meta, t);
assert (input[t].data != output[t].data);
memcpy (output[t].data, input[t].data, size);
#include <NvInferRuntimeCommon.h>
#include <cuda_runtime_api.h>
+/**
+ * @brief Min rank in this example (TensorRT uses the NCHW data format).
+ */
+#define MIN_RANK (4)
+
using Severity = nvinfer1::ILogger::Severity;
/** @brief a global object of ILogger */
if (prop->custom_properties && strlen (prop->custom_properties) > 0) {
gchar **strv = g_strsplit (prop->custom_properties, ":", -1);
- gsize i;
+ guint i;
- if (g_strv_length (strv) != NNS_TENSOR_RANK_LIMIT - 1) {
+ if (g_strv_length (strv) != MIN_RANK - 1) {
g_critical ("Please specify a proper 'custom' property");
goto err;
}
info.num_tensors = 1;
- for (i = 0; i < NNS_TENSOR_RANK_LIMIT - 1; i++) {
+ for (i = 0; i < MIN_RANK - 1; i++) {
info.info[0].type = _NNS_FLOAT32;
info.info[0].dimension[i] = (int) g_ascii_strtoll (strv[i], NULL, 10);
}
- info.info[0].dimension[NNS_TENSOR_RANK_LIMIT - 1] = 1;
+ info.info[0].dimension[MIN_RANK - 1] = 1;
g_strfreev (strv);
} else {
gst_tensor_parse_dimension ("3:4:2:2", config.info.info[1].dimension);
for (i = 0; i < config.info.num_tensors; i++) {
- input[i].size = gst_tensor_info_get_size (&config.info.info[i]);
+ input[i].size = gst_tensors_info_get_size (&config.info, i);
input[i].data = g_malloc0 (input[0].size);
memcpy (input[i].data, aggr_test_frames[i], input[i].size);
}
const GstTensorsInfo * in_info, GstTensorsInfo * out_info)
{{
_{sname}_data *data = _data;
+ GstTensorInfo *_in, *_out;
int i, j;
assert (data);
/** @todo Configure the name/type/dimension of tensors in an output frame. */
for (i = 0; i < out_info->num_tensors; i++) {{
- out_info->info[i].name = NULL; /** Optional, default is null. Set new memory for tensor name string. */
- out_info->info[i].type = in_info->info[i].type;
+ _in = gst_tensors_info_get_nth_info ((GstTensorsInfo *) in_info, i);
+ _out = gst_tensors_info_get_nth_info (out_info, i);
+
+ _out->name = NULL; /** Optional, default is null. Set new memory for tensor name string. */
+ _out->type = _in->type;
for (j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
- out_info->info[i].dimension[j] = in_info->info[i].dimension[j];
+ _out->dimension[j] = _in->dimension[j];
}}
return 0;
/** Allocate output buffer */
for (i = 0; i < out_info->num_tensors; i++)
- output[i].data = malloc (gst_tensor_info_get_size (&out_info->info[i]));
+ output[i].data = malloc (gst_tensors_info_get_size (out_info, i));
/** @todo Add your inference code/calls. Fill in the output buffer */
for (i = 0; i < out_info->num_tensors; i++) {{
- int s, size = gst_tensor_info_get_size (&out_info->info[i]);
+ int s, size = gst_tensors_info_get_size (out_info, i);
uint8_t *ptr = output[i].data;
for (s = 0; s < size; s++)
ptr[s] = (uint8_t) s;
/** @todo Add your inference code/calls. Fill in the output buffer */
for (i = 0; i < out_info->num_tensors; i++) {{
- int s, size = gst_tensor_info_get_size (&out_info->info[i]);
+ int s, size = gst_tensors_info_get_size (out_info, i);
uint8_t *ptr = output[i].data;
for (s = 0; s < size; s++)
ptr[s] = (uint8_t) s;