Use the util function gst_tensors_info_get_nth_info() to fetch the nth tensor info from the tensors-info struct, instead of indexing the info array directly.
Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
for (i = 0; i < nnfw_info->num_tensors; i++) {
const nnfw_tensorinfo *ninfo = &nnfw_info->info[i];
- GstTensorInfo *ginfo = &gst_info->info[i];
+ GstTensorInfo *ginfo = gst_tensors_info_get_nth_info (gst_info, i);
gint idx;
if (ninfo->rank > NNS_TENSOR_RANK_LIMIT)
struct nnfw_tensorinfo nnfw_info;
gint err;
gint idx;
- const GstTensorInfo *info = &tensors_info->info[tensor_idx];
+ GstTensorInfo *info;
+
+ info = gst_tensors_info_get_nth_info ((GstTensorsInfo *) tensors_info,
+ tensor_idx);
+ if (!info)
+ return -EINVAL;
err = nnfw_tensor_type_from_gst (info->type, &nnfw_info.dtype);
if (err)
void
TensorFilterTRIxEngine::configure_instance (const GstTensorFilterProperties *prop)
{
+ GstTensorInfo *_info;
uint32_t i, j, rank_limit;
if (!prop->model_files[0] || prop->model_files[0][0] == '\0') {
if (prop->input_meta.num_tensors == 0) {
nns_in_info_.num_tensors = model_meta_->input_seg_num;
for (i = 0; i < nns_in_info_.num_tensors; i++) {
- nns_in_info_.info[i].type = _NNS_UINT8;
+ _info = gst_tensors_info_get_nth_info (&nns_in_info_, i);
+
+ _info->type = _NNS_UINT8;
for (j = 0; j < rank_limit; j++)
- nns_in_info_.info[i].dimension[j]
- = model_meta_->input_seg_dims[i][rank_limit - j - 1];
+ _info->dimension[j] = model_meta_->input_seg_dims[i][rank_limit - j - 1];
for (; j < NNS_TENSOR_RANK_LIMIT; j++)
- nns_in_info_.info[i].dimension[j] = 1;
+ _info->dimension[j] = 1;
}
} else {
gst_tensors_info_copy (&nns_in_info_, &prop->input_meta);
if (prop->output_meta.num_tensors == 0) {
nns_out_info_.num_tensors = model_meta_->output_seg_num;
for (i = 0; i < nns_out_info_.num_tensors; i++) {
- nns_out_info_.info[i].type = _NNS_UINT8;
+ _info = gst_tensors_info_get_nth_info (&nns_out_info_, i);
+
+ _info->type = _NNS_UINT8;
for (j = 0; j < rank_limit; j++)
- nns_out_info_.info[i].dimension[j]
- = model_meta_->output_seg_dims[i][rank_limit - j - 1];
+ _info->dimension[j] = model_meta_->output_seg_dims[i][rank_limit - j - 1];
for (; j < NNS_TENSOR_RANK_LIMIT; j++)
- nns_out_info_.info[i].dimension[j] = 1;
+ _info->dimension[j] = 1;
}
} else {
gst_tensors_info_copy (&nns_out_info_, &prop->output_meta);
const GstTensorFilterFramework *fw = nnstreamer_filter_find (fw_name);
GstTensorFilterProperties *prop = NULL;
GstTensorsInfo nns_tensors_info;
+ GstTensorInfo *_info;
gpointer private_data = NULL;
std::string str_test_model;
gchar *test_model;
EXPECT_EQ (ret, 0);
EXPECT_EQ (nns_tensors_info.num_tensors, MOBINET_V2_IN_NUM_TENSOR);
for (uint32_t i = 0; i < MOBINET_V2_IN_NUM_TENSOR; ++i) {
- for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; ++j) {
- EXPECT_EQ (nns_tensors_info.info[i].dimension[j], MOBINET_V2_IN_DIMS[j]);
- }
+ _info = gst_tensors_info_get_nth_info (&nns_tensors_info, i);
+ EXPECT_TRUE (gst_tensor_dimension_is_equal (_info->dimension, MOBINET_V2_IN_DIMS));
}
/* Test getOutputDimension () */
EXPECT_EQ (ret, 0);
EXPECT_EQ (nns_tensors_info.num_tensors, MOBINET_V2_OUT_NUM_TENSOR);
for (uint32_t i = 0; i < MOBINET_V2_OUT_NUM_TENSOR; ++i) {
- for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; ++j) {
- EXPECT_EQ (nns_tensors_info.info[i].dimension[j], MOBINET_V2_OUT_DIMS[j]);
- }
+ _info = gst_tensors_info_get_nth_info (&nns_tensors_info, i);
+ EXPECT_TRUE (gst_tensor_dimension_is_equal (_info->dimension, MOBINET_V2_OUT_DIMS));
}
fw->close (prop, &private_data);
*/
TEST (nnstreamerNnfwRuntimeRawFunctions, setDimension)
{
- int ret, i;
+ int ret;
void *data = NULL;
GstTensorsInfo in_info, out_info, res;
GstTensorMemory input, output;
EXPECT_EQ (res.num_tensors, in_info.num_tensors);
EXPECT_EQ (res.info[0].type, in_info.info[0].type);
-
- for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- EXPECT_EQ (res.info[0].dimension[i], in_info.info[0].dimension[i]);
+ EXPECT_TRUE (gst_tensor_dimension_is_equal (
+ res.info[0].dimension, in_info.info[0].dimension));
ret = sp->getOutputDimension (&prop, &data, &out_info);
EXPECT_EQ (ret, 0);
EXPECT_EQ (res.num_tensors, out_info.num_tensors);
EXPECT_EQ (res.info[0].type, out_info.info[0].type);
- for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- EXPECT_EQ (res.info[0].dimension[i], out_info.info[0].dimension[i]);
+ EXPECT_TRUE (gst_tensor_dimension_is_equal (
+ res.info[0].dimension, out_info.info[0].dimension));
input.size = gst_tensor_info_get_size (&in_info.info[0]);
output.size = gst_tensor_info_get_size (&out_info.info[0]);