From: Jaeyun Date: Fri, 2 Aug 2019 07:24:44 +0000 (+0900) Subject: [Api/Tizen] function to get data size X-Git-Tag: accepted/tizen/unified/20190905.060558~11 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d35ba334b73abeb7a328679fedaea3afd5ada915;p=platform%2Fupstream%2Fnnstreamer.git [Api/Tizen] function to get data size The current function returns the total byte size of the info handle. Change this to get the byte size of the tensor at a given data index. Signed-off-by: Jaeyun Jung --- diff --git a/api/capi/include/nnstreamer.h b/api/capi/include/nnstreamer.h index a18a066..33918b5 100644 --- a/api/capi/include/nnstreamer.h +++ b/api/capi/include/nnstreamer.h @@ -700,12 +700,18 @@ int ml_tensors_info_set_tensor_dimension (ml_tensors_info_h info, unsigned int i int ml_tensors_info_get_tensor_dimension (ml_tensors_info_h info, unsigned int index, ml_tensor_dimension dimension); /** - * @brief Gets the byte size of the given tensors type. + * @brief Gets the byte size of the given handle of tensors information. + * @details If an application needs to get the total byte size of tensors, set the @a index to '-1'. Note that the maximum number of tensors is 16 (#ML_TENSOR_SIZE_LIMIT). * @since_tizen 5.5 - * @param[in] info The tensors' information handle. - * @return @c >= 0 on success with byte size. + * @param[in] info The handle of tensors information. + * @param[in] index The index of the tensor. + * @param[out] data_size The byte size of tensor data. + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful + * @retval #ML_ERROR_NOT_SUPPORTED Not supported. + * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. */ -size_t ml_tensors_info_get_size (const ml_tensors_info_h info); +int ml_tensors_info_get_tensor_size (ml_tensors_info_h info, int index, size_t *data_size); /** * @brief Creates a tensor data frame with the given tensors information. 
diff --git a/api/capi/src/nnstreamer-capi-util.c b/api/capi/src/nnstreamer-capi-util.c index 7dd6d77..241dee6 100644 --- a/api/capi/src/nnstreamer-capi-util.c +++ b/api/capi/src/nnstreamer-capi-util.c @@ -418,29 +418,39 @@ ml_tensor_info_get_size (const ml_tensor_info_s * info) } /** - * @brief Gets the byte size of the given tensors info. + * @brief Gets the byte size of the given handle of tensors information. */ -size_t -ml_tensors_info_get_size (const ml_tensors_info_h info) +int +ml_tensors_info_get_tensor_size (ml_tensors_info_h info, + int index, size_t *data_size) { ml_tensors_info_s *tensors_info; - size_t tensor_size; - gint i; - if (ML_ERROR_NONE != ml_get_feature_enabled()) - return 0; + check_feature_state (); + + if (!info || !data_size) + return ML_ERROR_INVALID_PARAMETER; tensors_info = (ml_tensors_info_s *) info; - if (!tensors_info) - return 0; + /* init 0 */ + *data_size = 0; - tensor_size = 0; - for (i = 0; i < tensors_info->num_tensors; i++) { - tensor_size += ml_tensor_info_get_size (&tensors_info->info[i]); + if (index < 0) { + guint i; + + /* get total byte size */ + for (i = 0; i < tensors_info->num_tensors; i++) { + *data_size += ml_tensor_info_get_size (&tensors_info->info[i]); + } + } else { + if (tensors_info->num_tensors <= index) + return ML_ERROR_INVALID_PARAMETER; + + *data_size = ml_tensor_info_get_size (&tensors_info->info[index]); } - return tensor_size; + return ML_ERROR_NONE; } /** diff --git a/tests/tizen_capi/unittest_tizen_capi.cpp b/tests/tizen_capi/unittest_tizen_capi.cpp index ff0be71..e724ba2 100644 --- a/tests/tizen_capi/unittest_tizen_capi.cpp +++ b/tests/tizen_capi/unittest_tizen_capi.cpp @@ -1077,6 +1077,107 @@ TEST (nnstreamer_capi_util, availability_00) EXPECT_EQ (result, false); } +/** + * @brief Test NNStreamer Utility for checking tensors info handle + */ +TEST (nnstreamer_capi_util, tensors_info) +{ + ml_tensors_info_h info; + ml_tensor_dimension in_dim, out_dim; + ml_tensor_type_e out_type; + gchar *out_name; 
+ size_t data_size; + int status; + + status = ml_tensors_info_create (&info); + EXPECT_EQ (status, ML_ERROR_NONE); + + in_dim[0] = 3; + in_dim[1] = 300; + in_dim[2] = 300; + in_dim[3] = 1; + + /* add tensor info */ + status = ml_tensors_info_set_count (info, 2); + EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_type (info, 0, ML_TENSOR_TYPE_UINT8); + EXPECT_EQ (status, ML_ERROR_NONE); + status = ml_tensors_info_set_tensor_dimension (info, 0, in_dim); + EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_type (info, 1, ML_TENSOR_TYPE_FLOAT64); + EXPECT_EQ (status, ML_ERROR_NONE); + status = ml_tensors_info_set_tensor_dimension (info, 1, in_dim); + EXPECT_EQ (status, ML_ERROR_NONE); + status = ml_tensors_info_set_tensor_name (info, 1, "tensor-name-test"); + EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_tensors_info_set_tensor_type (info, 2, ML_TENSOR_TYPE_UINT64); + EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + status = ml_tensors_info_set_tensor_dimension (info, 2, in_dim); + EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + + /* get tensor info */ + status = ml_tensors_info_get_tensor_type (info, 0, &out_type); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (out_type, ML_TENSOR_TYPE_UINT8); + + status = ml_tensors_info_get_tensor_dimension (info, 0, out_dim); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (out_dim[0], 3U); + EXPECT_EQ (out_dim[1], 300U); + EXPECT_EQ (out_dim[2], 300U); + EXPECT_EQ (out_dim[3], 1U); + + status = ml_tensors_info_get_tensor_name (info, 0, &out_name); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (out_name == NULL); + + status = ml_tensors_info_get_tensor_type (info, 1, &out_type); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (out_type, ML_TENSOR_TYPE_FLOAT64); + + status = ml_tensors_info_get_tensor_dimension (info, 1, out_dim); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (out_dim[0], 3U); + EXPECT_EQ (out_dim[1], 300U); + EXPECT_EQ (out_dim[2], 300U); + 
EXPECT_EQ (out_dim[3], 1U); + + status = ml_tensors_info_get_tensor_name (info, 1, &out_name); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (out_name && g_str_equal (out_name, "tensor-name-test")); + + status = ml_tensors_info_get_tensor_type (info, 2, &out_type); + EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_tensors_info_get_tensor_dimension (info, 2, out_dim); + EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_tensors_info_get_tensor_name (info, 2, &out_name); + EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + + /* get tensor size */ + status = ml_tensors_info_get_tensor_size (info, 0, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (data_size == (3 * 300 * 300)); + + status = ml_tensors_info_get_tensor_size (info, 1, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (data_size == (3 * 300 * 300 * 8)); + + status = ml_tensors_info_get_tensor_size (info, -1, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_TRUE (data_size == ((3 * 300 * 300) + (3 * 300 * 300 * 8))); + + status = ml_tensors_info_get_tensor_size (info, 2, &data_size); + EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER); + + status = ml_tensors_info_destroy (info); + EXPECT_EQ (status, ML_ERROR_NONE); +} + #ifdef ENABLE_TENSORFLOW_LITE /** * @brief Test NNStreamer single shot (tensorflow-lite) @@ -1423,7 +1524,9 @@ TEST (nnstreamer_capi_singleshot, invoke_04) ml_tensors_info_set_tensor_dimension (out_info, 0, out_dim); ASSERT_TRUE (g_file_get_contents (test_file, &contents, &len, NULL)); - ASSERT_TRUE (len == ml_tensors_info_get_size (in_info)); + status = ml_tensors_info_get_tensor_size (in_info, 0, &data_size); + EXPECT_EQ (status, ML_ERROR_NONE); + ASSERT_TRUE (len == data_size); status = ml_single_open (&single, test_model, in_info, out_info, ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_ANY);