From f7c31d883177a35e6984fd818c013f6a8fd181f6 Mon Sep 17 00:00:00 2001
From: hyeonseok lee
Date: Wed, 21 Jul 2021 20:22:30 +0900
Subject: [PATCH] [CAPI] Implement ml_train_model_get_input|output_dims

- Implement ml_train_model_get_input_dimensions
- Implement ml_train_model_get_output_dimensions
- Add a unit test for the implemented APIs

Self evaluation:

  Build test: [X]Passed [ ]Failed [ ]Skipped
  Run test:   [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: hyeonseok lee
---
 api/capi/include/nntrainer.h                |   8 +-
 api/capi/src/nntrainer.cpp                  | 116 ++++++++++++++
 api/ccapi/include/model.h                   |  10 +-
 nntrainer/dataset/random_data_producers.cpp |   4 +-
 nntrainer/models/neuralnet.h                |   6 +
 test/tizen_capi/unittest_tizen_capi.cpp     | 230 ++++++++++++++++++++++++++++
 6 files changed, 366 insertions(+), 8 deletions(-)

diff --git a/api/capi/include/nntrainer.h b/api/capi/include/nntrainer.h
index 1c6b016..478fca6 100644
--- a/api/capi/include/nntrainer.h
+++ b/api/capi/include/nntrainer.h
@@ -234,15 +234,17 @@ int ml_train_model_set_dataset(ml_train_model_h model,
  * @param[out] info The tensors information handle.
  * @return @c 0 on successs. Otherwise a negative error value.
  * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
  * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
+ * @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory.
  */
 int ml_train_model_get_input_tensors_info(ml_train_model_h model,
                                           ml_tensors_info_h *info);
 
 /**
  * @brief Get output tensors information information of the model
- * @details Use this function to get output tensors information information of
- *          the model. destroy @a info with @a ml_tensors_info_destroy() after use.
+ * @details Use this function to get output tensors information of the model.
+ *          Destroy @a info with @a ml_tensors_info_destroy() after use.
  * @remarks @a model must be compiled before calling this function.
  * @remarks the returned @a info is newly created so it does not reflect future
  *          changes in the model
@@ -251,7 +253,9 @@ int ml_train_model_get_input_tensors_info(ml_train_model_h model,
  * @param[out] info The tensors information handle.
  * @return @c 0 on successs. Otherwise a negative error value.
  * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
  * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
+ * @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory.
  */
 int ml_train_model_get_output_tensors_info(ml_train_model_h model,
                                            ml_tensors_info_h *info);

diff --git a/api/capi/src/nntrainer.cpp b/api/capi/src/nntrainer.cpp
index 957d7a9..c2f2479 100644
--- a/api/capi/src/nntrainer.cpp
+++ b/api/capi/src/nntrainer.cpp
@@ -968,6 +968,122 @@ int ml_train_dataset_destroy(ml_train_dataset_h dataset) {
   return status;
 }
 
+int ml_train_model_get_input_tensors_info(ml_train_model_h model,
+                                          ml_tensors_info_h *info) {
+  int status = ML_ERROR_NONE;
+  ml_train_model *nnmodel;
+  std::shared_ptr<ml::train::Model> m;
+  returnable f;
+
+  check_feature_state();
+
+  if (!info) {
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
+  ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
+  m = nnmodel->model;
+  if (m == NULL) {
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  std::vector<ml::train::TensorDim> dims;
+  f = [&]() {
+    dims = m->getInputDimension();
+    return ML_ERROR_NONE;
+  };
+  status = nntrainer_exception_boundary(f);
+  if (status != ML_ERROR_NONE) {
+    return status;
+  }
+
+  status = ml_tensors_info_create(info);
+  if (status != ML_ERROR_NONE) {
+    return status;
+  }
+
+  status = ml_tensors_info_set_count(*info, dims.size());
+  if (status != ML_ERROR_NONE) {
+    ml_tensors_info_destroy(*info);
+    return status;
+  }
+
+  for (unsigned int i = 0; i < dims.size(); ++i) {
+    status = ml_tensors_info_set_tensor_type(*info, i, ML_TENSOR_TYPE_FLOAT32);
+    if (status != ML_ERROR_NONE) {
+      ml_tensors_info_destroy(*info);
+      return status;
+    }
+
+    status = ml_tensors_info_set_tensor_dimension(*info, i, dims[i].getDim());
+    if (status != ML_ERROR_NONE) {
+      ml_tensors_info_destroy(*info);
+      return status;
+    }
+  }
+
+  return status;
+}
+
+int ml_train_model_get_output_tensors_info(ml_train_model_h model,
+                                           ml_tensors_info_h *info) {
+  int status = ML_ERROR_NONE;
+  ml_train_model *nnmodel;
+  std::shared_ptr<ml::train::Model> m;
+  returnable f;
+
+  check_feature_state();
+
+  if (!info) {
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
+  ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
+  m = nnmodel->model;
+  if (m == NULL) {
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  std::vector<ml::train::TensorDim> dims;
+  f = [&]() {
+    dims = m->getOutputDimension();
+    return ML_ERROR_NONE;
+  };
+  status = nntrainer_exception_boundary(f);
+  if (status != ML_ERROR_NONE) {
+    return status;
+  }
+
+  status = ml_tensors_info_create(info);
+  if (status != ML_ERROR_NONE) {
+    return status;
+  }
+
+  status = ml_tensors_info_set_count(*info, dims.size());
+  if (status != ML_ERROR_NONE) {
+    ml_tensors_info_destroy(*info);
+    return status;
+  }
+
+  for (unsigned int i = 0; i < dims.size(); ++i) {
+    status = ml_tensors_info_set_tensor_type(*info, i, ML_TENSOR_TYPE_FLOAT32);
+    if (status != ML_ERROR_NONE) {
+      ml_tensors_info_destroy(*info);
+      return status;
+    }
+
+    status = ml_tensors_info_set_tensor_dimension(*info, i, dims[i].getDim());
+    if (status != ML_ERROR_NONE) {
+      ml_tensors_info_destroy(*info);
+      return status;
+    }
+  }
+
+  return status;
+}
+
 #ifdef __cplusplus
 }
 #endif

diff --git a/api/ccapi/include/model.h b/api/ccapi/include/model.h
index d652521..a6f7545 100644
--- a/api/ccapi/include/model.h
+++ b/api/ccapi/include/model.h
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include <tensor_dim.h>
 
 /** Define more aliases for the model in the API */
 namespace ml {
@@ -184,18 +185,17 @@ public:
    */
  virtual int getLayer(const char *name, std::shared_ptr *layer) = 0;
 
-  /// @todo uncomment this by opening TensorDim
  /**
   * @brief     get input dimension of a model
-   * @retval std::vector input dimension
+   * @retval std::vector<ml::train::TensorDim> input dimension
   */
-  // virtual std::vector getInputDimension() = 0;
+  virtual std::vector<ml::train::TensorDim> getInputDimension() = 0;
 
  /**
   * @brief     get output dimension of a model
-   * @retval std::vector output dimension
+   * @retval std::vector<ml::train::TensorDim> output dimension
   */
-  // virtual std::vector getOutputDimension() = 0;
+  virtual std::vector<ml::train::TensorDim> getOutputDimension() = 0;
 
  /**
   * @brief     Summarize the model

diff --git a/nntrainer/dataset/random_data_producers.cpp b/nntrainer/dataset/random_data_producers.cpp
index f4a0837..94b8900 100644
--- a/nntrainer/dataset/random_data_producers.cpp
+++ b/nntrainer/dataset/random_data_producers.cpp
@@ -93,7 +93,9 @@ DataProducer::Gernerator RandomDataOneHotProducer::finalize(
   const std::vector<TensorDim> &input_dims,
   const std::vector<TensorDim> &label_dims) {
   /** check if the given producer is ready to finalize */
-  auto &[min_, max_, _] = *rd_one_hot_props;
+  nntrainer::PropsMin min_;
+  nntrainer::PropsMax max_;
+  std::tie(min_, max_, std::ignore) = *rd_one_hot_props;
 
   /// @todo expand this to non onehot case
   NNTR_THROW_IF(std::any_of(label_dims.begin(), label_dims.end(),

diff --git a/nntrainer/models/neuralnet.h b/nntrainer/models/neuralnet.h
index 6c8a2d1..f85d48d 100644
--- a/nntrainer/models/neuralnet.h
+++ b/nntrainer/models/neuralnet.h
@@ -361,6 +361,9 @@ public:
    * @retval std::vector<TensorDim> input dimension
    */
  std::vector<TensorDim> getInputDimension() {
+    if (!compiled) {
+      throw std::logic_error("model should be compiled before get dimension");
+    }
    return model_graph.getInputDimension();
  }
 
@@ -369,6 +372,9 @@ public:
    * @retval std::vector<TensorDim> output dimension
    */
  std::vector<TensorDim> getOutputDimension() {
+    if (!compiled) {
+      throw std::logic_error("model should be compiled before get dimension");
+    }
    return model_graph.getOutputDimension();
  }
 

diff --git a/test/tizen_capi/unittest_tizen_capi.cpp b/test/tizen_capi/unittest_tizen_capi.cpp
index ecb6dc3..594f61d 100644
--- a/test/tizen_capi/unittest_tizen_capi.cpp
+++ b/test/tizen_capi/unittest_tizen_capi.cpp
@@ -968,6 +968,236 @@ TEST(nntrainer_capi_summary, summary_02_n) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 }
 
+TEST(nntrainer_capi_nnmodel, get_input_output_dimension_01_p) {
+  ml_train_model_h handle = NULL;
+  ml_tensors_info_h input_info, output_info;
+
+  unsigned int input_count, output_count;
+  const unsigned int MAXDIM = 4;
+  unsigned int input_dim_expected[MAXDIM] = {32, 1, 1, 62720};
+  unsigned int output_dim_expected[MAXDIM] = {32, 1, 1, 10};
+  unsigned int dim[MAXDIM];
+
+  int status = ML_ERROR_NONE;
+
+  ScopedIni s("test_get_input_dimension_01_p",
+              {model_base, optimizer, dataset, inputlayer, outputlayer});
+  status = ml_train_model_construct_with_conf(s.getIniName().c_str(), &handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_train_model_compile(handle, NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_train_model_get_input_tensors_info(handle, &input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_tensors_info_get_count(input_info, &input_count);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  EXPECT_EQ(input_count, 1ul);
+
+  ml_tensors_info_get_tensor_dimension(input_info, 0, dim);
+  for (unsigned int i = 0; i < MAXDIM; ++i) {
+    EXPECT_EQ(dim[i], input_dim_expected[i]);
+  }
+
+  status = ml_train_model_get_output_tensors_info(handle, &output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_tensors_info_get_count(output_info, &output_count);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  EXPECT_EQ(output_count, 1ul);
+
+  ml_tensors_info_get_tensor_dimension(output_info, 0, dim);
+  for (unsigned int i = 0; i < MAXDIM; ++i) {
+    EXPECT_EQ(dim[i], output_dim_expected[i]);
+  }
+
+  status = ml_tensors_info_destroy(input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_tensors_info_destroy(output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_train_model_destroy(handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+TEST(nntrainer_capi_nnmodel, get_input_output_dimension_02_p) {
+  ml_train_model_h handle = NULL;
+  ml_tensors_info_h input_info, output_info;
+
+  unsigned int input_count, output_count;
+  const unsigned int MAXDIM = 4;
+  unsigned int input_dim_expected[MAXDIM] = {32, 1, 1, 62720};
+  unsigned int output_dim_expected[MAXDIM] = {32, 1, 1, 10};
+  unsigned int dim[MAXDIM];
+
+  int status = ML_ERROR_NONE;
+
+  ScopedIni s("test_get_input_dimension_02_p",
+              {model_base, optimizer, dataset, inputlayer, outputlayer});
+  status = ml_train_model_construct_with_conf(s.getIniName().c_str(), &handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_train_model_compile(handle, NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_train_model_get_input_tensors_info(handle, &input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_train_model_get_output_tensors_info(handle, &output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_train_model_destroy(handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_tensors_info_get_count(input_info, &input_count);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  EXPECT_EQ(input_count, 1ul);
+
+  ml_tensors_info_get_tensor_dimension(input_info, 0, dim);
+  for (unsigned int i = 0; i < MAXDIM; ++i) {
+    EXPECT_EQ(dim[i], input_dim_expected[i]);
+  }
+
+  status = ml_tensors_info_get_count(output_info, &output_count);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  EXPECT_EQ(output_count, 1ul);
+
+  ml_tensors_info_get_tensor_dimension(output_info, 0, dim);
+  for (unsigned int i = 0; i < MAXDIM; ++i) {
+    EXPECT_EQ(dim[i], output_dim_expected[i]);
+  }
+
+  status = ml_tensors_info_destroy(input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_tensors_info_destroy(output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+TEST(nntrainer_capi_nnmodel, get_input_output_dimension_03_n) {
+  ml_train_model_h handle = NULL;
+  ml_tensors_info_h input_info, output_info;
+
+  int status = ML_ERROR_NONE;
+
+  ScopedIni s("test_get_input_dimension_03_n",
+              {model_base, optimizer, dataset, inputlayer, outputlayer});
+  status = ml_train_model_construct_with_conf(s.getIniName().c_str(), &handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  EXPECT_THROW(ml_train_model_get_input_tensors_info(handle, &input_info),
+               std::invalid_argument);
+  EXPECT_THROW(ml_train_model_get_output_tensors_info(handle, &output_info),
+               std::invalid_argument);
+
+  status = ml_tensors_info_destroy(input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_tensors_info_destroy(output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+TEST(nntrainer_capi_nnmodel, get_input_output_dimension_04_n) {
+  ml_train_model_h handle = NULL;
+  ml_tensors_info_h input_info, output_info;
+
+  int status = ML_ERROR_NONE;
+
+  EXPECT_EQ(ml_train_model_get_input_tensors_info(handle, &input_info),
+            ML_ERROR_INVALID_PARAMETER);
+  EXPECT_EQ(ml_train_model_get_output_tensors_info(handle, &output_info),
+            ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_tensors_info_destroy(input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_tensors_info_destroy(output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+TEST(nntrainer_capi_nnmodel, get_input_output_dimension_05_n) {
+  ml_train_model_h handle = NULL;
+  ml_tensors_info_h input_info, output_info;
+
+  int status = ML_ERROR_NONE;
+
+  ScopedIni s("test_get_input_dimension_05_n",
+              {model_base, optimizer, dataset, inputlayer, outputlayer});
+  status = ml_train_model_construct_with_conf(s.getIniName().c_str(), &handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_train_model_compile(handle, NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_train_model_destroy(handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  EXPECT_EQ(ml_train_model_get_input_tensors_info(handle, &input_info),
+            ML_ERROR_INVALID_PARAMETER);
+  EXPECT_EQ(ml_train_model_get_output_tensors_info(handle, &output_info),
+            ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_tensors_info_destroy(input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_tensors_info_destroy(output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+TEST(nntrainer_capi_nnmodel, get_input_output_dimension_06_n) {
+  ml_train_model_h handle = NULL;
+  ml_tensors_info_h input_info, output_info;
+
+  unsigned int input_count, output_count;
+  const unsigned int MAXDIM = 4;
+  unsigned int input_dim_expected[MAXDIM] = {32, 1, 1, 62720};
+  unsigned int output_dim_expected[MAXDIM] = {32, 1, 1, 10};
+  unsigned int dim[MAXDIM];
+
+  int status = ML_ERROR_NONE;
+
+  ScopedIni s("test_get_input_dimension_06_n",
+              {model_base, optimizer, dataset, inputlayer, outputlayer});
+  status = ml_train_model_construct_with_conf(s.getIniName().c_str(), &handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_train_model_compile(handle, NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_train_model_get_input_tensors_info(handle, &input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_tensors_info_get_count(input_info, &input_count);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  EXPECT_EQ(input_count, 1ul);
+
+  ml_tensors_info_get_tensor_dimension(input_info, 0, dim);
+  for (unsigned int i = 0; i < MAXDIM; ++i) {
+    EXPECT_NE(dim[i], input_dim_expected[i] - 1);
+    EXPECT_NE(dim[i], input_dim_expected[i] + 1);
+  }
+
+  status = ml_train_model_get_output_tensors_info(handle, &output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_tensors_info_get_count(output_info, &output_count);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  EXPECT_EQ(output_count, 1ul);
+
+  ml_tensors_info_get_tensor_dimension(output_info, 0, dim);
+  for (unsigned int i = 0; i < MAXDIM; ++i) {
+    EXPECT_NE(dim[i], output_dim_expected[i] - 1);
+    EXPECT_NE(dim[i], output_dim_expected[i] + 1);
+  }
+
+  status = ml_tensors_info_destroy(input_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = ml_tensors_info_destroy(output_info);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_train_model_destroy(handle);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
 /**
  * @brief Main gtest
  */
-- 
2.7.4
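Note (not part of the patch): the gtest cases above exercise the new getters end to end. Below is a minimal sketch of the same flow from a plain C application, for illustration only. The INI path "model.ini" is a placeholder, the dimension buffer size uses ML_TENSOR_RANK_LIMIT from the common ML API header, and error handling is abbreviated.

/*
 * Illustrative sketch only; not part of the patch. Shows how an application
 * might read the input dimensions exposed by the new getter.
 */
#include <nntrainer.h>
#include <stdio.h>

int main(void) {
  ml_train_model_h model = NULL;
  ml_tensors_info_h in_info = NULL;
  unsigned int count = 0;
  unsigned int dim[ML_TENSOR_RANK_LIMIT] = {0};
  int status;

  /* Construct from an INI description; the dimension getters are only
   * valid once the model has been compiled. */
  status = ml_train_model_construct_with_conf("model.ini", &model);
  if (status != ML_ERROR_NONE)
    return status;
  status = ml_train_model_compile(model, NULL);
  if (status != ML_ERROR_NONE)
    goto cleanup;

  /* The returned info handle is newly created; destroy it after use. */
  status = ml_train_model_get_input_tensors_info(model, &in_info);
  if (status != ML_ERROR_NONE)
    goto cleanup;

  ml_tensors_info_get_count(in_info, &count);
  ml_tensors_info_get_tensor_dimension(in_info, 0, dim);
  printf("input tensors: %u, first dimension: %u:%u:%u:%u\n", count, dim[0],
         dim[1], dim[2], dim[3]);

  ml_tensors_info_destroy(in_info);

cleanup:
  ml_train_model_destroy(model);
  return status;
}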