From d15c3d60048bceee9224e8680b892ac61ca48d72 Mon Sep 17 00:00:00 2001
From: Parichay Kapoor
Date: Mon, 6 Jul 2020 14:51:59 +0900
Subject: [PATCH] [API] Search layer by name in C-API

Support searching for a layer by name in the C-API.
This is part of a larger set of changes to support this functionality.
Related issue - #260

Signed-off-by: Parichay Kapoor
---
 .../Classification/res/Classification_new.ini |  4 -
 api/capi/include/nntrainer.h                  |  2 +-
 api/capi/include/nntrainer_internal.h         | 24 +++++-
 api/capi/src/nntrainer.cpp                    | 47 ++++++++--
 nntrainer/include/layer.h                     | 22 +----
 nntrainer/include/neuralnet.h                 | 24 ++++++
 nntrainer/src/layer.cpp                       | 38 +--------
 nntrainer/src/neuralnet.cpp                   | 58 ++++++++++++-
 nntrainer/src/parse_util.cpp                  |  5 +-
 test/tizen_capi/test_conf.ini                 |  3 -
 test/tizen_capi/unittest_tizen_capi.cpp       | 99 +++++++++++++++++++++-
 test/unittest/unittest_nntrainer_layers.cpp   | 17 +---
 12 files changed, 255 insertions(+), 88 deletions(-)

diff --git a/Applications/Classification/res/Classification_new.ini b/Applications/Classification/res/Classification_new.ini
index 27bf1a9..ff074e5 100644
--- a/Applications/Classification/res/Classification_new.ini
+++ b/Applications/Classification/res/Classification_new.ini
@@ -19,23 +19,19 @@ epsilon = 1e-8 # epsilon for adam
 # Layer Section : Name
 [inputlayer]
 Type = InputLayer
-Id = 0                 # Layer Id
 HiddenSize = 62720     # Input Layer Dimension
 Bias_init_zero = true  # Zero Bias
 
 [fc1layer]
 Type = FullyConnectedLayer
-Id = 1
 HiddenSize = 128       # Hidden Layer Dimension ( = Weight Width )
 Bias_init_zero = true
 
 [batchnormalization]
 Type = BatchNormalizationLayer
-Id = 2
 
 [outputlayer]
 Type = OutputLayer
-Id = 3
 HiddenSize = 10        # Output Layer Dimension ( = Weight Width )
 Bias_init_zero = true
 Softmax = true
diff --git a/api/capi/include/nntrainer.h b/api/capi/include/nntrainer.h
index ee9f199..567d50a 100644
--- a/api/capi/include/nntrainer.h
+++ b/api/capi/include/nntrainer.h
@@ -136,7 +136,7 @@ int ml_nnmodel_train_with_generator(ml_nnmodel_h model,
 
 /**
  * @brief Destructs the neural network model.
- * @details Use this function to delete Neural Netowrk Model.
+ * @details Use this function to delete Neural Network Model.
  * @since_tizen 6.x
  * @param[in] model The NNTrainer model handler from the given description.
 * @return @c 0 on success. Otherwise a negative error value.
diff --git a/api/capi/include/nntrainer_internal.h b/api/capi/include/nntrainer_internal.h
index d4ac517..350c7e8 100644
--- a/api/capi/include/nntrainer_internal.h
+++ b/api/capi/include/nntrainer_internal.h
@@ -24,8 +24,12 @@
 #ifndef __NNTRAINER_INTERNAL_H__
 #define __NNTRAINER_INTERNAL_H__
 
+#include <string>
+#include <unordered_map>
 #include <nntrainer.h>
-#include <vector>
+#include <layer.h>
+#include <neuralnet.h>
+#include <optimizer.h>
 
 #define ML_NNTRAINER_MAGIC 0x777F888F
 
@@ -48,7 +52,7 @@ typedef struct {
 typedef struct {
   uint magic;
   std::shared_ptr<nntrainer::NeuralNetwork> network;
-  std::vector<ml_nnlayer *> layers;
+  std::unordered_map<std::string, ml_nnlayer *> layers_map;
   ml_nnopt *optimizer;
 } ml_nnmodel;
 
@@ -91,6 +95,22 @@ typedef struct {
     }                                              \
   }                                                \
   } while (0)
 
+/**
+ * @brief Get neural network layer from the model with the given name.
+ * @details Use this function to get an already created Neural Network Layer.
+ * The returned layer must not be deleted as it is owned by the model.
+ * @since_tizen 6.x
+ * @param[in] model The NNTrainer model handler from the given description.
+ * @param[in] layer_name Name of the already created layer.
+ * @param[out] layer The NNTrainer Layer handler from the given description.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
+ * @retval #ML_ERROR_CANNOT_ASSIGN_ADDRESS Cannot assign object.
+ */
+int ml_nnmodel_get_layer(ml_nnmodel_h model, const char *layer_name,
+                         ml_nnlayer_h *layer);
+
 #ifdef __cplusplus
 }
diff --git a/api/capi/src/nntrainer.cpp b/api/capi/src/nntrainer.cpp
index e6fe658..ce45180 100644
--- a/api/capi/src/nntrainer.cpp
+++ b/api/capi/src/nntrainer.cpp
@@ -243,9 +243,9 @@ int ml_nnmodel_destruct(ml_nnmodel_h model) {
   if (nnmodel->optimizer)
     delete nnmodel->optimizer;
 
-  for (auto iter = nnmodel->layers.begin(); iter != nnmodel->layers.end();
-       ++iter)
-    delete (*iter);
+  for (auto &x : nnmodel->layers_map)
+    delete (x.second);
+  nnmodel->layers_map.clear();
 
   delete nnmodel;
   return status;
@@ -270,7 +270,7 @@ int ml_nnmodel_add_layer(ml_nnmodel_h model, ml_nnlayer_h layer) {
   status = nntrainer_exception_boundary(f);
   if (status == ML_ERROR_NONE) {
     nnlayer->in_use = true;
-    nnmodel->layers.push_back(nnlayer);
+    nnmodel->layers_map.insert({NL->getName(), nnlayer});
   }
 
   return status;
@@ -303,12 +303,48 @@ int ml_nnmodel_set_optimizer(ml_nnmodel_h model, ml_nnopt_h optimizer) {
   return status;
 }
 
+int ml_nnmodel_get_layer(ml_nnmodel_h model, const char *layer_name,
+                         ml_nnlayer_h *layer) {
+  int status = ML_ERROR_NONE;
+  ml_nnmodel *nnmodel;
+  ML_NNTRAINER_CHECK_MODEL_VALIDATION(nnmodel, model);
+
+  std::shared_ptr<nntrainer::NeuralNetwork> NN;
+  std::shared_ptr<nntrainer::Layer> NL;
+
+  std::unordered_map<std::string, ml_nnlayer *>::iterator layer_iter =
+    nnmodel->layers_map.find(std::string(layer_name));
+  if (layer_iter != nnmodel->layers_map.end()) {
+    *layer = layer_iter->second;
+    return status;
+  }
+
+  NN = nnmodel->network;
+  returnable f = [&]() { return NN->getLayer(layer_name, &NL); };
+  status = nntrainer_exception_boundary(f);
+
+  if (status != ML_ERROR_NONE)
+    return status;
+
+  ml_nnlayer *nnlayer = new ml_nnlayer;
+  nnlayer->magic = ML_NNTRAINER_MAGIC;
+  nnlayer->layer = NL;
+  *layer = nnlayer;
+
+  status = ml_nnmodel_add_layer(model, *layer);
+  if (status != ML_ERROR_NONE) {
+    delete nnlayer;
+    *layer = nullptr;
+  }
+
+  return status;
+}
+
 int ml_nnlayer_create(ml_nnlayer_h *layer, ml_layer_type_e type) {
   int status = ML_ERROR_NONE;
   returnable f;
   ml_nnlayer *nnlayer = new ml_nnlayer;
   nnlayer->magic = ML_NNTRAINER_MAGIC;
-  *layer = nnlayer;
 
   try {
     switch (type) {
@@ -331,6 +367,7 @@ int ml_nnlayer_create(ml_nnlayer_h *layer, ml_layer_type_e type) {
   }
 
   nnlayer->in_use = false;
+  *layer = nnlayer;
   return status;
 }
 
diff --git a/nntrainer/include/layer.h b/nntrainer/include/layer.h
index f0cb0b9..de02a38 100644
--- a/nntrainer/include/layer.h
+++ b/nntrainer/include/layer.h
@@ -119,6 +119,7 @@ typedef enum {
 class Layer {
 public:
   Layer() :
+    name(std::string()),
     last_layer(false),
     bias_init_zero(false),
     type(LAYER_UNKNOWN),
@@ -339,12 +340,12 @@ public:
   int setName(std::string name);
 
   /**
-   * @brief Set name of the layer
+   * @brief Get name of the layer
    */
-  std::string getName();
+  std::string getName() { return name; }
 
   /**
-   * @brief Get base name of the layer
+   * @brief     Get base name of the layer
    */
   virtual std::string getBaseName() = 0;
 
@@ -494,21 +495,6 @@ protected:
 
 private:
   /**
-   * @brief Set containing all the names of layers
-   */
-  static std::set<std::string> layer_names;
-
-  /**
-   * @brief Count assigned to layer names declared by default
-   */
-  static int def_name_count;
-
-  /**
-   * @brief Ensure that layer has a name
-   */
-  void ensureName();
-
-  /**
    * @brief Convert vector of reference to vector of objects
    */
   std::shared_ptr<std::vector<Tensor>>
diff --git a/nntrainer/include/neuralnet.h b/nntrainer/include/neuralnet.h
index 514b67d..e379d16 100644
--- a/nntrainer/include/neuralnet.h
+++ b/nntrainer/include/neuralnet.h
@@ -266,6 +266,15 @@ public:
    */
   int setOptimizer(std::shared_ptr<Optimizer> optimizer);
 
+  /*
+   * @brief     get layer by name from neural network model
+   * @param[in] name name of the layer to get
+   * @param[out] layer shared_ptr to hold the layer to get
+   * @retval #ML_ERROR_NONE Successful.
+   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   */
+  int getLayer(const char *name, std::shared_ptr<Layer> *layer);
+
   enum class PropertyType {
     loss = 0,
     cost = 1,
@@ -355,6 +364,16 @@ private:
   bool initialized;
 
   /**
+   * @brief Set containing all the names of layers in the model
+   */
+  std::set<std::string> layer_names;
+
+  /**
+   * @brief Count assigned to layer names declared by default
+   */
+  int def_name_count;
+
+  /**
    * @brief Sets up and initialize the loss layer
    */
   int initLossLayer();
@@ -397,6 +416,11 @@ private:
    * @note layer is inserted at the back of layers
    */
   int initFlattenLayer();
+
+  /**
+   * @brief Ensure that layer has a name
+   */
+  void ensureName(std::shared_ptr<Layer> layer, std::string prefix = "");
 };
 
 } /* namespace nntrainer */
diff --git a/nntrainer/src/layer.cpp b/nntrainer/src/layer.cpp
index f7cef86..a8d595b 100644
--- a/nntrainer/src/layer.cpp
+++ b/nntrainer/src/layer.cpp
@@ -29,9 +29,6 @@
 
 namespace nntrainer {
 
-int Layer::def_name_count = 0;
-std::set<std::string> Layer::layer_names;
-
 int Layer::setActivation(ActiType acti) {
   int status = ML_ERROR_NONE;
   if (acti == ACT_UNKNOWN) {
@@ -179,40 +176,11 @@ int Layer::setProperty(std::vector<std::string> values) {
 }
 
 int Layer::setName(std::string name) {
-  int status = ML_ERROR_NONE;
-  std::pair<std::set<std::string>::iterator, bool> ret;
-
   if (name.empty())
-    status = ML_ERROR_INVALID_PARAMETER;
-
-  if (name == this->name)
-    return status;
-
-  ret = layer_names.insert(name);
-  if (ret.second == false)
-    status = ML_ERROR_INVALID_PARAMETER;
-  else
-    this->name = name;
-
-  return status;
-}
-
-std::string Layer::getName() {
-  ensureName();
-  return name;
-}
-
-void Layer::ensureName() {
-  if (name.empty()) {
-    std::set<std::string>::iterator iter;
-
-    do {
-      name = getBaseName() + std::to_string(def_name_count++);
-      iter = layer_names.find(name);
-    } while (iter != layer_names.end());
+    return ML_ERROR_INVALID_PARAMETER;
 
-    layer_names.insert(name);
-  }
+  this->name = name;
+  return ML_ERROR_NONE;
 }
 
 } /* namespace nntrainer */
diff --git a/nntrainer/src/neuralnet.cpp b/nntrainer/src/neuralnet.cpp
index 2f1818a..460dd82 100644
--- a/nntrainer/src/neuralnet.cpp
+++ b/nntrainer/src/neuralnet.cpp
@@ -116,7 +116,8 @@ NeuralNetwork::NeuralNetwork(std::string config) :
   data_buffer(NULL),
   continue_train(false),
   iter(0),
-  initialized(false) {
+  initialized(false),
+  def_name_count(0) {
   this->setConfig(config);
 }
 
@@ -260,6 +261,10 @@ int NeuralNetwork::loadFromConfig() {
     case LAYER_IN: {
       std::shared_ptr<InputLayer> input_layer =
         std::make_shared<InputLayer>();
 
+      status = input_layer->setName(layer_name);
+      if (status != ML_ERROR_NONE)
+        return status;
+
       std::string input_shape_str = iniparser_getstring(
         ini, (layer_name + ":Input_Shape").c_str(), unknown);
@@ -285,6 +290,9 @@ int NeuralNetwork::loadFromConfig() {
 
       std::shared_ptr<Conv2DLayer> conv2d_layer =
         std::make_shared<Conv2DLayer>();
 
+      status = conv2d_layer->setName(layer_name);
+      NN_INI_RETURN_STATUS();
+
       std::string input_shape_str = iniparser_getstring(
         ini, (layer_name + ":Input_Shape").c_str(), unknown);
@@ -350,6 +358,9 @@ int NeuralNetwork::loadFromConfig() {
       std::shared_ptr<Pooling2DLayer> pooling2d_layer =
         std::make_shared<Pooling2DLayer>();
 
+      status = pooling2d_layer->setName(layer_name);
+      NN_INI_RETURN_STATUS();
+
       status = getValues(
         POOLING2D_DIM,
         iniparser_getstring(ini, (layer_name + ":pooling_size").c_str(),
@@ -391,6 +402,9 @@ int NeuralNetwork::loadFromConfig() {
       std::shared_ptr<FlattenLayer> flatten_layer =
         std::make_shared<FlattenLayer>();
 
+      status = flatten_layer->setName(layer_name);
+      NN_INI_RETURN_STATUS();
+
       addLayer(flatten_layer);
     } break;
 
@@ -399,6 +413,9 @@ int NeuralNetwork::loadFromConfig() {
 
      std::shared_ptr<FullyConnectedLayer> fc_layer =
         std::make_shared<FullyConnectedLayer>();
 
+      status = fc_layer->setName(layer_name);
+      NN_INI_RETURN_STATUS();
+
       std::string input_shape_str = iniparser_getstring(
         ini, (layer_name + ":Input_Shape").c_str(), unknown);
@@ -433,6 +450,9 @@ int NeuralNetwork::loadFromConfig() {
 
       std::shared_ptr<BatchNormalizationLayer> bn_layer =
         std::make_shared<BatchNormalizationLayer>();
 
+      status = bn_layer->setName(layer_name);
+      NN_INI_RETURN_STATUS();
+
       // fixme: deprecate this.
       layers.back()->setBNfollow(true);
@@ -495,6 +515,8 @@ int NeuralNetwork::initLossLayer() {
   }
 
   std::shared_ptr<LossLayer> loss_layer = std::make_shared<LossLayer>();
+  ensureName(loss_layer);
+
   loss_layer->setInputDimension(layers.back()->getOutputDimension());
   status = loss_layer->initialize(true);
   NN_RETURN_STATUS();
@@ -1038,6 +1060,8 @@ int NeuralNetwork::addLayer(std::shared_ptr<Layer> layer) {
     return ML_ERROR_NOT_SUPPORTED;
   }
 
+  ensureName(layer);
+
   /** @todo This might be redundant. Remove this after testing */
   for (auto iter = layers.begin(); iter != layers.end(); ++iter) {
     if ((*iter)->getName() == layer->getName()) {
@@ -1066,6 +1090,36 @@ int NeuralNetwork::setOptimizer(std::shared_ptr<Optimizer> optimizer) {
   return ML_ERROR_NONE;
 }
 
+void NeuralNetwork::ensureName(std::shared_ptr<Layer> layer,
+                               std::string prefix) {
+  if (layer->getName().empty()) {
+    std::set<std::string>::iterator iter;
+    std::string name;
+
+    do {
+      name = prefix + layer->getBaseName() + std::to_string(def_name_count++);
+      iter = layer_names.find(name);
+    } while (iter != layer_names.end());
+
+    layer_names.insert(name);
+    layer->setName(name);
+  }
+}
+
+int NeuralNetwork::getLayer(const char *name, std::shared_ptr<Layer> *layer) {
+  int status = ML_ERROR_INVALID_PARAMETER;
+  std::string name_str(name);
+
+  for (auto iter = layers.begin(); iter != layers.end(); ++iter) {
+    if ((*iter)->getName() == name_str) {
+      *layer = *iter;
+      return ML_ERROR_NONE;
+    }
+  }
+
+  return status;
+}
+
 std::shared_ptr<Layer>
 NeuralNetwork::_make_act_layer(ActiType act, std::shared_ptr<Layer> prev) {
   if (layers.back()->getType() == LAYER_ACTIVATION) {
@@ -1079,6 +1133,7 @@ NeuralNetwork::_make_act_layer(ActiType act, std::shared_ptr<Layer> prev) {
   std::shared_ptr<ActivationLayer> act_layer =
     std::make_shared<ActivationLayer>();
 
+  ensureName(act_layer, prev->getName());
   act_layer->setActivation(act);
   act_layer->setInputDimension(prev->getOutputDimension());
   act_layer->initialize(prev->getLast());
@@ -1108,6 +1163,7 @@ int NeuralNetwork::initFlattenLayer(unsigned int &position) {
   std::shared_ptr<FlattenLayer> flatten_layer =
     std::make_shared<FlattenLayer>();
 
+  ensureName(flatten_layer, layers[position]->getName());
   flatten_layer->setInputDimension(layers[position]->getOutputDimension());
   flatten_layer->initialize(layers[position]->getLast());
   layers.insert(layers.begin() + position + 1, flatten_layer);
diff --git a/nntrainer/src/parse_util.cpp b/nntrainer/src/parse_util.cpp
index 233cfeb..d8930a5 100644
--- a/nntrainer/src/parse_util.cpp
+++ b/nntrainer/src/parse_util.cpp
@@ -266,6 +266,7 @@ unsigned int parseLayerProperty(std::string property) {
    *   pooling_size = 14
    *   pooling = 15
    *   flatten = 16
+   *   name = 17
    *
    * InputLayer has 0, 1, 2, 3 properties.
    * FullyConnectedLayer has 1, 4, 6, 7, 8, 9 properties.
@@ -273,12 +274,12 @@ unsigned int parseLayerProperty(std::string property) {
    * Pooling2DLayer has 12, 13, 14, 15 properties.
    * BatchNormalizationLayer has 0, 1, 5, 6, 7 properties.
    */
-  std::array<std::string, 18> property_string = {
+  std::array<std::string, 19> property_string = {
     "input_shape", "bias_init_zero", "normalization", "standardization",
     "activation",  "epsilon",        "weight_decay",  "weight_decay_lambda",
     "unit",        "weight_ini",     "filter",        "kernel_size",
     "stride",      "padding",        "pooling_size",  "pooling",
-    "flatten",     "unknown"};
+    "flatten",     "name",           "unknown"};
 
   for (i = 0; i < property_string.size(); i++) {
     unsigned int size = (property_string[i].size() > property.size())
diff --git a/test/tizen_capi/test_conf.ini b/test/tizen_capi/test_conf.ini
index 725f68d..29cd287 100644
--- a/test/tizen_capi/test_conf.ini
+++ b/test/tizen_capi/test_conf.ini
@@ -21,21 +21,18 @@ LabelData = "label.dat"
 
 # Layer Section : Name
 [inputlayer]
 Type = input
-Id = 0                 # Layer Id
 HiddenSize = 128       # Input Layer Dimension
 Bias_init_zero = true  # Zero Bias
 Activation = sigmoid
 
 [fc1layer]
 Type = fully_connected
-Id = 1
 HiddenSize = 20        # Hidden Layer Dimension ( = Weight Width )
 Bias_init_zero = true
 Activation = sigmoid
 
 [outputlayer]
 Type = output
-Id = 3
 HiddenSize = 3         # Output Layer Dimension ( = Weight Width )
 Bias_init_zero = true
 Activation = sigmoid
diff --git a/test/tizen_capi/unittest_tizen_capi.cpp b/test/tizen_capi/unittest_tizen_capi.cpp
index fb61e70..70012df 100644
--- a/test/tizen_capi/unittest_tizen_capi.cpp
+++ b/test/tizen_capi/unittest_tizen_capi.cpp
@@ -21,6 +21,7 @@
  */
 #include "nntrainer_test_util.h"
 #include <nntrainer.h>
+#include <nntrainer_internal.h>
 
 /**
  * @brief Neural Network Model Contruct / Destruct Test (possitive test )
@@ -114,6 +115,7 @@ TEST(nntrainer_capi_nnmodel, compile_05_p) {
 
   ml_nnmodel_h model;
   ml_nnlayer_h layers[2];
+  ml_nnlayer_h get_layer;
   ml_nnopt_h optimizer;
 
   status = ml_nnmodel_construct(&model);
@@ -130,18 +132,28 @@ TEST(nntrainer_capi_nnmodel, compile_05_p) {
   status = ml_nnmodel_add_layer(model, layers[0]);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
+  /** Find layer based on default name */
+  status = ml_nnmodel_get_layer(model, "Input0", &get_layer);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  EXPECT_EQ(get_layer, layers[0]);
+
   status = ml_nnlayer_create(&layers[1], ML_LAYER_TYPE_FC);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
-  status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
-                                   "bias_init_zero=true", "weight_decay=l2norm",
-                                   "weight_decay_lambda=0.005",
-                                   "weight_ini=xavier_uniform", NULL);
+  status = ml_nnlayer_set_property(
+    layers[1], "unit= 10", "activation=softmax", "bias_init_zero=true",
+    "weight_decay=l2norm", "weight_decay_lambda=0.005",
+    "weight_ini=xavier_uniform", "name=fc100", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_nnmodel_add_layer(model, layers[1]);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
+  /** Find layer based on set name */
+  status = ml_nnmodel_get_layer(model, "fc100", &get_layer);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  EXPECT_EQ(get_layer, layers[1]);
+
   status = ml_nnoptimizer_create(&optimizer, "adam");
   EXPECT_EQ(status, ML_ERROR_NONE);
 
@@ -161,6 +173,85 @@ TEST(nntrainer_capi_nnmodel, compile_05_p) {
 }
 
 /**
+ * @brief Neural Network Model Add/Get Layer by Name Test
+ */
+TEST(nntrainer_capi_nnmodel, compile_06_n) {
+  int status = ML_ERROR_NONE;
+
+  ml_nnmodel_h model;
+  ml_nnlayer_h layers[3];
+  ml_nnlayer_h get_layer;
+
+  status = ml_nnmodel_construct(&model);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_nnlayer_create(&layers[0], ML_LAYER_TYPE_INPUT);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status =
+    ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
+                            "normalization=true", "bias_init_zero=true", NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Find layer before adding */
+  status = ml_nnmodel_get_layer(model, "Input0", &get_layer);
+  EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_nnmodel_add_layer(model, layers[0]);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Find layer based on default name */
+  status = ml_nnmodel_get_layer(model, "Input0", &get_layer);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_nnlayer_create(&layers[1], ML_LAYER_TYPE_FC);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Create another layer with same name, different type */
+  status = ml_nnlayer_set_property(layers[1], "name=Input0", NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Adding a layer with an existing name must fail */
+  status = ml_nnmodel_add_layer(model, layers[1]);
+  EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_nnlayer_set_property(layers[1], "name=fc0", NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Add layer with different name, different layer type */
+  status = ml_nnmodel_add_layer(model, layers[1]);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Find layer based on set name */
+  status = ml_nnmodel_get_layer(model, "fc0", &get_layer);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_nnlayer_create(&layers[2], ML_LAYER_TYPE_FC);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Create another layer with same name, same type */
+  status = ml_nnlayer_set_property(layers[2], "name=fc0", NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Adding a layer with an existing name must fail, regardless of type */
+  status = ml_nnmodel_add_layer(model, layers[2]);
+  EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_nnlayer_set_property(layers[2], "name=fc1", NULL);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Add layer with different name, same layer type */
+  status = ml_nnmodel_add_layer(model, layers[2]);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_nnmodel_get_layer(model, "fc1", &get_layer);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+
+  status = ml_nnmodel_destruct(model);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+/**
  * @brief Neural Network Model Train Test
  */
 TEST(nntrainer_capi_nnmodel, train_01_p) {
diff --git a/test/unittest/unittest_nntrainer_layers.cpp b/test/unittest/unittest_nntrainer_layers.cpp
index 74771a8..b5a5dfe 100644
--- a/test/unittest/unittest_nntrainer_layers.cpp
+++ b/test/unittest/unittest_nntrainer_layers.cpp
@@ -260,22 +260,13 @@ TEST(nntrainer_FullyConnectedLayer_init_name, initialize_05_n) {
   nntrainer::TensorDim d;
   std::string layer_name;
 
-  /** Default name is set */
+  /** No name is set */
   layer_name = layer0.getName();
-  EXPECT_GT(layer_name.length(), 0);
+  EXPECT_EQ(layer_name.length(), 0);
 
-  /** Set same name again */
-  status = layer0.setName(layer_name);
-  EXPECT_EQ(status, ML_ERROR_NONE);
-  EXPECT_EQ(layer0.getName(), layer_name);
-
-  /** Do not set the name already allocated */
-  status = layer1.setName(layer_name);
+  /** Set empty name */
+  status = layer0.setName(std::string());
   EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
-
-  /** Default name is set even after error */
-  layer_name = layer1.getName();
-  EXPECT_GT(layer_name.length(), 0);
 }
 
 /**
-- 
2.7.4
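
Editor's note, usage sketch (not part of the patch): the snippet below illustrates how the new lookup is meant to be called from application code, using only the C-API functions exercised in the unit tests above. The explicit name "fc100" and the default name "Input0" follow the naming convention shown in the tests; error checks are collapsed to a single lookup check for brevity, and the property values are illustrative.

    #include <nntrainer_internal.h> /* ml_nnmodel_get_layer() is internal API */

    static void layer_lookup_example(void) {
      ml_nnmodel_h model;
      ml_nnlayer_h fc, found;

      ml_nnmodel_construct(&model);

      ml_nnlayer_create(&fc, ML_LAYER_TYPE_FC);
      /* Give the layer an explicit name; a layer added without one is
       * assigned a default name derived from its base name, e.g. "Input0". */
      ml_nnlayer_set_property(fc, "unit=10", "name=fc100", NULL);
      ml_nnmodel_add_layer(model, fc);

      /* Returns the same handle that was added. The handle remains owned
       * by the model and must not be destroyed by the caller. */
      if (ml_nnmodel_get_layer(model, "fc100", &found) != ML_ERROR_NONE) {
        /* ML_ERROR_INVALID_PARAMETER: no layer with that name exists */
      }

      /* Destructing the model also frees the layer handles it owns. */
      ml_nnmodel_destruct(model);
    }

Design note: ml_nnmodel_get_layer() first consults the handle map kept in ml_nnmodel and only falls back to NeuralNetwork::getLayer() for layers the network knows but the C-API has not yet wrapped; a handle fetched through that fallback is wrapped once and registered back into the map via ml_nnmodel_add_layer().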