From: Parichay Kapoor
Date: Mon, 6 Jul 2020 10:12:02 +0000 (+0900)
Subject: [property] Add name property for layer
X-Git-Tag: accepted/tizen/unified/20200709.212755^0
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=398a29ac15d3788bbd708f70101840e0aef36650;p=platform%2Fcore%2Fml%2Fnntrainer.git

[property] Add name property for layer

Add a name property for layers.
Default names are assigned when a layer name is not given by the creator.
Layers should no longer be added by pushing to `layers` directly;
NeuralNetwork::addLayer() must be used, which rejects duplicate names.

Signed-off-by: Parichay Kapoor
---
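A quick usage sketch for reviewers of what this patch enables. The include
paths are assumptions; the name/property API is the one introduced below:

  #include <string>

  #include <fc_layer.h>        /**< assumed include path */
  #include <nntrainer_error.h>

  int demo() {
    /** names can be assigned through the generic property interface... */
    nntrainer::FullyConnectedLayer fc;
    int status = fc.setProperty({"name=fc0"});
    if (status != ML_ERROR_NONE)
      return status;

    /** ...or directly; re-setting a layer's own name is a no-op */
    status = fc.setName("fc0");

    /** unnamed layers get "<BaseName><count>", e.g. "FullyConnected0" */
    nntrainer::FullyConnectedLayer unnamed;
    std::string def_name = unnamed.getName();

    return status;
  }
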
diff --git a/nntrainer/include/activation_layer.h b/nntrainer/include/activation_layer.h
index 69f65f7..ae42696 100644
--- a/nntrainer/include/activation_layer.h
+++ b/nntrainer/include/activation_layer.h
@@ -126,6 +126,12 @@ public:
    */
   int setProperty(std::vector<std::string> values);
 
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "Activation"; };
+
 private:
   std::function<Tensor(Tensor const &)> _act_fn;
   std::function<Tensor(Tensor const &)> _act_prime_fn;
diff --git a/nntrainer/include/bn_layer.h b/nntrainer/include/bn_layer.h
index 83626fc..2311616 100644
--- a/nntrainer/include/bn_layer.h
+++ b/nntrainer/include/bn_layer.h
@@ -118,6 +118,12 @@ public:
    */
   int setProperty(std::vector<std::string> values);
 
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "BatchNormalization"; };
+
 private:
   Tensor weight;
   Tensor bias;
diff --git a/nntrainer/include/conv2d_layer.h b/nntrainer/include/conv2d_layer.h
index 0216bab..fa9f880 100644
--- a/nntrainer/include/conv2d_layer.h
+++ b/nntrainer/include/conv2d_layer.h
@@ -162,6 +162,12 @@ public:
   /*   unknown = 3, */
   /* }; */
 
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "Convolution2D"; };
+
 private:
   unsigned int filter_size;
   unsigned int kernel_size[CONV2D_DIM];
diff --git a/nntrainer/include/fc_layer.h b/nntrainer/include/fc_layer.h
index f1d480c..e639d37 100644
--- a/nntrainer/include/fc_layer.h
+++ b/nntrainer/include/fc_layer.h
@@ -101,6 +101,12 @@ public:
    */
   int setOptimizer(Optimizer &opt);
 
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "FullyConnected"; };
+
 private:
   unsigned int unit;
   Tensor weight;
diff --git a/nntrainer/include/flatten_layer.h b/nntrainer/include/flatten_layer.h
index 711b22f..b9b86d2 100644
--- a/nntrainer/include/flatten_layer.h
+++ b/nntrainer/include/flatten_layer.h
@@ -98,6 +98,12 @@ public:
    * @param[in] l layer to copy
    */
   void copy(std::shared_ptr<Layer> l);
+
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "Flatten"; };
 };
 
 } // namespace nntrainer
diff --git a/nntrainer/include/input_layer.h b/nntrainer/include/input_layer.h
index f9c588c..e8764ac 100644
--- a/nntrainer/include/input_layer.h
+++ b/nntrainer/include/input_layer.h
@@ -131,6 +131,12 @@ public:
    */
   int setProperty(std::vector<std::string> values);
 
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "Input"; };
+
 private:
   bool normalization;
   bool standardization;
diff --git a/nntrainer/include/layer.h b/nntrainer/include/layer.h
index bf5819d..b7cbf1b 100644
--- a/nntrainer/include/layer.h
+++ b/nntrainer/include/layer.h
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include <set>
 #include
 #include
 #include
@@ -327,6 +328,21 @@ public:
   bool getFlatten() { return flatten; }
 
+  /**
+   * @brief Set name of the layer
+   */
+  int setName(std::string name);
+
+  /**
+   * @brief Get name of the layer
+   */
+  std::string getName();
+
+  /**
+   * @brief Get base name of the layer
+   */
+  virtual std::string getBaseName() = 0;
+
   /**
    * @brief Property Enumeration
    *        0. input shape : string
    *        1. bias zero : bool
@@ -345,6 +361,7 @@ public:
    *        14. pooling_size : ( n,m )
    *        15. pooling : max, average, global_max, global_average
    *        16. flatten : bool
+   *        17. name : string
    */
   enum class PropertyType {
     input_shape = 0,
@@ -364,11 +381,17 @@ public:
     pooling_size = 14,
     pooling = 15,
     flatten = 16,
-    unknown = 17
+    name = 17,
+    unknown = 18
   };
 
 protected:
   /**
+   * @brief Name of the layer (works as the identifier)
+   */
+  std::string name;
+
+  /**
    * @brief check if current layer's weight decay type is l2norm
    * @return bool is weightdecay type is L2 Norm
    */
@@ -465,6 +488,21 @@ protected:
 
 private:
   /**
+   * @brief Set containing all the names of layers
+   */
+  static std::set<std::string> layer_names;
+
+  /**
+   * @brief Count assigned to layer names declared by default
+   */
+  static int def_name_count;
+
+  /**
+   * @brief Ensure that layer has a name
+   */
+  void ensureName();
+
+  /**
    * @brief Convert vector of reference to vector of objects
    */
   std::shared_ptr<std::vector<Tensor>>
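Review note: the pure virtual getBaseName() is the hook that lets the
non-template base class build type-specific default names. A minimal,
self-contained sketch of the pattern (illustrative types, not the actual
nntrainer classes):

  #include <set>
  #include <string>

  struct LayerBase {
    virtual std::string getBaseName() = 0;

    std::string name;
    static int def_name_count;
    static std::set<std::string> layer_names;

    void ensureName() {
      if (name.empty()) {
        do { /**< keep counting until an unused name is found */
          name = getBaseName() + std::to_string(def_name_count++);
        } while (layer_names.count(name) != 0);
        layer_names.insert(name);
      }
    }
  };

  int LayerBase::def_name_count = 0;
  std::set<std::string> LayerBase::layer_names;

  struct FC : LayerBase {
    std::string getBaseName() { return "FullyConnected"; }
  };

  int main() {
    FC a, b;
    a.ensureName(); /**< -> "FullyConnected0" */
    b.ensureName(); /**< -> "FullyConnected1" */
    return 0;
  }
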
diff --git a/nntrainer/include/loss_layer.h b/nntrainer/include/loss_layer.h
index d21b02c..47ccaa1 100644
--- a/nntrainer/include/loss_layer.h
+++ b/nntrainer/include/loss_layer.h
@@ -99,6 +99,12 @@ public:
    */
   int initialize(bool last);
 
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "Loss"; };
+
 private:
   /**
    * @brief update loss
diff --git a/nntrainer/include/optimizer.h b/nntrainer/include/optimizer.h
index 5f1f6ae..07457e0 100644
--- a/nntrainer/include/optimizer.h
+++ b/nntrainer/include/optimizer.h
@@ -210,6 +210,12 @@ public:
    */
   void save(std::ofstream &file);
 
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "Optimizer"; };
+
 private:
   /**
    * @brief Optimizer Type
diff --git a/nntrainer/include/pooling2d_layer.h b/nntrainer/include/pooling2d_layer.h
index 8255362..b87493c 100644
--- a/nntrainer/include/pooling2d_layer.h
+++ b/nntrainer/include/pooling2d_layer.h
@@ -139,6 +139,12 @@ public:
     unknown = 3,
   };
 
+  /**
+   * @brief get the base name for the layer
+   * @retval base name of the layer
+   */
+  std::string getBaseName() { return "Pooling2D"; };
+
 private:
   unsigned int pooling_size[POOLING2D_DIM];
   unsigned int stride[POOLING2D_DIM];
diff --git a/nntrainer/include/util_func.h b/nntrainer/include/util_func.h
index 7d3bc2a..a769bb8 100644
--- a/nntrainer/include/util_func.h
+++ b/nntrainer/include/util_func.h
@@ -24,6 +24,7 @@
 #define __UTIL_FUNC_H__
 #ifdef __cplusplus
 
+#include
 #include
 
 namespace nntrainer {
diff --git a/nntrainer/src/layer.cpp b/nntrainer/src/layer.cpp
index 824c57e..f7cef86 100644
--- a/nntrainer/src/layer.cpp
+++ b/nntrainer/src/layer.cpp
@@ -29,6 +29,9 @@
 
 namespace nntrainer {
 
+int Layer::def_name_count = 0;
+std::set<std::string> Layer::layer_names;
+
 int Layer::setActivation(ActiType acti) {
   int status = ML_ERROR_NONE;
   if (acti == ACT_UNKNOWN) {
@@ -132,6 +135,10 @@ int Layer::setProperty(std::vector<std::string> values) {
     unsigned int type = parseLayerProperty(key);
 
     switch (static_cast<PropertyType>(type)) {
+    case PropertyType::name:
+      status = setName(value);
+      NN_RETURN_STATUS();
+      break;
     case PropertyType::input_shape:
       status = input_dim.setTensorDim(value.c_str());
       NN_RETURN_STATUS();
@@ -171,4 +178,41 @@ int Layer::setProperty(std::vector<std::string> values) {
   return status;
 }
 
+int Layer::setName(std::string name) {
+  int status = ML_ERROR_NONE;
+  std::pair<std::set<std::string>::iterator, bool> ret;
+
+  if (name.empty())
+    status = ML_ERROR_INVALID_PARAMETER;
+
+  if (name == this->name)
+    return status;
+
+  ret = layer_names.insert(name);
+  if (ret.second == false)
+    status = ML_ERROR_INVALID_PARAMETER;
+  else
+    this->name = name;
+
+  return status;
+}
+
+std::string Layer::getName() {
+  ensureName();
+  return name;
+}
+
+void Layer::ensureName() {
+  if (name.empty()) {
+    std::set<std::string>::iterator iter;
+
+    do {
+      name = getBaseName() + std::to_string(def_name_count++);
+      iter = layer_names.find(name);
+    } while (iter != layer_names.end());
+
+    layer_names.insert(name);
+  }
+}
+
 } /* namespace nntrainer */
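Review note: the naming contract implemented above, spelled out as a small
sketch (assumed include paths; the behavior follows setName()/ensureName()
exactly as added in this patch):

  #include <string>

  #include <fc_layer.h>        /**< assumed include path */
  #include <nntrainer_error.h>

  void name_semantics() {
    nntrainer::FullyConnectedLayer a, b;

    int s0 = a.setName("fc");  /**< ML_ERROR_NONE: "fc" is now reserved */
    int s1 = a.setName("fc");  /**< ML_ERROR_NONE: re-setting the same name is a no-op */
    int s2 = b.setName("fc");  /**< ML_ERROR_INVALID_PARAMETER: name already taken */
    int s3 = b.setName("");    /**< ML_ERROR_INVALID_PARAMETER: empty names are rejected */

    /** still valid after the failures: falls back to "FullyConnected<count>" */
    std::string n = b.getName();
    (void)s0; (void)s1; (void)s2; (void)s3; (void)n;
  }
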
diff --git a/nntrainer/src/neuralnet.cpp b/nntrainer/src/neuralnet.cpp
index bef88da..75d34c7a 100644
--- a/nntrainer/src/neuralnet.cpp
+++ b/nntrainer/src/neuralnet.cpp
@@ -272,7 +272,7 @@ int NeuralNetwork::init() {
         ini, (layer_name + ":Normalization").c_str(), false));
       input_layer->setStandardization(iniparser_getboolean(
         ini, (layer_name + ":Standardization").c_str(), false));
-      layers.push_back(input_layer);
+      addLayer(input_layer);
     } break;
     case LAYER_CONV2D: {
       int size[CONV2D_DIM];
@@ -355,7 +355,7 @@ int NeuralNetwork::init() {
       status = conv2d_layer->setOptimizer(opt);
       NN_INI_RETURN_STATUS();
 
-      layers.push_back(conv2d_layer);
+      addLayer(conv2d_layer);
     } break;
 
     case LAYER_POOLING2D: {
@@ -401,7 +401,7 @@ int NeuralNetwork::init() {
       status = pooling2d_layer->initialize(last);
       NN_INI_RETURN_STATUS();
 
-      layers.push_back(pooling2d_layer);
+      addLayer(pooling2d_layer);
     } break;
 
     case LAYER_FLATTEN: {
@@ -412,7 +412,7 @@ int NeuralNetwork::init() {
       status = flatten_layer->initialize(last);
       NN_INI_RETURN_STATUS();
 
-      layers.push_back(flatten_layer);
+      addLayer(flatten_layer);
     } break;
 
     case LAYER_FC: {
@@ -465,7 +465,7 @@ int NeuralNetwork::init() {
      status = fc_layer->setOptimizer(opt);
      NN_INI_RETURN_STATUS();
 
-      layers.push_back(fc_layer);
+      addLayer(fc_layer);
     } break;
     case LAYER_BN: {
       std::shared_ptr<BatchNormalizationLayer> bn_layer =
@@ -484,7 +484,7 @@ int NeuralNetwork::init() {
       // fixme: deprecate this.
       layers.back()->setBNfollow(true);
 
-      layers.push_back(bn_layer);
+      addLayer(bn_layer);
       NN_INI_RETURN_STATUS();
     } break;
     case LAYER_UNKNOWN:
@@ -564,7 +564,7 @@ int NeuralNetwork::initLossLayer() {
   status = loss_layer->setCost(updated_cost);
   NN_RETURN_STATUS();
 
-  layers.push_back(loss_layer);
+  addLayer(loss_layer);
 
   return status;
 }
@@ -1118,10 +1118,19 @@ int NeuralNetwork::addLayer(std::shared_ptr<Layer> layer) {
   int status = ML_ERROR_NONE;
 
   if (initialized) {
     return ML_ERROR_NOT_SUPPORTED;
-  } else {
-    layers.push_back(layer);
   }
 
+  /** @todo This might be redundant. Remove this after testing */
+  for (auto iter = layers.begin(); iter != layers.end(); ++iter) {
+    if ((*iter)->getName() == layer->getName()) {
+      ml_loge("Layer with name %s already exists in the model.",
+              layer->getName().c_str());
+      return ML_ERROR_INVALID_PARAMETER;
+    }
+  }
+
+  layers.push_back(layer);
+
   return status;
 }
diff --git a/test/unittest/unittest_nntrainer_internal.cpp b/test/unittest/unittest_nntrainer_internal.cpp
index 8e1f854..b5421fc 100644
--- a/test/unittest/unittest_nntrainer_internal.cpp
+++ b/test/unittest/unittest_nntrainer_internal.cpp
@@ -279,6 +279,21 @@ TEST(nntrainer_Conv2DLayer, initialize_02_p) {
 }
 
 /**
+ * @brief Flatten Layer
+ */
+TEST(nntrainer_Layer, initialize_03_p) {
+  int status = ML_ERROR_NONE;
+  std::string config_file = "./test.ini";
+  RESET_CONFIG(config_file.c_str());
+  replaceString("flatten = false", "flatten = true", config_file, config_str2);
+  nntrainer::NeuralNetwork NN;
+  status = NN.setConfig(config_file);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  status = NN.init();
+  EXPECT_EQ(status, ML_ERROR_NONE);
+}
+
+/**
  * @brief Main gtest
  */
 int main(int argc, char **argv) {
diff --git a/test/unittest/unittest_nntrainer_layers.cpp b/test/unittest/unittest_nntrainer_layers.cpp
index c09f7d0..0f9dd20 100644
--- a/test/unittest/unittest_nntrainer_layers.cpp
+++ b/test/unittest/unittest_nntrainer_layers.cpp
@@ -142,11 +142,53 @@ TEST(nntrainer_FullyConnectedLayer, initialize_04_p) {
   int status = ML_ERROR_NONE;
   nntrainer::FullyConnectedLayer layer;
   nntrainer::TensorDim d;
+  std::string layer_name;
+
   d.setTensorDim("32:1:28:28");
   layer.setProperty({"unit=1"});
   layer.setInputDimension(d);
+
+  /** Layer name can be set */
+  layer_name = "FCLayer0";
+  status = layer.setName(layer_name);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  EXPECT_EQ(layer.getName(), layer_name);
+
   status = layer.initialize(false);
   EXPECT_EQ(status, ML_ERROR_NONE);
+
+  /** Layer name can be updated */
+  layer_name = "FCLayer1";
+  status = layer.setName(layer_name);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  EXPECT_EQ(layer.getName(), layer_name);
+}
+
+/**
+ * @brief FullyConnected Layer
+ */
+TEST(nntrainer_FullyConnectedLayer, initialize_05_n) {
+  int status = ML_ERROR_NONE;
+  nntrainer::FullyConnectedLayer layer0, layer1;
+  nntrainer::TensorDim d;
+  std::string layer_name;
+
+  /** Default name is set */
+  layer_name = layer0.getName();
+  EXPECT_GT(layer_name.length(), 0);
+
+  /** Set same name again */
+  status = layer0.setName(layer_name);
+  EXPECT_EQ(status, ML_ERROR_NONE);
+  EXPECT_EQ(layer0.getName(), layer_name);
+
+  /** Do not set the name already allocated */
+  status = layer1.setName(layer_name);
+  EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+
+  /** Default name is set even after error */
+  layer_name = layer1.getName();
+  EXPECT_GT(layer_name.length(), 0);
 }
 
 /**
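Review note: the duplicate-name check in NeuralNetwork::addLayer() has no
dedicated test in this patch. A possible negative test, sketched here with
hypothetical test name, include paths, and setup:

  #include <memory>

  #include <gtest/gtest.h>

  #include <fc_layer.h>        /**< assumed include paths */
  #include <neuralnet.h>
  #include <nntrainer_error.h>

  TEST(nntrainer_NeuralNetwork, addLayer_duplicate_name_n) {
    nntrainer::NeuralNetwork NN;
    std::shared_ptr<nntrainer::Layer> fc =
      std::make_shared<nntrainer::FullyConnectedLayer>();

    /** first insertion succeeds; the layer keeps its (default) unique name */
    EXPECT_EQ(NN.addLayer(fc), ML_ERROR_NONE);

    /** inserting a layer whose name already exists in the model is rejected */
    EXPECT_EQ(NN.addLayer(fc), ML_ERROR_INVALID_PARAMETER);
  }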