From: Parichay Kapoor
Date: Tue, 7 Jul 2020 08:24:28 +0000 (+0900)
Subject: [API] Update C-API
X-Git-Tag: accepted/tizen/unified/20200721.042553~53
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=5cf95201ca96828a14cbac5855ce4bcafe13f0ad;p=platform%2Fcore%2Fml%2Fnntrainer.git

[API] Update C-API

Update C-API and corresponding code changes.

Major Changes:
1. ml_nnmodel_compile_with_conf() is replaced with ml_nnmodel_construct_with_conf(). The new function loads the model from the config file but does not initialize it; ml_nnmodel_compile() should be called after ml_nnmodel_construct_with_conf() and before training.
2. ml_nnmodel_compile() no longer takes an optimizer as input. Instead, use ml_nnmodel_set_optimizer() to set the optimizer for the model.
3. init() in neuralnet has been renamed to loadFromConfig() and no longer initializes the model. Instead, call init() after loadFromConfig() to initialize. This also allows updating the model config after loading it with loadFromConfig().
4. init(optimizer, args_list) has been replaced with init(). Instead, call setOptimizer(optimizer) to set the optimizer and setProperty(args_list) to set the properties before calling init().
5. Bug fixes in checkValidation().

Signed-off-by: Parichay Kapoor

---

diff --git a/Applications/Classification/jni/main.cpp b/Applications/Classification/jni/main.cpp index 3cfd577..76fdd6d 100644 --- a/Applications/Classification/jni/main.cpp +++ b/Applications/Classification/jni/main.cpp @@ -426,6 +426,7 @@ int main(int argc, char *argv[]) { */ nntrainer::NeuralNetwork NN; NN.setConfig(config); + NN.loadFromConfig(); NN.init(); NN.readModel(); diff --git a/Applications/Classification/jni/main_func.cpp b/Applications/Classification/jni/main_func.cpp index 40184c0..9045dc7 100644 --- a/Applications/Classification/jni/main_func.cpp +++ b/Applications/Classification/jni/main_func.cpp @@ -281,6 +281,7 @@ int main(int argc, char *argv[]) { */ nntrainer::NeuralNetwork NN; NN.setConfig(config); + NN.loadFromConfig(); NN.init(); NN.readModel(); diff --git a/Applications/LogisticRegression/jni/main.cpp b/Applications/LogisticRegression/jni/main.cpp index 9716677..1989f13 100644 --- a/Applications/LogisticRegression/jni/main.cpp +++ b/Applications/LogisticRegression/jni/main.cpp @@ -81,6 +81,7 @@ int main(int argc, char *argv[]) { /** * @brief Initialize NN */ + NN.loadFromConfig(); NN.init(); if (!training) NN.readModel(); diff --git a/Applications/ReinforcementLearning/DeepQ/jni/main.cpp b/Applications/ReinforcementLearning/DeepQ/jni/main.cpp index 44d28fb..24521c7 100644 --- a/Applications/ReinforcementLearning/DeepQ/jni/main.cpp +++ b/Applications/ReinforcementLearning/DeepQ/jni/main.cpp @@ -278,7 +278,9 @@ int main(int argc, char **argv) { /** * @brief initialize mainNet & Target Net */ + mainNet.loadFromConfig(); mainNet.init(); + targetNet.loadFromConfig(); targetNet.init(); /** diff --git a/Applications/Tizen_CAPI/capi_file.c b/Applications/Tizen_CAPI/capi_file.c index e721bf7..788faac 100644 --- a/Applications/Tizen_CAPI/capi_file.c +++ b/Applications/Tizen_CAPI/capi_file.c @@ -85,9 +85,13 @@ int main(int argc, char *argv[]) { "beta1=0.9", "beta2=0.9999", "epsilon=1e-7", NULL); NN_RETURN_STATUS(); + /* set optimizer */ + status = ml_nnmodel_set_optimizer (model, optimizer); + NN_RETURN_STATUS (); + /* compile model with cross entropy loss function */ - status = ml_nnmodel_compile(model, optimizer, "loss=cross", NULL); - NN_RETURN_STATUS(); + status = ml_nnmodel_compile (model, "loss=cross", NULL); + NN_RETURN_STATUS
(); /* train model with data files : epochs = 10 and store model file named * "model.bin" */ diff --git a/Applications/Tizen_CAPI/capi_func.c b/Applications/Tizen_CAPI/capi_func.c index a76a9b0..69cf480 100644 --- a/Applications/Tizen_CAPI/capi_func.c +++ b/Applications/Tizen_CAPI/capi_func.c @@ -309,9 +309,13 @@ int main(int argc, char *argv[]) { "beta1=0.9", "beta2=0.9999", "epsilon=1e-7", NULL); NN_RETURN_STATUS(); + /* set optimizer */ + status = ml_nnmodel_set_optimizer (model, optimizer); + NN_RETURN_STATUS (); + /* compile model with cross entropy loss function */ - status = ml_nnmodel_compile(model, optimizer, "loss=cross", NULL); - NN_RETURN_STATUS(); + status = ml_nnmodel_compile (model, "loss=cross", NULL); + NN_RETURN_STATUS (); /* train model with data files : epochs = 10 and store model file named * "model.bin" */ diff --git a/Applications/Tizen_CAPI/main.c b/Applications/Tizen_CAPI/main.c index cb35d6b..4317f49 100644 --- a/Applications/Tizen_CAPI/main.c +++ b/Applications/Tizen_CAPI/main.c @@ -29,13 +29,13 @@ int main(int argc, char *argv[]) { int status = ML_ERROR_NONE; ml_nnmodel_h handle = NULL; const char *config_file = "./Tizen_CAPI_config.ini"; - status = ml_nnmodel_construct(&handle); + status = ml_nnmodel_construct_with_conf (config_file, &handle); if (status != ML_ERROR_NONE) return status; - status = ml_nnmodel_compile_with_conf(config_file, handle); + status = ml_nnmodel_compile (handle, NULL); if (status != ML_ERROR_NONE) return status; - status = ml_nnmodel_train_with_file(handle); + status = ml_nnmodel_train_with_file (handle, NULL); if (status != ML_ERROR_NONE) return status; status = ml_nnmodel_destruct(handle); diff --git a/Applications/Training/jni/main.cpp b/Applications/Training/jni/main.cpp index 8dd379d..d35d59e 100644 --- a/Applications/Training/jni/main.cpp +++ b/Applications/Training/jni/main.cpp @@ -248,6 +248,7 @@ int main(int argc, char *argv[]) { */ nntrainer::NeuralNetwork NN; NN.setConfig(config); + NN.loadFromConfig(); NN.init(); /** diff --git a/api/capi/include/nntrainer.h b/api/capi/include/nntrainer.h index 4211f06..1899653 100644 --- a/api/capi/include/nntrainer.h +++ b/api/capi/include/nntrainer.h @@ -77,30 +77,28 @@ typedef enum { int ml_nnmodel_construct(ml_nnmodel_h *model); /** - * @brief Initialize the neural network model with the given configuration file. - * @details Use this function to initialize neural network model + * @brief Construct the neural network model with the given configuration file. + * @details Use this function to create neural network model with the given configuration file. * @since_tizen 6.x * @param[in] model_conf The location of nntrainer model configuration file. - * @param[in] model The NNTrainer model handler from the given description. + * @param[out] model The NNTrainer model handler from the given description. * @return @c 0 on success. Otherwise a negative error value. * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter. */ -int ml_nnmodel_compile_with_conf(const char *model_conf, ml_nnmodel_h model); +int ml_nnmodel_construct_with_conf(const char *model_conf, ml_nnmodel_h *model); /** * @brief initialize the neural network model. - * @details Use this function to initialize neural network model + * @details Use this function to initialize neural network model. Once compiled, addition of new layers is not permitted. Further, updating the properties of added layers is restricted. 
* @since_tizen 6.x * @param[in] model The NNTrainer model handler from the given description. - * @param[in] optimizer The NNTrainer optimizer handler from the given - * description. * @param[in] ... hyper parmeter for compile model * @return @c 0 on success. Otherwise a negative error value. * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter. */ -int ml_nnmodel_compile(ml_nnmodel_h model, ml_nnopt_h optimizer, ...); +int ml_nnmodel_compile(ml_nnmodel_h model, ...); /** * @brief train the neural network model. @@ -145,11 +143,11 @@ int ml_nnmodel_train_with_generator(ml_nnmodel_h model, int ml_nnmodel_destruct(ml_nnmodel_h model); /** - * @brief add layer into the neural network model - * @details Use this function to add layer + * @brief Add layer at the last of the existing layers in neural network model. + * @details Use this function to add a layer to the model. * @since_tizen 6.x - * @param[out] model The NNTrainer model handler from the given description. - * @param[int] layer The NNTrainer layer handler + * @param[in] model The NNTrainer model handler from the given description. + * @param[in] layer The NNTrainer layer handler * @return @c 0 on success. Otherwise a negative error value. * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter. @@ -157,6 +155,18 @@ int ml_nnmodel_destruct(ml_nnmodel_h model); int ml_nnmodel_add_layer(ml_nnmodel_h model, ml_nnlayer_h layer); /** + * @brief Set the neural network optimizer. + * @details Use this function to set Neural Network Optimizer. + * @since_tizen 6.x + * @param[in] model The NNTrainer model handler from the given description. + * @param[in] optimizer The NNTrainer Optimizer handler + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful. + * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter. + */ +int ml_nnmodel_set_optimizer(ml_nnmodel_h model, ml_nnopt_h optimizer); + +/** * @brief Create the neural network layer. * @details Use this function to create Neural Netowrk Layer. * @since_tizen 6.x @@ -196,36 +206,36 @@ int ml_nnlayer_set_property(ml_nnlayer_h layer, ...); * @brief Create the neural network optimizer. * @details Use this function to create Neural Netowrk Optimizer. * @since_tizen 6.x - * @param[out] layer The NNTrainer Optimizer handler from the given description. - * @param[in] type The NNTrainer Optimizer type + * @param[out] optimizer The NNTrainer Optimizer handler + * @param[in] type The NNTrainer Optimizer type * @return @c 0 on success. Otherwise a negative error value. * @retval #ML_ERROR_NONE Successful. - * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter. + * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter. */ -int ml_nnoptimizer_create(ml_nnopt_h *opt, const char *type); +int ml_nnoptimizer_create(ml_nnopt_h *optimizer, const char *type); /** * @brief Delete the neural network optimizer. * @details Use this function to delete Neural Netowrk Optimizer. * @since_tizen 6.x - * @param[in] layer The NNTrainer optimizer handler from the given description. + * @param[in] optimizer The NNTrainer optimizer handler from the given description. * @return @c 0 on success. Otherwise a negative error value. * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter. */ -int ml_nnoptimizer_delete(ml_nnopt_h opt); +int ml_nnoptimizer_delete(ml_nnopt_h optimizer); /** * @brief Set the neural network optimizer property. 
* @details Use this function to set Neural Netowrk Optimizer Property. * @since_tizen 6.x - * @param[in] layer The NNTrainer Optimizer handler from the given description. + * @param[in] optimizer The NNTrainer Optimizer handler from the given description. * @param[in] ... Property values with NULL at the end. * @return @c 0 on success. Otherwise a negative error value. * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter. */ -int ml_nnoptimizer_set_property(ml_nnopt_h opt, ...); +int ml_nnoptimizer_set_property(ml_nnopt_h optimizer, ...); /** * @} diff --git a/api/capi/src/nntrainer.cpp b/api/capi/src/nntrainer.cpp index 009a0a1..9eef24e 100644 --- a/api/capi/src/nntrainer.cpp +++ b/api/capi/src/nntrainer.cpp @@ -97,10 +97,11 @@ int ml_nnmodel_construct(ml_nnmodel_h *model) { return status; } -int ml_nnmodel_compile_with_conf(const char *model_conf, ml_nnmodel_h model) { +int ml_nnmodel_construct_with_conf(const char *model_conf, ml_nnmodel_h *model) { int status = ML_ERROR_NONE; ml_nnmodel *nnmodel; - std::shared_ptr nn; + std::shared_ptr NN; + returnable f; std::ifstream conf_file(model_conf); if (!conf_file.good()) { @@ -108,55 +109,60 @@ int ml_nnmodel_compile_with_conf(const char *model_conf, ml_nnmodel_h model) { return ML_ERROR_INVALID_PARAMETER; } - ML_NNTRAINER_CHECK_MODEL_VALIDATION(nnmodel, model); - nn = nnmodel->network; - - returnable f = [&]() { return nn->setConfig(model_conf); }; - - status = nntrainer_exception_boundary(f); + status = ml_nnmodel_construct(model); if (status != ML_ERROR_NONE) return status; - f = [&]() { return nn->checkValidation(); }; + nnmodel = (ml_nnmodel *)(*model); + NN = nnmodel->network; + f = [&]() { return NN->setConfig(model_conf); }; status = nntrainer_exception_boundary(f); - if (status != ML_ERROR_NONE) + if (status != ML_ERROR_NONE) { + ml_nnmodel_destruct(*model); return status; + } - f = [&]() { return nn->init(); }; - + f = [&]() { return NN->loadFromConfig(); }; status = nntrainer_exception_boundary(f); + if (status != ML_ERROR_NONE) { + ml_nnmodel_destruct(*model); + } + return status; } -int ml_nnmodel_compile(ml_nnmodel_h model, ml_nnopt_h optimizer, ...) { +int ml_nnmodel_compile(ml_nnmodel_h model, ...) 
{ int status = ML_ERROR_NONE; const char *data; ml_nnmodel *nnmodel; - ml_nnopt *nnopt; - - std::shared_ptr NN; - std::shared_ptr opti; + returnable f; ML_NNTRAINER_CHECK_MODEL_VALIDATION(nnmodel, model); - ML_NNTRAINER_CHECK_OPT_VALIDATION(nnopt, optimizer); - std::vector arg_list; - va_list arguments; - va_start(arguments, optimizer); + va_start(arguments, model); while ((data = va_arg(arguments, const char *))) { arg_list.push_back(data); } va_end(arguments); + std::shared_ptr NN; NN = nnmodel->network; - opti = nnopt->optimizer; - returnable f = [&]() { return NN->init(opti, arg_list); }; + f = [&]() { return NN->setProperty(arg_list); }; + status = nntrainer_exception_boundary(f); + if (status != ML_ERROR_NONE) + return status; + f = [&]() { return NN->init(); }; + status = nntrainer_exception_boundary(f); + if (status != ML_ERROR_NONE) + return status; + + f = [&]() { return NN->checkValidation(); }; status = nntrainer_exception_boundary(f); return status; @@ -242,6 +248,7 @@ int ml_nnmodel_add_layer(ml_nnmodel_h model, ml_nnlayer_h layer) { int status = ML_ERROR_NONE; ml_nnmodel *nnmodel; ml_nnlayer *nnlayer; + ML_NNTRAINER_CHECK_MODEL_VALIDATION(nnmodel, model); ML_NNTRAINER_CHECK_LAYER_VALIDATION(nnlayer, layer); @@ -258,6 +265,27 @@ int ml_nnmodel_add_layer(ml_nnmodel_h model, ml_nnlayer_h layer) { return status; } +int ml_nnmodel_set_optimizer(ml_nnmodel_h model, ml_nnopt_h optimizer) { + int status = ML_ERROR_NONE; + ml_nnmodel *nnmodel; + ml_nnopt *nnopt; + + ML_NNTRAINER_CHECK_MODEL_VALIDATION(nnmodel, model); + ML_NNTRAINER_CHECK_OPT_VALIDATION(nnopt, optimizer); + + std::shared_ptr NN; + std::shared_ptr opt; + + NN = nnmodel->network; + opt = nnopt->optimizer; + + returnable f = [&]() { return NN->setOptimizer(opt); }; + + status = nntrainer_exception_boundary(f); + + return status; +} + int ml_nnlayer_create(ml_nnlayer_h *layer, ml_layer_type_e type) { int status = ML_ERROR_NONE; returnable f; @@ -320,18 +348,17 @@ int ml_nnlayer_set_property(ml_nnlayer_h layer, ...) { NL = nnlayer->layer; returnable f = [&]() { return NL->setProperty(arg_list); }; - status = nntrainer_exception_boundary(f); return status; } -int ml_nnoptimizer_create(ml_nnopt_h *opt, const char *type) { +int ml_nnoptimizer_create(ml_nnopt_h *optimizer, const char *type) { int status = ML_ERROR_NONE; ml_nnopt *nnopt = new ml_nnopt; nnopt->magic = ML_NNTRAINER_MAGIC; nnopt->optimizer = std::make_shared(); - *opt = nnopt; + *optimizer = nnopt; returnable f = [&]() { return nnopt->optimizer->setType( @@ -346,27 +373,27 @@ int ml_nnoptimizer_create(ml_nnopt_h *opt, const char *type) { return status; } -int ml_nnoptimizer_delete(ml_nnopt_h opt) { +int ml_nnoptimizer_delete(ml_nnopt_h optimizer) { int status = ML_ERROR_NONE; ml_nnopt *nnopt; - ML_NNTRAINER_CHECK_OPT_VALIDATION(nnopt, opt); + ML_NNTRAINER_CHECK_OPT_VALIDATION(nnopt, optimizer); delete nnopt; return status; } -int ml_nnoptimizer_set_property(ml_nnopt_h opt, ...) { +int ml_nnoptimizer_set_property(ml_nnopt_h optimizer, ...) { int status = ML_ERROR_NONE; ml_nnopt *nnopt; const char *data; - nnopt = (ml_nnopt *)opt; - ML_NNTRAINER_CHECK_OPT_VALIDATION(nnopt, opt); + nnopt = (ml_nnopt *)optimizer; + ML_NNTRAINER_CHECK_OPT_VALIDATION(nnopt, optimizer); std::vector arg_list; va_list arguments; - va_start(arguments, opt); + va_start(arguments, optimizer); while ((data = va_arg(arguments, const char *))) { arg_list.push_back(data); @@ -374,10 +401,10 @@ int ml_nnoptimizer_set_property(ml_nnopt_h opt, ...) 
{ va_end(arguments); - std::shared_ptr Opt; - Opt = nnopt->optimizer; + std::shared_ptr opt; + opt = nnopt->optimizer; - returnable f = [&]() { return Opt->setProperty(arg_list); }; + returnable f = [&]() { return opt->setProperty(arg_list); }; status = nntrainer_exception_boundary(f); diff --git a/nntrainer/include/layer.h b/nntrainer/include/layer.h index b7cbf1b..f0cb0b9 100644 --- a/nntrainer/include/layer.h +++ b/nntrainer/include/layer.h @@ -325,6 +325,12 @@ public: * @brief get if the output of this layer must be flatten * @retval flatten value */ + void setFlatten(bool flatten) { this->flatten = flatten; } + + /** + * @brief get if the output of this layer must be flatten + * @retval flatten value + */ bool getFlatten() { return flatten; } /** diff --git a/nntrainer/include/neuralnet.h b/nntrainer/include/neuralnet.h index c32cc5a..ffc86dc 100644 --- a/nntrainer/include/neuralnet.h +++ b/nntrainer/include/neuralnet.h @@ -101,12 +101,11 @@ public: void setLoss(float l); /** - * @brief Initialize Network. This should be called after set all hyper - * parmeters. + * @brief Create and load the Network with configuration file. * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter. */ - int init(); + int loadFromConfig(); /** * @brief set Property of Network @@ -117,15 +116,12 @@ public: int setProperty(std::vector values); /** - * @brief Initialize Network - * @param[in] opimizer optimizer instance - * @param[in] arg_list argument list - * "loss = cross | msr" + * @brief Initialize Network. This should be called after set all + * hyperparameters. * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter. */ - int init(std::shared_ptr optimizer, - std::vector arg_list); + int init(); /** * @brief forward propagation @@ -254,6 +250,13 @@ public: */ int addLayer(std::shared_ptr layer); + /** + * @brief set optimizer for the neural network model + * @retval #ML_ERROR_NONE Successful. + * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter. 
+ */ + int setOptimizer(std::shared_ptr optimizer); + enum class PropertyType { loss = 0, cost = 1, diff --git a/nntrainer/src/neuralnet.cpp b/nntrainer/src/neuralnet.cpp index 75d34c7a..297cc6e 100644 --- a/nntrainer/src/neuralnet.cpp +++ b/nntrainer/src/neuralnet.cpp @@ -118,7 +118,7 @@ int NeuralNetwork::setConfig(std::string config) { return status; } -int NeuralNetwork::init() { +int NeuralNetwork::loadFromConfig() { int status = ML_ERROR_NONE; std::string ini_file = config; int num_ini_sec = 0; @@ -236,8 +236,7 @@ int NeuralNetwork::init() { /** Parse all the layers defined as sections in order */ TensorDim previous_dim; for (section_names_iter = section_names.begin(); - section_names_iter != section_names.end(); ++section_names_iter) { - bool last = false; + section_names_iter != section_names.end(); ++section_names_iter) { std::string layer_name = *section_names_iter; std::string layer_type_str = iniparser_getstring(ini, (layer_name + ":Type").c_str(), unknown); @@ -245,8 +244,6 @@ int NeuralNetwork::init() { bool b_zero = iniparser_getboolean(ini, (layer_name + ":bias_init_zero").c_str(), true); - last = (section_names_iter + 1) == section_names.end(); - switch (layer_type) { case LAYER_IN: { std::shared_ptr input_layer = std::make_shared(); @@ -264,10 +261,6 @@ int NeuralNetwork::init() { input_layer->setInputDimension(previous_dim); - status = input_layer->initialize(last); - NN_INI_RETURN_STATUS(); - input_layer->setBiasZero(b_zero); - input_layer->setNormalization(iniparser_getboolean( ini, (layer_name + ":Normalization").c_str(), false)); input_layer->setStandardization(iniparser_getboolean( @@ -297,13 +290,6 @@ int NeuralNetwork::init() { layer_name.c_str()); status = ML_ERROR_INVALID_PARAMETER; NN_INI_RETURN_STATUS(); - } else { - conv2d_layer->setInputDimension(previous_dim); - } - - if (last) { - status = conv2d_layer->setCost(cost); - NN_INI_RETURN_STATUS(); } status = getValues(CONV2D_DIM, @@ -349,12 +335,6 @@ int NeuralNetwork::init() { conv2d_layer->setWeightDecay(weight_decay); NN_INI_RETURN_STATUS(); - status = conv2d_layer->initialize(last); - NN_INI_RETURN_STATUS(); - - status = conv2d_layer->setOptimizer(opt); - NN_INI_RETURN_STATUS(); - addLayer(conv2d_layer); } break; @@ -363,8 +343,6 @@ int NeuralNetwork::init() { std::shared_ptr pooling2d_layer = std::make_shared(); - pooling2d_layer->setInputDimension(previous_dim); - status = getValues( POOLING2D_DIM, iniparser_getstring(ini, (layer_name + ":pooling_size").c_str(), @@ -399,8 +377,6 @@ int NeuralNetwork::init() { "average"), TOKEN_POOLING)); - status = pooling2d_layer->initialize(last); - NN_INI_RETURN_STATUS(); addLayer(pooling2d_layer); } break; @@ -408,10 +384,6 @@ int NeuralNetwork::init() { std::shared_ptr flatten_layer = std::make_shared(); - flatten_layer->setInputDimension(previous_dim); - - status = flatten_layer->initialize(last); - NN_INI_RETURN_STATUS(); addLayer(flatten_layer); } break; @@ -437,13 +409,6 @@ int NeuralNetwork::init() { layer_name.c_str()); status = ML_ERROR_INVALID_PARAMETER; NN_INI_RETURN_STATUS(); - } else { - fc_layer->setInputDimension(previous_dim); - } - - if (last) { - status = fc_layer->setCost(cost); - NN_INI_RETURN_STATUS(); } fc_layer->setUnit(static_cast( @@ -455,32 +420,17 @@ int NeuralNetwork::init() { "xavier_uniform"), TOKEN_WEIGHTINI)); - status = fc_layer->initialize(last); - NN_INI_RETURN_STATUS(); - status = parseWeightDecay(ini, layer_name, weight_decay); NN_INI_RETURN_STATUS(); fc_layer->setWeightDecay(weight_decay); - status = fc_layer->setOptimizer(opt); - 
NN_INI_RETURN_STATUS(); addLayer(fc_layer); } break; case LAYER_BN: { std::shared_ptr bn_layer = std::make_shared(); - bn_layer->setInputDimension(previous_dim); - - status = bn_layer->initialize(last); - NN_INI_RETURN_STATUS(); - - bn_layer->setBiasZero(b_zero); - - status = bn_layer->setOptimizer(opt); - NN_INI_RETURN_STATUS(); - // fixme: deprecate this. layers.back()->setBNfollow(true); @@ -499,29 +449,19 @@ int NeuralNetwork::init() { const char *acti_str = iniparser_getstring(ini, (layer_name + ":Activation").c_str(), unknown); ActiType act = (ActiType)parseType(acti_str, TOKEN_ACTI); - layers.back()->setActivation(act); - status = initActivationLayer(act); - NN_INI_RETURN_STATUS(); /** Add flatten layer */ bool flatten = iniparser_getboolean(ini, (layer_name + ":Flatten").c_str(), false); - if (flatten) { - status = initFlattenLayer(); - NN_INI_RETURN_STATUS(); - } + layers.back()->setFlatten(flatten); + previous_dim = layers.back()->getOutputDimension(); } - /** Add the last layer as loss layer */ - status = initLossLayer(); - NN_INI_RETURN_STATUS(); - status = data_buffer->setMiniBatch(batch_size); NN_INI_RETURN_STATUS(); - initialized = true; iniparser_freedict(ini); return status; } @@ -650,14 +590,10 @@ int NeuralNetwork::setProperty(std::vector values) { return status; } -int NeuralNetwork::init(std::shared_ptr optimizer, - std::vector arg_list) { +int NeuralNetwork::init() { int status = ML_ERROR_NONE; bool last = false; TensorDim previous_dim; - opt = *optimizer.get(); - status = setProperty(arg_list); - NN_RETURN_STATUS(); /** Note: number of entries in layers will change. */ for (unsigned int i = 0; i < layers.size(); ++i) { @@ -1093,16 +1029,15 @@ int NeuralNetwork::train_run() { int NeuralNetwork::checkValidation() { int status = ML_ERROR_NONE; - if (!config.empty()) - return status; - if (layers.size()) { + if (layers.empty()) { return ML_ERROR_INVALID_PARAMETER; } else { for (std::vector>::iterator layer = layers.begin(); layer != layers.end(); ++layer) { - if (!(*layer)->checkValidation()) - return ML_ERROR_INVALID_PARAMETER; + status = (*layer)->checkValidation(); + if (status != ML_ERROR_NONE) + return status; } } @@ -1134,6 +1069,20 @@ int NeuralNetwork::addLayer(std::shared_ptr layer) { return status; } +int NeuralNetwork::setOptimizer(std::shared_ptr optimizer) { + + if (optimizer->getType() == OptType::unknown) + return ML_ERROR_INVALID_PARAMETER; + + if (initialized) { + return ML_ERROR_NOT_SUPPORTED; + } + + opt = *optimizer.get(); + + return ML_ERROR_NONE; +} + std::shared_ptr NeuralNetwork::_make_act_layer(ActiType act, std::shared_ptr prev) { if (layers.back()->getType() == LAYER_ACTIVATION) { diff --git a/test/tizen_capi/unittest_tizen_capi.cpp b/test/tizen_capi/unittest_tizen_capi.cpp index 312160c..aa8b832 100644 --- a/test/tizen_capi/unittest_tizen_capi.cpp +++ b/test/tizen_capi/unittest_tizen_capi.cpp @@ -63,9 +63,9 @@ TEST(nntrainer_capi_nnmodel, compile_01_p) { RESET_CONFIG(config_file.c_str()); replaceString("Layers = inputlayer outputlayer", "Layers = inputlayer outputlayer", config_file, config_str); - status = ml_nnmodel_construct(&handle); + status = ml_nnmodel_construct_with_conf(config_file.c_str(), &handle); EXPECT_EQ(status, ML_ERROR_NONE); - status = ml_nnmodel_compile_with_conf(config_file.c_str(), handle); + status = ml_nnmodel_compile(handle, NULL); EXPECT_EQ(status, ML_ERROR_NONE); status = ml_nnmodel_destruct(handle); EXPECT_EQ(status, ML_ERROR_NONE); @@ -74,43 +74,35 @@ TEST(nntrainer_capi_nnmodel, compile_01_p) { /** * @brief Neural 
Network Model Compile Test */ -TEST(nntrainer_capi_nnmodel, compile_02_n) { +TEST(nntrainer_capi_nnmodel, construct_conf_01_n) { ml_nnmodel_h handle = NULL; int status = ML_ERROR_NONE; std::string config_file = "/test/cannot_find.ini"; - status = ml_nnmodel_construct(&handle); - EXPECT_EQ(status, ML_ERROR_NONE); - status = ml_nnmodel_compile_with_conf(config_file.c_str(), handle); + status = ml_nnmodel_construct_with_conf(config_file.c_str(), &handle); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); - status = ml_nnmodel_destruct(handle); - EXPECT_EQ(status, ML_ERROR_NONE); } /** * @brief Neural Network Model Compile Test */ -TEST(nntrainer_capi_nnmodel, compile_03_n) { +TEST(nntrainer_capi_nnmodel, construct_conf_02_n) { ml_nnmodel_h handle = NULL; int status = ML_ERROR_NONE; std::string config_file = "./test_compile_03_n.ini"; RESET_CONFIG(config_file.c_str()); replaceString("Input_Shape = 32:1:1:62720", "Input_Shape= 32:1:1:0", config_file, config_str); - status = ml_nnmodel_construct(&handle); - EXPECT_EQ(status, ML_ERROR_NONE); - status = ml_nnmodel_compile_with_conf(config_file.c_str(), handle); + status = ml_nnmodel_construct_with_conf(config_file.c_str(), &handle); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); - status = ml_nnmodel_destruct(handle); - EXPECT_EQ(status, ML_ERROR_NONE); } /** * @brief Neural Network Model Compile Test */ -TEST(nntrainer_capi_nnmodel, compile_04_n) { +TEST(nntrainer_capi_nnmodel, compile_02_n) { int status = ML_ERROR_NONE; std::string config_file = "./test_compile_03_n.ini"; - status = ml_nnmodel_compile_with_conf(config_file.c_str(), NULL); + status = ml_nnmodel_compile(NULL); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); } @@ -158,7 +150,10 @@ TEST(nntrainer_capi_nnmodel, compile_05_p) { "beta1=0.002", "beta2=0.001", "epsilon=1e-7", NULL); EXPECT_EQ(status, ML_ERROR_NONE); - status = ml_nnmodel_compile(model, optimizer, "loss=cross", NULL); + status = ml_nnmodel_set_optimizer(model, optimizer); + EXPECT_EQ(status, ML_ERROR_NONE); + + status = ml_nnmodel_compile(model, "loss=cross", NULL); EXPECT_EQ(status, ML_ERROR_NONE); status = ml_nnlayer_delete(layers[0]); @@ -184,9 +179,9 @@ TEST(nntrainer_capi_nnmodel, train_01_p) { config_file, config_str); replaceString("minibatch = 32", "minibatch = 16", config_file, config_str); replaceString("BufferSize=100", "", config_file, config_str); - status = ml_nnmodel_construct(&handle); + status = ml_nnmodel_construct_with_conf(config_file.c_str(), &handle); EXPECT_EQ(status, ML_ERROR_NONE); - status = ml_nnmodel_compile_with_conf(config_file.c_str(), handle); + status = ml_nnmodel_compile(handle, NULL); EXPECT_EQ(status, ML_ERROR_NONE); status = ml_nnmodel_train_with_file(handle, NULL); EXPECT_EQ(status, ML_ERROR_NONE); @@ -346,10 +341,10 @@ TEST(nntrainer_capi_nnmodel, addLayer_05_n) { replaceString("Layers = inputlayer outputlayer", "Layers = inputlayer outputlayer", config_file, config_str); - status = ml_nnmodel_construct(&model); + status = ml_nnmodel_construct_with_conf(config_file.c_str(), &model); EXPECT_EQ(status, ML_ERROR_NONE); - status = ml_nnmodel_compile_with_conf(config_file.c_str(), model); + status = ml_nnmodel_compile(model, NULL); EXPECT_EQ(status, ML_ERROR_NONE); status = ml_nnlayer_create(&layer, ML_LAYER_TYPE_FC); @@ -468,7 +463,10 @@ TEST(nntrainer_capi_nnmodel, train_with_file_01_p) { "beta1=0.002", "beta2=0.001", "epsilon=1e-7", NULL); EXPECT_EQ(status, ML_ERROR_NONE); - status = ml_nnmodel_compile(model, optimizer, "loss=cross", NULL); + status = ml_nnmodel_set_optimizer(model, 
optimizer); + EXPECT_EQ(status, ML_ERROR_NONE); + + status = ml_nnmodel_compile(model, "loss=cross", NULL); EXPECT_EQ(status, ML_ERROR_NONE); status = ml_nnmodel_train_with_file( @@ -531,8 +529,12 @@ TEST(nntrainer_capi_nnmodel, train_with_generator_01_p) { "beta1=0.002", "beta2=0.001", "epsilon=1e-7", NULL); EXPECT_EQ(status, ML_ERROR_NONE); - status = ml_nnmodel_compile(model, optimizer, "loss=cross", NULL); + status = ml_nnmodel_set_optimizer(model, optimizer); EXPECT_EQ(status, ML_ERROR_NONE); + + status = ml_nnmodel_compile(model, "loss=cross", NULL); + EXPECT_EQ(status, ML_ERROR_NONE); + status = ml_nnmodel_train_with_generator( model, getMiniBatch_train, getMiniBatch_val, NULL, "epochs=2", "batch_size=16", "buffer_size=100", "model_file=model.bin", NULL); diff --git a/test/unittest/unittest_nntrainer_internal.cpp b/test/unittest/unittest_nntrainer_internal.cpp index b5421fc..d723bab 100644 --- a/test/unittest/unittest_nntrainer_internal.cpp +++ b/test/unittest/unittest_nntrainer_internal.cpp @@ -65,6 +65,8 @@ TEST(nntrainer_NeuralNetwork, init_01_p) { nntrainer::NeuralNetwork NN; status = NN.setConfig(config_file); EXPECT_EQ(status, ML_ERROR_NONE); + status = NN.loadFromConfig(); + EXPECT_EQ(status, ML_ERROR_NONE); status = NN.init(); EXPECT_EQ(status, ML_ERROR_NONE); } @@ -72,35 +74,35 @@ TEST(nntrainer_NeuralNetwork, init_01_p) { /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_02_n) { +TEST(nntrainer_NeuralNetwork, load_config_01_n) { int status = ML_ERROR_NONE; RESET_CONFIG("./test.ini"); replaceString("[Network]", "", "./test.ini", config_str); nntrainer::NeuralNetwork NN; status = NN.setConfig("./test.ini"); EXPECT_EQ(status, ML_ERROR_NONE); - status = NN.init(); + status = NN.loadFromConfig(); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); } /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_03_n) { +TEST(nntrainer_NeuralNetwork, load_config_02_n) { int status = ML_ERROR_NONE; RESET_CONFIG("./test.ini"); replaceString("adam", "aaaadam", "./test.ini", config_str); nntrainer::NeuralNetwork NN; status = NN.setConfig("./test.ini"); EXPECT_EQ(status, ML_ERROR_NONE); - status = NN.init(); + status = NN.loadFromConfig(); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); } /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_04_n) { +TEST(nntrainer_NeuralNetwork, load_config_03_n) { int status = ML_ERROR_NONE; RESET_CONFIG("./test.ini"); replaceString("Input_Shape = 32:1:1:62720", "Input_Shape = 32:1:1:0", @@ -108,27 +110,27 @@ TEST(nntrainer_NeuralNetwork, init_04_n) { nntrainer::NeuralNetwork NN; status = NN.setConfig("./test.ini"); EXPECT_EQ(status, ML_ERROR_NONE); - EXPECT_THROW(NN.init(), std::invalid_argument); + EXPECT_THROW(NN.loadFromConfig(), std::invalid_argument); } /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_05_n) { +TEST(nntrainer_NeuralNetwork, load_config_04_n) { int status = ML_ERROR_NONE; RESET_CONFIG("./test.ini"); replaceString("Input_Shape = 32:1:1:62720", "", "./test.ini", config_str); nntrainer::NeuralNetwork NN; status = NN.setConfig("./test.ini"); EXPECT_EQ(status, ML_ERROR_NONE); - status = NN.init(); + status = NN.loadFromConfig(); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); } /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_06_n) { +TEST(nntrainer_NeuralNetwork, load_config_05_n) { int status = ML_ERROR_NONE; RESET_CONFIG("./test.ini"); replaceString("Learning_rate 
= 0.0001", "Learning_rate = -0.0001", @@ -136,34 +138,36 @@ TEST(nntrainer_NeuralNetwork, init_06_n) { nntrainer::NeuralNetwork NN; status = NN.setConfig("./test.ini"); EXPECT_EQ(status, ML_ERROR_NONE); - status = NN.init(); + status = NN.loadFromConfig(); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); } /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_07_p) { +TEST(nntrainer_NeuralNetwork, load_config_06_p) { int status = ML_ERROR_NONE; RESET_CONFIG("./test.ini"); replaceString("TrainData = trainingSet.dat", "", "./test.ini", config_str); nntrainer::NeuralNetwork NN; status = NN.setConfig("./test.ini"); EXPECT_EQ(status, ML_ERROR_NONE); - status = NN.init(); + status = NN.loadFromConfig(); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); } /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_08_n) { +TEST(nntrainer_NeuralNetwork, init_02_p) { int status = ML_ERROR_NONE; RESET_CONFIG("./test.ini"); replaceString("TestData = testSet.dat", "", "./test.ini", config_str); nntrainer::NeuralNetwork NN; status = NN.setConfig("./test.ini"); EXPECT_EQ(status, ML_ERROR_NONE); + status = NN.loadFromConfig(); + EXPECT_EQ(status, ML_ERROR_NONE); status = NN.init(); EXPECT_EQ(status, ML_ERROR_NONE); } @@ -171,21 +175,21 @@ TEST(nntrainer_NeuralNetwork, init_08_n) { /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_09_n) { +TEST(nntrainer_NeuralNetwork, load_config_07_n) { int status = ML_ERROR_NONE; RESET_CONFIG("./test.ini"); replaceString("LabelData = label.dat", "", "./test.ini", config_str); nntrainer::NeuralNetwork NN; status = NN.setConfig("./test.ini"); EXPECT_EQ(status, ML_ERROR_NONE); - status = NN.init(); + status = NN.loadFromConfig(); EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER); } /** * @brief Neural Network Model initialization */ -TEST(nntrainer_NeuralNetwork, init_10_p) { +TEST(nntrainer_NeuralNetwork, init_03_p) { int status = ML_ERROR_NONE; std::string config_file = "./test.ini"; RESET_CONFIG(config_file.c_str()); @@ -193,6 +197,8 @@ TEST(nntrainer_NeuralNetwork, init_10_p) { nntrainer::NeuralNetwork NN; status = NN.setConfig(config_file); EXPECT_EQ(status, ML_ERROR_NONE); + status = NN.loadFromConfig(); + EXPECT_EQ(status, ML_ERROR_NONE); status = NN.init(); EXPECT_EQ(status, ML_ERROR_NONE); } @@ -259,6 +265,8 @@ TEST(nntrainer_Conv2DLayer, initialize_01_p) { nntrainer::NeuralNetwork NN; status = NN.setConfig(config_file); EXPECT_EQ(status, ML_ERROR_NONE); + status = NN.loadFromConfig(); + EXPECT_EQ(status, ML_ERROR_NONE); status = NN.init(); EXPECT_EQ(status, ML_ERROR_NONE); } @@ -274,6 +282,8 @@ TEST(nntrainer_Conv2DLayer, initialize_02_p) { nntrainer::NeuralNetwork NN; status = NN.setConfig(config_file); EXPECT_EQ(status, ML_ERROR_NONE); + status = NN.loadFromConfig(); + EXPECT_EQ(status, ML_ERROR_NONE); status = NN.init(); EXPECT_EQ(status, ML_ERROR_NONE); } @@ -289,6 +299,8 @@ TEST(nntrainer_Layer, initialize_03_p) { nntrainer::NeuralNetwork NN; status = NN.setConfig(config_file); EXPECT_EQ(status, ML_ERROR_NONE); + status = NN.loadFromConfig(); + EXPECT_EQ(status, ML_ERROR_NONE); status = NN.init(); EXPECT_EQ(status, ML_ERROR_NONE); }