From: Jihoon Lee
Date: Tue, 24 Aug 2021 12:48:53 +0000 (+0900)
Subject: [Save] Prepare to save ini
X-Git-Tag: submit/tizen/20210827.122527~12
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d43f324c16d4acded173f610173ad3e6399b71ab;p=platform%2Fcore%2Fml%2Fnntrainer.git

[Save] Prepare to save ini

This patch adds a skeleton to save a model in the INI format.

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee
---

diff --git a/nntrainer/compiler/ini_interpreter.cpp b/nntrainer/compiler/ini_interpreter.cpp
index edeb936e..1d46d693 100644
--- a/nntrainer/compiler/ini_interpreter.cpp
+++ b/nntrainer/compiler/ini_interpreter.cpp
@@ -252,12 +252,11 @@ getMergeableGraph(std::shared_ptr<GraphRepresentation> graph,
 
 } // namespace
 
-void IniGraphInterpreter::serialize(
-  std::shared_ptr<GraphRepresentation> representation,
-  const std::string &out) {
+void IniGraphInterpreter::serialize(const GraphRepresentation &representation,
+                                    const std::string &out) {
 
   std::vector<IniSection> sections;
 
-  for (auto iter = representation->cbegin(); iter != representation->cend();
+  for (auto iter = representation.cbegin(); iter != representation.cend();
        iter++) {
     const auto &ln = *iter;
diff --git a/nntrainer/compiler/ini_interpreter.h b/nntrainer/compiler/ini_interpreter.h
index 1cfc19db..9a917d19 100644
--- a/nntrainer/compiler/ini_interpreter.h
+++ b/nntrainer/compiler/ini_interpreter.h
@@ -48,9 +48,10 @@ public:
   virtual ~IniGraphInterpreter(){};
 
   /**
-   * @copydoc GraphInterpreter::serialize(const std::string &out)
+   * @copydoc GraphInterpreter::serialize(const GraphRepresentation
+   * representation, const std::string &out)
    */
-  void serialize(std::shared_ptr<GraphRepresentation> representation,
+  void serialize(const GraphRepresentation &representation,
                  const std::string &out) override;
 
   /**
diff --git a/nntrainer/compiler/interpreter.h b/nntrainer/compiler/interpreter.h
index 23487ca6..aa391b24 100644
--- a/nntrainer/compiler/interpreter.h
+++ b/nntrainer/compiler/interpreter.h
@@ -61,9 +61,8 @@ public:
    * @param representation graph representation
    * @param out output file name
    */
-  virtual void
-  serialize(std::shared_ptr<GraphRepresentation> representation,
-            const std::string &out) = 0;
+  virtual void serialize(const GraphRepresentation &representation,
+                         const std::string &out) = 0;
 
   /**
    * @brief deserialize graph from a stream
diff --git a/nntrainer/compiler/tflite_interpreter.cpp b/nntrainer/compiler/tflite_interpreter.cpp
index c6539156..5f08359c 100644
--- a/nntrainer/compiler/tflite_interpreter.cpp
+++ b/nntrainer/compiler/tflite_interpreter.cpp
@@ -236,12 +236,11 @@ private:
   std::vector outputs;
 };
 
-TfOpNodes
-buildOpNodes(std::shared_ptr<GraphRepresentation> representation) {
+TfOpNodes buildOpNodes(const GraphRepresentation &representation) {
   TfOpNodes nodes;
   /// @todo, look ahead of layers to get nodes that can be fused
   /// we will need to have a dedicated builder
-  for (auto iter = representation->cbegin(); iter != representation->cend();
+  for (auto iter = representation.cbegin(); iter != representation.cend();
        iter++) {
     const auto &ln = *iter;
     Exporter e;
@@ -429,9 +428,8 @@ buildSubGraphs(const TfOpNodes &nodes, const TfOpIdxMap &map,
 
 } // namespace
 
-void TfliteInterpreter::serialize(
-  std::shared_ptr<GraphRepresentation> representation,
-  const std::string &out) {
+void TfliteInterpreter::serialize(const GraphRepresentation &representation,
+                                  const std::string &out) {
   /// @todo check if graph is finalized & initialized and ready to serialize.
   /// 1. The graph must have weights, input dims, output dims set
   flatbuffers::FlatBufferBuilder fbb;
 
diff --git a/nntrainer/compiler/tflite_interpreter.h b/nntrainer/compiler/tflite_interpreter.h
index 9ab5fee2..1a91f8d1 100644
--- a/nntrainer/compiler/tflite_interpreter.h
+++ b/nntrainer/compiler/tflite_interpreter.h
@@ -40,7 +40,7 @@ public:
   /**
    * @copydoc GraphInterpreter::serialize(const std::string &out)
    */
-  void serialize(std::shared_ptr<GraphRepresentation> representation,
+  void serialize(const GraphRepresentation &representation,
                  const std::string &out) override;
 
   /**
diff --git a/nntrainer/layers/layer_node.cpp b/nntrainer/layers/layer_node.cpp
index b173df0d..06113b15 100644
--- a/nntrainer/layers/layer_node.cpp
+++ b/nntrainer/layers/layer_node.cpp
@@ -71,12 +71,19 @@ public:
 
   /**
    * @brief LossSpec validator
-   *
+   * @todo detecting when loss becomes NaN is useful, but it will need a
+   * dedicated throw
    * @param v float to validate
-   * @retval true if it is greater or equal than 0.0
-   * @retval false if it is samller than 0.0
+   * @retval true if it is a valid number
+   * @retval false if it is nan
    */
-  bool isValid(const float &v) const override { return !std::isnan(v); }
+  bool isValid(const float &v) const override {
+    if (std::isnan(v)) {
+      ml_logw("loss value is NAN");
+    }
+
+    return true;
+  }
 };
 
 } // namespace props
@@ -124,8 +131,8 @@ LayerNode::LayerNode(std::unique_ptr<nntrainer::Layer> &&l) :
   finalized(false),
   activation_type(ActivationType::ACT_NONE),
   layer_node_props(new PropsType(props::Name(), props::Flatten(),
-                                 props::Distribute(), props::Trainable(),
-                                 props::Loss())),
+                                 props::Distribute(), props::Trainable())),
+  loss(new props::Loss()),
   regularization_loss(0.0f),
   exec_order({0, 0, 0}) {
   if (layer && layer->getType() == TimeDistLayer::type) {
@@ -373,8 +380,7 @@ void LayerNode::finalize() {
  * @brief Forward Propagation of a layer
  */
 void LayerNode::forwarding(bool training) {
-  std::get<props::Loss>(*layer_node_props)
-    .set(run_context.getRegularizationLoss());
+  loss->set(run_context.getRegularizationLoss());
   layer->forwarding(run_context, training);
 }
 
@@ -422,11 +428,9 @@ bool LayerNode::requireLabel() const { return getLayer()->requireLabel(); }
 float LayerNode::getLoss() const {
   /** add loss only for loss layers */
   if (requireLabel())
-    std::get<props::Loss>(*layer_node_props)
-      .set(std::get<props::Loss>(*layer_node_props).get() +
-           run_context.getLoss());
+    loss->set(*loss + run_context.getLoss());
 
-  return std::get<props::Loss>(*layer_node_props).get();
+  return *loss;
 }
 
 /**
diff --git a/nntrainer/layers/layer_node.h b/nntrainer/layers/layer_node.h
index bac8468c..abb36d37 100644
--- a/nntrainer/layers/layer_node.h
+++ b/nntrainer/layers/layer_node.h
@@ -617,12 +617,13 @@ private:
      properties in the context/graph unless intended. */
   using PropsType = std::tuple<props::Name, props::Flatten, props::Distribute,
-                               props::Trainable, props::Loss>;
+                               props::Trainable>;
 
   /**
    * These properties are set for the layer by the user but are intercepted
    * and used in the node which forms the basic element of the graph.
   */
  std::unique_ptr<PropsType> layer_node_props; /**< properties for the node */
+  std::unique_ptr<props::Loss> loss;          /**< loss */
   float regularization_loss;
   ExecutionOrder exec_order; /**< order/location of execution for this node
                                 in forward and backwarding operations */
 
diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp
index 141e8482..32bdc456 100644
--- a/nntrainer/models/neuralnet.cpp
+++ b/nntrainer/models/neuralnet.cpp
@@ -27,6 +27,7 @@
 #include 
 #include 
+#include <ini_interpreter.h>
 #include 
 #include 
 #include 
@@ -372,8 +373,17 @@ void NeuralNetwork::save(const std::string &file_path,
     model_file.close();
     break;
   }
-  case ml::train::ModelFormat::MODEL_FORMAT_INI:
-    [[fallthrough]]; // NYI
+  case ml::train::ModelFormat::MODEL_FORMAT_INI: {
+    IniGraphInterpreter interpreter;
+
+    /// @note this is to ensure permission checks are done
+    checkedOpenStream<std::ofstream>(file_path, std::ios::out);
+    /// @todo serialize model props
+    /// @todo serialize dataset props
+    /// @todo serialize optimizer props
+    interpreter.serialize(model_graph, file_path);
+    break;
+  }
   default:
     throw nntrainer::exception::not_supported(
       "saving with given format is not supported yet");
diff --git a/test/ccapi/unittest_ccapi.cpp b/test/ccapi/unittest_ccapi.cpp
index 61afb767..a11167c6 100644
--- a/test/ccapi/unittest_ccapi.cpp
+++ b/test/ccapi/unittest_ccapi.cpp
@@ -405,6 +405,23 @@ TEST(nntrainer_ccapi, train_with_config_02_n) {
   EXPECT_EQ(model->train(), ML_ERROR_INVALID_PARAMETER);
 }
 
+TEST(nntrainer_ccapi, save_ini_p) {
+  std::unique_ptr<ml::train::Model> model;
+  model = ml::train::createModel(ml::train::ModelType::NEURAL_NET);
+  ScopedIni s("simple_ini", {model_base + "batch_size = 16", optimizer,
+                             dataset + "-BufferSize", inputlayer, outputlayer});
+  EXPECT_EQ(model->loadFromConfig(s.getIniName()), ML_ERROR_NONE);
+  EXPECT_EQ(model->compile(), ML_ERROR_NONE);
+  EXPECT_EQ(model->initialize(), ML_ERROR_NONE);
+  auto saved_ini_name = s.getIniName() + "_saved";
+  model->save(saved_ini_name, ml::train::ModelFormat::MODEL_FORMAT_INI);
+
+  if (remove(saved_ini_name.c_str())) {
+    std::cerr << "remove ini " << saved_ini_name
+              << " failed, reason: " << strerror(errno);
+  }
+}
+
 /**
  * @brief Main gtest
  */
diff --git a/test/unittest/unittest_base_properties.cpp b/test/unittest/unittest_base_properties.cpp
index 0bfa649a..e0ceec47 100644
--- a/test/unittest/unittest_base_properties.cpp
+++ b/test/unittest/unittest_base_properties.cpp
@@ -285,7 +285,7 @@ TEST(BasicProperty, valid_p) {
     auto result = e.getResult();
 
     auto pair1 = std::pair("unit", "1");
-    EXPECT_EQ(result->at(2), pair1);
+    EXPECT_EQ(result->at(1), pair1);
   }
 
   { /**< load from layer */
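
For reference, the end-to-end flow that the new save_ini_p test exercises
looks like this from the C++ API. This is a minimal usage sketch, not part of
the patch: the "model.ini" / "model_saved.ini" paths are illustrative
assumptions, and error handling is reduced to early returns.

#include <iostream>

#include <model.h> // ccapi entry point: ml::train::Model, createModel()

int main() {
  // build a model from an existing ini configuration
  auto model = ml::train::createModel(ml::train::ModelType::NEURAL_NET);

  if (model->loadFromConfig("model.ini") != 0) {
    std::cerr << "loading configuration failed\n";
    return 1;
  }

  // the graph should be compiled and initialized before serializing;
  // see the @todo note in TfliteInterpreter::serialize above
  if (model->compile() != 0 || model->initialize() != 0) {
    std::cerr << "finalizing the model failed\n";
    return 1;
  }

  // new in this patch: MODEL_FORMAT_INI is routed to IniGraphInterpreter
  // instead of falling through to the not_supported exception
  model->save("model_saved.ini", ml::train::ModelFormat::MODEL_FORMAT_INI);

  return 0;
}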