} // namespace
-void IniGraphInterpreter::serialize(
- std::shared_ptr<const GraphRepresentation> representation,
- const std::string &out) {
+void IniGraphInterpreter::serialize(const GraphRepresentation &representation,
+ const std::string &out) {
std::vector<IniSection> sections;
- for (auto iter = representation->cbegin(); iter != representation->cend();
+ for (auto iter = representation.cbegin(); iter != representation.cend();
iter++) {
const auto &ln = *iter;
virtual ~IniGraphInterpreter(){};
/**
- * @copydoc GraphInterpreter::serialize(const std::string &out)
+ * @copydoc GraphInterpreter::serialize(const GraphRepresentation
+ * &representation, const std::string &out)
*/
- void serialize(std::shared_ptr<const GraphRepresentation> representation,
+ void serialize(const GraphRepresentation &representation,
const std::string &out) override;
/**
* @param representation graph representation
* @param out output file name
*/
- virtual void
- serialize(std::shared_ptr<const GraphRepresentation> representation,
- const std::string &out) = 0;
+ virtual void serialize(const GraphRepresentation &representation,
+ const std::string &out) = 0;
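For context, a toy sketch (stand-in types, not the real nntrainer classes) of what the signature migration above means at a call site: serialize() now borrows the graph by const reference, so callers keep ownership and no shared_ptr allocation is needed.

```cpp
#include <string>

// Toy illustration of the migrated signature (types are stand-ins, not
// the real nntrainer classes): serialize() borrows the graph by const
// reference, so the call site no longer needs std::make_shared.
struct Graph {};

struct Interpreter {
  void serialize(const Graph &g, const std::string &out) {
    (void)g;
    (void)out; // ... write `g` to `out` ...
  }
};

int main() {
  Graph g;                               // caller keeps ownership
  Interpreter{}.serialize(g, "out.ini"); // pass by const reference
  return 0;
}
```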
/**
* @brief deserialize graph from a stream
std::vector<int> outputs;
};
-TfOpNodes
-buildOpNodes(std::shared_ptr<const GraphRepresentation> representation) {
+TfOpNodes buildOpNodes(const GraphRepresentation &representation) {
TfOpNodes nodes;
/// @todo, look ahead of layers to get nodes that can be fused
/// we will need to have a dedicated builder
- for (auto iter = representation->cbegin(); iter != representation->cend();
+ for (auto iter = representation.cbegin(); iter != representation.cend();
iter++) {
const auto &ln = *iter;
Exporter e;
} // namespace
-void TfliteInterpreter::serialize(
- std::shared_ptr<const GraphRepresentation> representation,
- const std::string &out) {
+void TfliteInterpreter::serialize(const GraphRepresentation &representation,
+ const std::string &out) {
/// @todo check if graph is finalized & initialized and ready to serialize.
/// 1. The graph must have weights, input dims, output dims set
flatbuffers::FlatBufferBuilder fbb;
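As background for the tflite serialization, a minimal sketch of the usual FlatBufferBuilder write-out flow; `writeBuffer` is a hypothetical helper for illustration only and not part of this patch, and the tflite schema types are omitted.

```cpp
#include <flatbuffers/flatbuffers.h>
#include <fstream>
#include <string>

// Hypothetical helper (not part of this patch) showing the tail of the
// FlatBufferBuilder flow: once fbb.Finish(root) has been called, the
// finished buffer is written out verbatim to the target file.
void writeBuffer(flatbuffers::FlatBufferBuilder &fbb, const std::string &out) {
  std::ofstream os(out, std::ios::binary);
  os.write(reinterpret_cast<const char *>(fbb.GetBufferPointer()),
           fbb.GetSize());
}
```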
/**
- * @copydoc GraphInterpreter::serialize(const std::string &out)
+ * @copydoc GraphInterpreter::serialize(const GraphRepresentation
+ * &representation, const std::string &out)
*/
- void serialize(std::shared_ptr<const GraphRepresentation> representation,
+ void serialize(const GraphRepresentation &representation,
const std::string &out) override;
/**
/**
* @brief LossSpec validator
- *
+ * @todo detecting when the loss becomes NaN would be useful, but it will
+ * need a dedicated throw
* @param v float to validate
- * @retval true if it is greater or equal than 0.0
- * @retval false if it is samller than 0.0
+ * @retval true always; a warning is logged when the value is NaN
*/
- bool isValid(const float &v) const override { return !std::isnan(v); }
+ bool isValid(const float &v) const override {
+ if (std::isnan(v)) {
+ ml_logw("loss value is NAN");
+ }
+
+ return true;
+ }
};
} // namespace props
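A standalone sanity check of the validator semantics above; plain asserts stand in for `ml_logw`, and nothing nntrainer-specific is used.

```cpp
#include <cassert>
#include <cmath>

// Standalone check of the semantics above: std::isnan detects NaN, but
// the validator still reports the value as valid and only warns
// (a dedicated throw is left as a @todo).
int main() {
  float nan_loss = std::nanf("");
  assert(std::isnan(nan_loss)); // would hit the ml_logw branch
  assert(!std::isnan(0.5f));    // ordinary loss values pass silently
  return 0;
}
```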
finalized(false),
activation_type(ActivationType::ACT_NONE),
layer_node_props(new PropsType(props::Name(), props::Flatten(),
- props::Distribute(), props::Trainable(),
- props::Loss())),
+ props::Distribute(), props::Trainable())),
+ loss(new props::Loss()),
regularization_loss(0.0f),
exec_order({0, 0, 0}) {
if (layer && layer->getType() == TimeDistLayer::type) {
* @brief Forward Propagation of a layer
*/
void LayerNode::forwarding(bool training) {
- std::get<props::Loss>(*layer_node_props)
- .set(run_context.getRegularizationLoss());
+ loss->set(run_context.getRegularizationLoss());
layer->forwarding(run_context, training);
}
float LayerNode::getLoss() const {
/** add loss only for loss layers */
if (requireLabel())
- std::get<props::Loss>(*layer_node_props)
- .set(std::get<props::Loss>(*layer_node_props).get() +
- run_context.getLoss());
+ loss->set(*loss + run_context.getLoss());

- return std::get<props::Loss>(*layer_node_props).get();
+ return *loss;
}
/**
properties in the context/graph unless intended. */
using PropsType = std::tuple<props::Name, props::Flatten, props::Distribute,
- props::Trainable, props::Loss>;
+ props::Trainable>;
/**
* These properties are set for the layer by the user but are intercepted
* and used in the node which forms the basic element of the graph.
*/
std::unique_ptr<PropsType> layer_node_props; /**< properties for the node */
+ std::unique_ptr<props::Loss> loss; /**< loss */
float regularization_loss;
ExecutionOrder exec_order; /**< order/location of execution for this node
in forward and backwarding operations */
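The `loss->set(*loss + ...)` pattern in getLoss() compiles because the property type is settable and implicitly convertible to float; a hypothetical minimal stand-in makes that explicit.

```cpp
#include <memory>

// Hypothetical minimal stand-in for props::Loss (the real class builds on
// nntrainer's Property<float>): set() writes and the conversion operator
// reads, which is what the `loss->set(*loss + x)` pattern requires.
struct LossProp {
  void set(float v) { value = v; }
  operator float() const { return value; }
  float value{0.0f};
};

int main() {
  auto loss = std::make_unique<LossProp>();
  loss->set(*loss + 0.5f); // read via conversion, write via set()
  return (*loss == 0.5f) ? 0 : 1;
}
```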
#include <sstream>
#include <databuffer.h>
+#include <ini_interpreter.h>
#include <model_loader.h>
#include <neuralnet.h>
#include <nntrainer_error.h>
model_file.close();
break;
}
- case ml::train::ModelFormat::MODEL_FORMAT_INI:
- [[fallthrough]]; // NYI
+ case ml::train::ModelFormat::MODEL_FORMAT_INI: {
+ IniGraphInterpreter interpreter;
+
+ /// @note this is to ensure permission checks are done
+ checkedOpenStream<std::ofstream>(file_path, std::ios::out);
+ /// @todo serialize model props
+ /// @todo serialize dataset props
+ /// @todo serialize optimizer props
+ interpreter.serialize(model_graph, file_path);
+ break;
+ }
default:
throw nntrainer::exception::not_supported(
"saving with given format is not supported yet");
EXPECT_EQ(model->train(), ML_ERROR_INVALID_PARAMETER);
}
+TEST(nntrainer_ccapi, save_ini_p) {
+ std::unique_ptr<ml::train::Model> model;
+ model = ml::train::createModel(ml::train::ModelType::NEURAL_NET);
+ ScopedIni s("simple_ini", {model_base + "batch_size = 16", optimizer,
+ dataset + "-BufferSize", inputlayer, outputlayer});
+ EXPECT_EQ(model->loadFromConfig(s.getIniName()), ML_ERROR_NONE);
+ EXPECT_EQ(model->compile(), ML_ERROR_NONE);
+ EXPECT_EQ(model->initialize(), ML_ERROR_NONE);
+ auto saved_ini_name = s.getIniName() + "_saved";
+ EXPECT_NO_THROW(
+ model->save(saved_ini_name, ml::train::ModelFormat::MODEL_FORMAT_INI));
+
+ if (remove(saved_ini_name.c_str())) {
+ std::cerr << "remove ini " << saved_ini_name
+ << "failed, reason: " << strerror(errno);
+ }
+}
+
/**
* @brief Main gtest
*/
auto result = e.getResult<nntrainer::ExportMethods::METHOD_STRINGVECTOR>();
auto pair1 = std::pair<std::string, std::string>("unit", "1");
- EXPECT_EQ(result->at(2), pair1);
+ EXPECT_EQ(result->at(1), pair1);
}
{ /**< load from layer */