This patch adds basic load/save support via CCAPI.
**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
exit(1);
}
+ const std::string weight_path = "logistic_model.bin";
+
const std::vector<std::string> args(argv + 1, argv + argc);
std::string config = args[1];
data_file = args[2];
std::move(data_train));
try {
- NN.train();
+ NN.train({"save_path=" + weight_path});
} catch (...) {
std::cerr << "Error during train" << std::endl;
return 0;
}
} else {
try {
- NN.readModel();
+ NN.load(weight_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
} catch (std::exception &e) {
- std::cerr << "Error during readModel: " << e.what() << "\n";
+ std::cerr << "Error during loading weights: " << e.what() << "\n";
return 1;
}
std::ifstream dataFile(data_file);
Epochs = 100 # Epochs
Loss = cross # Loss function : mse (mean squared error)
# cross ( cross entropy )
-Save_Path = "logistic_model.bin" # model path to save / read
batch_size = 16 # batch size
[Optimizer]
* @brief Neural Network Create & Initialization
*/
model = createModel(ml::train::ModelType::NEURAL_NET);
- model->loadFromConfig(config);
+ model->load(config, ml::train::ModelFormat::MODEL_FORMAT_INI_WITH_BIN);
} catch (std::exception &e) {
std::cerr << "Error during loadFromConfig " << e.what() << std::endl;
return 1;
try {
model->compile();
model->initialize();
- model->readModel();
model->setDataset(ml::train::DatasetModeType::MODE_TRAIN, dataset_train);
model->setDataset(ml::train::DatasetModeType::MODE_VALID, dataset_val);
} catch (std::exception &e) {
exit(1);
}
+ std::string weight_path = "product_ratings_model.bin";
+
const std::vector<std::string> args(argv + 1, argv + argc);
std::string config = args[1];
data_file = args[2];
std::cerr << "Error during initialize" << std::endl;
return 1;
}
- NN.readModel();
std::cout << "Input dimension: " << NN.getInputDimension()[0];
}
} else {
try {
- NN.readModel();
+ NN.load(weight_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
} catch (std::exception &e) {
- std::cerr << "Error during readModel: " << e.what() << "\n";
+ std::cerr << "Error during loading weights: " << e.what() << "\n";
return 1;
}
std::ifstream dataFile(data_file);
std::cout << "./DeepQ Config.ini\n";
exit(0);
}
+ const std::string weight_file = "model_deepq.bin";
const std::vector<std::string> args(argv + 1, argv + argc);
std::string config = args[0];
* @brief Read Model Data if any
*/
try {
- mainNet.readModel();
+ mainNet.load(weight_file, ml::train::ModelFormat::MODEL_FORMAT_BIN);
+ targetNet.load(weight_file, ml::train::ModelFormat::MODEL_FORMAT_BIN);
} catch (...) {
- std::cerr << "Error during readModel\n";
+ std::cerr << "Error during readBin\n";
return 1;
}
- /**
- * @brief Sync targetNet
- */
- targetNet.copy(mainNet);
-
/**
* @brief Run Episode
*/
std::cerr << "Error during getLoss: " << e.what() << "\n";
return 1;
}
- /**
- * @brief copy targetNetwork
- */
- targetNet.copy(mainNet);
try {
- mainNet.saveModel();
+ targetNet.load(weight_file, ml::train::ModelFormat::MODEL_FORMAT_BIN);
+ mainNet.save(weight_file, ml::train::ModelFormat::MODEL_FORMAT_BIN);
} catch (std::exception &e) {
- std::cerr << "Error during saveModel: " << e.what() << "\n";
+ std::cerr << "Error during saveBin: " << e.what() << "\n";
return 1;
}
}
nntrainer::NeuralNetwork NN;
int status = ML_ERROR_NONE;
try {
- status = NN.loadFromConfig(config);
- if (status != ML_ERROR_NONE)
- return status;
+ NN.load(config, ml::train::ModelFormat::MODEL_FORMAT_INI);
+ // NN.load(weight_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
status = NN.compile();
if (status != ML_ERROR_NONE)
std::cerr << "Error during init" << std::endl;
return 1;
}
- try {
- NN.readModel();
- } catch (std::exception &e) {
- std::cerr << "Error during readModel reason: " << e.what() << std::endl;
- return 1;
- }
try {
NN.train();
*/
try {
model = createModel(ml::train::ModelType::NEURAL_NET);
- model->loadFromConfig(config);
+ model->load(config, ml::train::ModelFormat::MODEL_FORMAT_INI_WITH_BIN);
} catch (...) {
std::cerr << "Error during loadFromConfig" << std::endl;
return 1;
std::cerr << "Error during init" << std::endl;
return 1;
}
- try {
- model->readModel();
- } catch (std::exception &e) {
- std::cerr << "Error during readModel, reason: " << e.what() << std::endl;
- return 1;
- }
model->setDataset(ml::train::DatasetModeType::MODE_TRAIN, dataset_train);
model->setDataset(ml::train::DatasetModeType::MODE_VALID, dataset_val);
*/
nntrainer::NeuralNetwork NN;
try {
- NN.loadFromConfig(config);
+ NN.load(config, ml::train::ModelFormat::MODEL_FORMAT_INI_WITH_BIN);
} catch (...) {
std::cerr << "Error during loadFromConfig" << std::endl;
return 0;
}
try {
- NN.readModel();
NN.setDataset(ml::train::DatasetModeType::MODE_TRAIN, std::move(db_train));
NN.setDataset(ml::train::DatasetModeType::MODE_VALID, std::move(db_valid));
NN.train();
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
- virtual int loadFromConfig(std::string config) = 0;
+ virtual int loadFromConfig(const std::string &config) = 0;
/**
* @brief Minimal set of properties that must be supported by the model
*/
virtual int initialize() = 0;
- /**
- * @brief save model and training parameters into file
- * @todo deprecate this
- */
- [[deprecated("use saveModel(const std::string &path_prefix, "
- "ModelFormat format)")]] virtual void
- saveModel() = 0;
-
/**
* @brief load model states and training parameters from a file
* @param file_path file_path to save the model, if full path is not
* @param format format to save parameters
*/
virtual void save(const std::string &file_path,
- ModelFormat format = ModelFormat::MODEL_FORMAT_BIN){};
-
- /**
- * @brief read model and training parameters from file
- * @todo deprecate this
- */
- virtual void readModel() = 0;
+ ModelFormat format = ModelFormat::MODEL_FORMAT_BIN) = 0;
/**
* @brief load model with regard to the format
* @param format format to save parameters
*/
virtual void load(const std::string &file_path,
- ModelFormat format = ModelFormat::MODEL_FORMAT_BIN){};
+ ModelFormat format = ModelFormat::MODEL_FORMAT_BIN) = 0;
/**
* @brief Run Model training and validation
loadModel();
model->compile();
model->initialize();
- model->readModel();
}
const char *NNTrainerInference::getModelConfig() {
gint64 start_time = g_get_real_time();
#endif
model = ml::train::createModel(ml::train::ModelType::NEURAL_NET);
- model->loadFromConfig(model_config);
+ model->load(model_config, ml::train::ModelFormat::MODEL_FORMAT_INI_WITH_BIN);
#if (DBG)
gint64 stop_time = g_get_real_time();
g_message("Model is loaded: %" G_GINT64_FORMAT, (stop_time - start_time));
break;
default:
std::string msg =
- "[Layer] Unknown Layer Property Key for value " + std::string(value);
+ "[Layer] Unknown Layer Property Key for value, key: " + type_str +
+ " value: " + value;
throw exception::not_supported(msg);
}
}
namespace nntrainer {
-int NeuralNetwork::loadFromConfig(std::string config) {
+int NeuralNetwork::loadFromConfig(const std::string &config) {
if (loadedFromConfig == true) {
ml_loge("cannnot do loadFromConfig twice");
return ML_ERROR_INVALID_PARAMETER;
}
initialized = true;
+
+ if (!load_path.empty()) {
+ load(load_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
+ }
+
return status;
}
backwarding(iteration);
}
+/**
+ * @brief Save model weights and training state to a file.
+ * @param file_path path to write the model file to
+ * @param format serialization format; only MODEL_FORMAT_BIN is supported,
+ * any other format throws not_supported
+ * @throws std::runtime_error if the model has not been initialized yet
+ * @throws std::invalid_argument if the file cannot be opened
+ */
+void NeuralNetwork::save(const std::string &file_path,
+                         ml::train::ModelFormat format) {
+  NNTR_THROW_IF(!initialized, std::runtime_error)
+    << "Cannot save model if not initialized yet, path: " << file_path
+    << " format: " << static_cast<unsigned>(format);
+
+  /// @todo this switch case should be delegating the function call only. It's
+  /// not delegating for now as required logics are manageable for now.
+  switch (format) {
+  case ml::train::ModelFormat::MODEL_FORMAT_BIN: {
+    std::ofstream model_file(file_path, std::ios::out | std::ios::binary);
+    /// @todo, if errno == EACCES or EPERM, throw PERMISSION DENIED error
+    NNTR_THROW_IF(!model_file.good(), std::invalid_argument)
+      << "model file not opened, file path: " << file_path
+      << " reason: " << strerror(errno);
+    /// save order follows the topological sort of the graph; the loop
+    /// variable is named `node` so it does not shadow the member `iter`
+    /// (the iteration counter) written below.
+    for (auto node = model_graph.cbegin(); node != model_graph.cend();
+         ++node) {
+      (*node)->save(model_file);
+    }
+    model_file.write((char *)&epoch_idx, sizeof(epoch_idx));
+    model_file.write((char *)&iter, sizeof(iter));
+    model_file.close();
+    break;
+  }
+  case ml::train::ModelFormat::MODEL_FORMAT_INI:
+    [[fallthrough]]; // NYI
+  default:
+    throw nntrainer::exception::not_supported(
+      "saving with given format is not supported yet");
+  }
+}
+
+/**
+ * @brief Load model weights / configuration with regard to the format.
+ * @param file_path path of the file to load from
+ * @param format format of the given file: MODEL_FORMAT_BIN reads weights
+ * (requires the model to be initialized), MODEL_FORMAT_INI loads the config
+ * only, MODEL_FORMAT_INI_WITH_BIN additionally defers weight loading until
+ * initialize() by remembering the configured save_path in load_path
+ * @throws std::runtime_error if loading weights before initialization
+ * @throws std::invalid_argument if the file cannot be opened
+ */
+void NeuralNetwork::load(const std::string &file_path,
+                         ml::train::ModelFormat format) {
+  /// @todo this switch case should be delegating the function call only. It's
+  /// not delegating for now as required logics are manageable for now.
+  switch (format) {
+  case ml::train::ModelFormat::MODEL_FORMAT_BIN: {
+    NNTR_THROW_IF(!initialized, std::runtime_error)
+      << "Cannot load if not initialized yet, path: " << file_path
+      << " format: " << static_cast<unsigned>(format);
+
+    std::ifstream model_file(file_path, std::ios::in | std::ios::binary);
+    /// @todo, if errno == EACCES or EPERM, throw PERMISSION DENIED error
+    NNTR_THROW_IF(!model_file.good(), std::invalid_argument)
+      << "model file not opened, file path: " << file_path
+      << " reason: " << strerror(errno);
+
+    /// read order follows the topological sort of the graph; the loop
+    /// variable is named `node` so it does not shadow the member `iter`
+    /// (the iteration counter) read below.
+    for (auto node = model_graph.cbegin(); node != model_graph.cend();
+         ++node) {
+      (*node)->read(model_file);
+    }
+
+    try {
+      /// this is assuming that the failure is allowed at the end of the file
+      /// read. so, after this line, additional read shouldn't be called
+      checkedRead(model_file, (char *)&epoch_idx, sizeof(epoch_idx),
+                  "[NeuralNetwork::load] failed to read epoch_idx");
+      checkedRead(model_file, (char *)&iter, sizeof(iter),
+                  "[NeuralNetwork::load] failed to read iteration");
+    } catch (...) {
+      std::cerr << "failed to read epoch idx, proceeding with default index\n";
+    }
+
+    ml_logi("read modelfile: %s", file_path.c_str());
+    break;
+  }
+  case ml::train::ModelFormat::MODEL_FORMAT_INI_WITH_BIN: {
+    int ret = loadFromConfig(file_path);
+    throw_status(ret);
+    if (!save_path.empty()) {
+      /// @todo use checkedOpen here to validate the path before deferring
+      load_path = save_path;
+    }
+    break;
+  }
+  case ml::train::ModelFormat::MODEL_FORMAT_INI: {
+    int ret = loadFromConfig(file_path);
+    throw_status(ret);
+    break;
+  }
+  default:
+    throw nntrainer::exception::not_supported(
+      "loading with given format is not supported yet");
+  }
+}
+
float NeuralNetwork::getLoss() {
loss = 0.0f;
return *this;
}
-/**
- * @brief save model to file
- * save Weight & Bias Data into file by calling save from layer
- * save training parameters from the optimizer
- * @todo saving order is based on the topological sort and this may
- * not match with the ini order
- */
-void NeuralNetwork::saveModel() {
- if (!initialized)
- throw std::runtime_error("Cannot save the model before initialize.");
-
- if (save_path == std::string()) {
- return;
- }
-
- if (!initialized)
- throw std::runtime_error("Cannot save the model before initialize.");
-
- std::ofstream model_file(save_path, std::ios::out | std::ios::binary);
-
- NNTR_THROW_IF(!model_file.good(), std::invalid_argument)
- << "model file not opened, file path: " << save_path
- << " reason: " << strerror(errno);
-
- for (auto iter = model_graph.cbegin(); iter != model_graph.cend(); iter++) {
- (*iter)->save(model_file);
- }
- model_file.write((char *)&epoch_idx, sizeof(epoch_idx));
- model_file.write((char *)&iter, sizeof(iter));
- model_file.close();
-}
-
-/**
- * @brief read model from file
- * read Weight & Bias Data into file by calling save from layer
- * read training parameters from the optimizer if continuing train
- * @todo reading order is based on the topological sort and this may
- * not match with the ini order
- */
-void NeuralNetwork::readModel() {
- if (!initialized)
- throw std::runtime_error("Cannot read the model before initialize.");
-
- if (save_path == std::string()) {
- return;
- }
-
- if (!isFileExist(save_path)) {
- ml_logd("skipping reading model, path is not valid: %s", save_path.c_str());
- return;
- }
-
- if (!initialized)
- throw std::runtime_error("Cannot save the model before initialize.");
-
- std::ifstream model_file(save_path, std::ios::in | std::ios::binary);
-
- for (auto iter = model_graph.cbegin(); iter != model_graph.cend(); iter++) {
- (*iter)->read(model_file);
- }
-
- try {
- /// this is assuming that the failure is allowed at the end of the file
- /// read. so, after this line, additional read shouldn't be called
- checkedRead(model_file, (char *)&epoch_idx, sizeof(epoch_idx),
- "[NeuralNetwork::readModel] failed to read epoch_idx");
- checkedRead(model_file, (char *)&iter, sizeof(iter),
- "[NeuralNetwork::readModel] failed to read iteration");
- } catch (...) {
- model_file.close();
- std::cerr << "failed to read epoch idx, proceeding with default index\n";
- }
-
- model_file.close();
- ml_logi("read modelfile: %s", save_path.c_str());
-}
-
void NeuralNetwork::setBatchSize(unsigned int batch) {
batch_size = batch;
throw std::runtime_error("No training data");
training.loss /= count;
- saveModel();
+ if (!save_path.empty()) {
+ save(save_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
+ }
std::cout << "#" << epoch_idx << "/" << epochs
<< " - Training Loss: " << training.loss;
initialized(false),
compiled(false),
loadedFromConfig(false),
+ load_path(""),
app_context(app_context_),
in_place_optimization(in_place_opt) {}
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
- int loadFromConfig(std::string config);
+ int loadFromConfig(const std::string &config);
/**
* @brief Compile the graph in the model
void backwarding(int iteration);
/**
- * @brief save model and training parameters into file
+ * @copydoc Model::save(const std::string &file_path, ml::train::ModelFormat
+ * format);
*/
- void saveModel();
+ void save(const std::string &file_path,
+ ml::train::ModelFormat format =
+ ml::train::ModelFormat::MODEL_FORMAT_BIN) override;
/**
- * @brief read model and training parameters from file
+ * @copydoc Model::load(const std::string &file_path, ml::train::ModelFormat
+ * format);
*/
- void readModel();
+ void load(const std::string &file_path,
+ ml::train::ModelFormat format =
+ ml::train::ModelFormat::MODEL_FORMAT_BIN) override;
/**
* @brief get Epochs
bool loadedFromConfig; /**< Check if config is loaded to prevent load twice */
+ std::string load_path; /**< path to load weights when initialize */
+
RunStats validation; /** validation statistics of the model */
RunStats training; /** training statistics of the model */
RunStats testing; /** testing statistics of the model */
swap(lhs.model_graph, rhs.model_graph);
swap(lhs.compiled, rhs.compiled);
swap(lhs.loadedFromConfig, rhs.loadedFromConfig);
+ swap(lhs.load_path, rhs.load_path);
}
/**
EXPECT_NO_THROW(NN.addLayer(layer_node));
EXPECT_NO_THROW(NN.setProperty({"loss=mse"}));
- EXPECT_THROW(NN.readModel(), std::runtime_error);
- EXPECT_THROW(NN.saveModel(), std::runtime_error);
+ EXPECT_THROW(NN.load("model.bin"), std::runtime_error);
+ EXPECT_THROW(NN.save("model.bin"), std::runtime_error);
EXPECT_EQ(NN.compile(), ML_ERROR_NONE);
- EXPECT_THROW(NN.readModel(), std::runtime_error);
- EXPECT_THROW(NN.saveModel(), std::runtime_error);
+ EXPECT_THROW(NN.load("model.bin"), std::runtime_error);
+ EXPECT_THROW(NN.save("model.bin"), std::runtime_error);
}
/**