REGISTER_METHOD(MLTrainerLayerSetProperty);
REGISTER_METHOD(MLTrainerLayerCreate);
+ REGISTER_METHOD(MLTrainerLayerDispose);
REGISTER_METHOD(MLTrainerOptimizerSetProperty);
REGISTER_METHOD(MLTrainerOptimizerCreate);
+ REGISTER_METHOD(MLTrainerOptimizerDispose);
REGISTER_METHOD(MLTrainerModelCreate);
REGISTER_METHOD(MLTrainerModelCompile);
REGISTER_METHOD(MLTrainerModelAddLayer);
REGISTER_METHOD(MLTrainerModelSave);
REGISTER_METHOD(MLTrainerModelSetDataset);
REGISTER_METHOD(MLTrainerModelSetOptimizer);
+ REGISTER_METHOD(MLTrainerModelDispose);
REGISTER_METHOD(MLTrainerDatasetCreateGenerator);
REGISTER_METHOD(MLTrainerDatasetCreateFromFile);
REGISTER_METHOD(MLTrainerDatasetSetProperty);
+ REGISTER_METHOD(MLTrainerDatasetDispose);
#undef REGISTER_METHOD
}
ReportSuccess(out);
}
+// JS bridge for Layer.dispose(): validates that |args| carries a numeric
+// layer id, forwards disposal to TrainerManager::LayerDispose() and reports
+// the outcome (success or the returned PlatformResult error) into |out|.
+void MlInstance::MLTrainerLayerDispose(const picojson::value& args,
+                                       picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  // Reports a type-mismatch error into |out| and returns early on failure.
+  CHECK_ARGS(args, kId, double, out);
+
+  auto id = static_cast<int>(args.get(kId).get<double>());
+
+  PlatformResult result = trainer_manager_.LayerDispose(id);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  ReportSuccess(out);
+}
+
void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
ReportSuccess(out);
}
+// JS bridge for Optimizer.dispose(): validates that |args| carries a numeric
+// optimizer id, forwards disposal to TrainerManager::OptimizerDispose() and
+// reports the outcome (success or the returned error) into |out|.
+void MlInstance::MLTrainerOptimizerDispose(const picojson::value& args,
+                                           picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  // Reports a type-mismatch error into |out| and returns early on failure.
+  CHECK_ARGS(args, kId, double, out);
+
+  auto id = static_cast<int>(args.get(kId).get<double>());
+
+  PlatformResult result = trainer_manager_.OptimizerDispose(id);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  ReportSuccess(out);
+}
+
void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
int id = -1;
ReportSuccess(out);
}
+// JS bridge for Model.dispose(): validates that |args| carries a numeric
+// model id, forwards disposal to TrainerManager::ModelDispose() (which also
+// drops all wrappers attached to the model) and reports the outcome into
+// |out|.
+void MlInstance::MLTrainerModelDispose(const picojson::value& args,
+                                       picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  // Reports a type-mismatch error into |out| and returns early on failure.
+  CHECK_ARGS(args, kId, double, out);
+
+  auto id = static_cast<int>(args.get(kId).get<double>());
+
+  PlatformResult result = trainer_manager_.ModelDispose(id);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  ReportSuccess(out);
+}
+
void MlInstance::MLTrainerDatasetCreateGenerator(const picojson::value& args,
picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
ReportSuccess(out);
}
+// JS bridge for Dataset.dispose(): validates that |args| carries a numeric
+// dataset id, forwards disposal to TrainerManager::DatasetDispose() and
+// reports the outcome (success or the returned error) into |out|.
+void MlInstance::MLTrainerDatasetDispose(const picojson::value& args,
+                                         picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  // Reports a type-mismatch error into |out| and returns early on failure.
+  CHECK_ARGS(args, kId, double, out);
+
+  auto id = static_cast<int>(args.get(kId).get<double>());
+
+  PlatformResult result = trainer_manager_.DatasetDispose(id);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  ReportSuccess(out);
+}
+
#undef CHECK_EXIST
#undef CHECK_TYPE
#undef CHECK_TYPE_X
ml_train_model_h n_model = NULL;
int ret_val = ml_train_model_construct(&n_model);
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not create model: %d (%s)", ret_val, ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
- models_[next_model_id_] = n_model;
+ models_[next_model_id_] = std::make_shared<Model>(n_model);
id = next_model_id_++;
return PlatformResult();
ml_train_model_h n_model = NULL;
int ret_val = ml_train_model_construct_with_conf(config.c_str(), &n_model);
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not create model: %d (%s)", ret_val, ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
- models_[next_model_id_] = n_model;
+ models_[next_model_id_] = std::make_shared<Model>(n_model);
id = next_model_id_++;
return PlatformResult();
int ret_val = 0;
auto compileOpts = ss.str();
if (compileOpts.length() < OPTION_SEPARATOR.length()) {
- ret_val = ml_train_model_compile(model, NULL);
+ ret_val = ml_train_model_compile(model->getNative(), NULL);
} else {
// remove trailing ' | ' from options string
compileOpts =
compileOpts.substr(0, compileOpts.length() - OPTION_SEPARATOR.length());
LoggerI("Compiling model with options: %s", compileOpts.c_str());
- ret_val = ml_train_model_compile(model, compileOpts.c_str(), NULL);
+ ret_val =
+ ml_train_model_compile(model->getNative(), compileOpts.c_str(), NULL);
}
ss.clear();
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not compile model: %d (%s)", ret_val, ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+ model->setCompiled(true);
return PlatformResult();
}
auto& model = models_[id];
+ if (!model->isCompiled()) {
+ LoggerE("Trying to train model that is not compiled");
+ return PlatformResult(ErrorCode::INVALID_STATE_ERR,
+ "Cannot train model before compilation");
+ }
+
std::stringstream ss;
for (const auto& opt : options) {
const auto& key = opt.first;
auto runOpts = ss.str();
if (runOpts.length() < OPTION_SEPARATOR.length()) {
- ret_val = ml_train_model_run(model, NULL);
+ ret_val = ml_train_model_run(model->getNative(), NULL);
} else {
// remove trailing ' | ' from options string
runOpts = runOpts.substr(0, runOpts.length() - OPTION_SEPARATOR.length());
LoggerI("Running model with options: %s", runOpts.c_str());
- ret_val = ml_train_model_run(model, runOpts.c_str(), NULL);
+ ret_val = ml_train_model_run(model->getNative(), runOpts.c_str(), NULL);
}
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not run (train) model: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::UNKNOWN_ERR, ml_strerror(ret_val));
auto& model = models_[id];
auto& layer = layers_[layerId];
- int ret_val = ml_train_model_add_layer(model, layer);
- if (ret_val != 0) {
+ if (model->isCompiled()) {
+ LoggerE("Modification of compiled model");
+ return PlatformResult(ErrorCode::INVALID_STATE_ERR,
+ "Modification of compiled model not allowed");
+ }
+
+ int ret_val =
+ ml_train_model_add_layer(model->getNative(), layer->getNative());
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not add layer to model: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+ model->layerIndices.push_back(layerId);
+ layer->setAttached(true);
+
return PlatformResult();
}
auto& model = models_[id];
auto& optimizer = optimizers_[optimizerId];
+ if (model->isCompiled()) {
+ LoggerE("Modification of compiled model");
+ return PlatformResult(ErrorCode::INVALID_STATE_ERR,
+ "Modification of compiled model not allowed");
+ }
- int ret_val = ml_train_model_set_optimizer(model, optimizer);
- if (ret_val != 0) {
+ int ret_val =
+ ml_train_model_set_optimizer(model->getNative(), optimizer->getNative());
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set optimizer for model: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+ if (model->optimizerIndex != INVALID_INDEX) {
+ // "release" optimizer that has been set before
+ auto optPosition = optimizers_.find(model->optimizerIndex);
+ if (optPosition != optimizers_.end()) {
+ (*optPosition).second->setAttached(false);
+ } else {
+ // This should never happen but just in case check and log such situation
+ LoggerE(
+ "Attached optimizer does not exist in map - some internal error "
+ "faced");
+ }
+ }
+
+ model->optimizerIndex = optimizerId;
+ optimizer->setAttached(true);
+
return PlatformResult();
}
auto& model = models_[id];
auto& dataset = datasets_[datasetId];
- int ret_val = ml_train_model_set_dataset(model, dataset);
- if (ret_val != 0) {
+ if (model->isCompiled()) {
+ LoggerE("Modification of compiled model");
+ return PlatformResult(ErrorCode::INVALID_STATE_ERR,
+ "Modification of compiled model not allowed");
+ }
+
+ int ret_val =
+ ml_train_model_set_dataset(model->getNative(), dataset->getNative());
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set dataset for model: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+ if (model->datasetIndex != INVALID_INDEX) {
+ // "release" dataset that has been set before
+ auto datPosition = datasets_.find(model->datasetIndex);
+ if (datPosition != datasets_.end()) {
+ (*datPosition).second->setAttached(false);
+ } else {
+ // This should never happen but just in case check and log such situation
+ LoggerE(
+ "Attached dataset does not exist in map = some internal error faced");
+ }
+ }
+
+ model->datasetIndex = datasetId;
+ dataset->setAttached(true);
+
return PlatformResult();
}
auto& model = models_[id];
char* tmpSummary = NULL;
- int ret_val = ml_train_model_get_summary(model, level, &tmpSummary);
+ int ret_val =
+ ml_train_model_get_summary(model->getNative(), level, &tmpSummary);
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not get summary for model: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
LoggerI("Saving model to file: %s", tmpString.c_str());
- int ret_val = ml_train_model_save(model, tmpString.c_str(), format);
+ int ret_val =
+ ml_train_model_save(model->getNative(), tmpString.c_str(), format);
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not model to file: %d (%s)", ret_val, ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
return PlatformResult();
}
+// Destroys the native model identified by |id| and removes every wrapper
+// the model owned (layers, optimizer, dataset) from the manager's maps.
+// Returns NOT_FOUND_ERR for an unknown id and ABORT_ERR when the native
+// destroy call fails (in which case no wrappers are removed).
+PlatformResult TrainerManager::ModelDispose(int id) {
+  ScopeLogger();
+
+  if (models_.find(id) == models_.end()) {
+    LoggerE("Could not find model with id: %d", id);
+    return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find model");
+  }
+
+  auto model = models_[id];
+
+  int ret_val = ml_train_model_destroy(model->getNative());
+  if (ret_val != ML_ERROR_NONE) {
+    LoggerE("Could not destroy model: %d (%s)", ret_val, ml_strerror(ret_val));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+  }
+
+  // When model is destroyed by ml_train_model_destroy() then all attached
+  // handles (layers, optimizer, dataset) are also destroyed. This means that
+  // after Model disposal all related objects in JS/C++ layer become invalid.
+  // Code below is removing all wrappers stored in TrainerManager based on
+  // identifiers taken from Model wrapper.
+  // Note: the indices must be compared against INVALID_INDEX explicitly -
+  // with a sentinel of -1 a bare truthiness test (or ">= 0" shortcut) would
+  // treat the sentinel as set and a legitimate id of 0 as unset.
+  if (model->optimizerIndex != INVALID_INDEX) {
+    LoggerD("Deleting attached optimizer: %d", model->optimizerIndex);
+    optimizers_.erase(model->optimizerIndex);
+  }
+  if (model->datasetIndex != INVALID_INDEX) {
+    LoggerD("Deleting attached dataset: %d", model->datasetIndex);
+    datasets_.erase(model->datasetIndex);
+  }
+  for (auto const& ls : model->layerIndices) {
+    LoggerD("Deleting attached layer: %d", ls);
+    layers_.erase(ls);
+  }
+  models_.erase(id);
+
+  return PlatformResult();
+}
+
PlatformResult TrainerManager::CreateLayer(int& id,
ml_train_layer_type_e type) {
ScopeLogger();
ml_train_layer_h n_layer = NULL;
int ret_val = ml_train_layer_create(&n_layer, type);
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not create layer: %s", ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
- layers_[next_layer_id_] = n_layer;
+ layers_[next_layer_id_] =
+ std::make_shared<NativeWrapper<ml_train_layer_h>>(n_layer);
id = next_layer_id_++;
return PlatformResult();
}
auto layer = layers_[id];
std::string opt = name + "=" + value;
- int ret_val = ml_train_layer_set_property(layer, opt.c_str(), NULL);
- if (ret_val != 0) {
+ int ret_val =
+ ml_train_layer_set_property(layer->getNative(), opt.c_str(), NULL);
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set layer property: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
return PlatformResult();
}
+// Destroys the native layer identified by |id| and removes its wrapper from
+// layers_. Returns NOT_FOUND_ERR for an unknown id,
+// NO_MODIFICATION_ALLOWED_ERR when the layer is attached to a model, and
+// ABORT_ERR when the native destroy call fails.
+PlatformResult TrainerManager::LayerDispose(int id) {
+  ScopeLogger();
+
+  if (layers_.find(id) == layers_.end()) {
+    LoggerE("Could not find layer with id: %d", id);
+    return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find layer");
+  }
+
+  auto layer = layers_[id];
+  // Layer added to Model cannot be destroyed individually.
+  // It will be destroyed when destroying the Model
+  // see comment in TrainerManager::ModelDispose()
+  if (layer->isAttached()) {
+    LoggerE("Trying to dispose layer attached to model");
+    return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+                          "Cannot dispose layer attached to model");
+  }
+
+  int ret_val = ml_train_layer_destroy(layer->getNative());
+  if (ret_val != ML_ERROR_NONE) {
+    LoggerE("Could not destroy layer: %d (%s)", ret_val, ml_strerror(ret_val));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+  }
+
+  // Wrapper is only erased after a successful native destroy so a failed
+  // disposal leaves the layer usable.
+  layers_.erase(id);
+
+  return PlatformResult();
+}
+
PlatformResult TrainerManager::CreateOptimizer(int& id,
ml_train_optimizer_type_e type) {
ScopeLogger();
ml_train_optimizer_h n_optimizer = NULL;
int ret_val = ml_train_optimizer_create(&n_optimizer, type);
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not create optimizer: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
- optimizers_[next_optimizer_id_] = n_optimizer;
+ optimizers_[next_optimizer_id_] =
+ std::make_shared<NativeWrapper<ml_train_optimizer_h>>(n_optimizer);
id = next_optimizer_id_++;
return PlatformResult();
}
auto optimizer = optimizers_[id];
std::string opt = name + "=" + value;
- int ret_val = ml_train_optimizer_set_property(optimizer, opt.c_str(), NULL);
- if (ret_val != 0) {
+ int ret_val = ml_train_optimizer_set_property(optimizer->getNative(),
+ opt.c_str(), NULL);
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set optimizer property: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
return PlatformResult();
}
+// Destroys the native optimizer identified by |id| and removes its wrapper
+// from optimizers_. Returns NOT_FOUND_ERR for an unknown id,
+// NO_MODIFICATION_ALLOWED_ERR when the optimizer is set on a model, and
+// ABORT_ERR when the native destroy call fails.
+PlatformResult TrainerManager::OptimizerDispose(int id) {
+  ScopeLogger();
+
+  if (optimizers_.find(id) == optimizers_.end()) {
+    LoggerE("Could not find optimizer with id: %d", id);
+    return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find optimizer");
+  }
+
+  auto optimizer = optimizers_[id];
+  // Optimizer set to Model cannot be destroyed individually.
+  // It will be destroyed when destroying the Model
+  // see comment in TrainerManager::ModelDispose()
+  if (optimizer->isAttached()) {
+    LoggerE("Trying to dispose optimizer attached to model");
+    return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+                          "Cannot dispose optimizer attached to model");
+  }
+
+  int ret_val = ml_train_optimizer_destroy(optimizer->getNative());
+  if (ret_val != ML_ERROR_NONE) {
+    LoggerE("Could not destroy optimizer: %d (%s)", ret_val,
+            ml_strerror(ret_val));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+  }
+
+  // Wrapper is only erased after a successful native destroy so a failed
+  // disposal leaves the optimizer usable.
+  optimizers_.erase(id);
+
+  return PlatformResult();
+}
+
PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
const std::string valid_file,
const std::string test_file) {
ml_train_dataset_h n_dataset = NULL;
int ret_val = ml_train_dataset_create(&n_dataset);
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not create dataset: %s", ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_TRAIN,
tmpString.c_str());
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not add train file %s to dataset: %s", tmpString.c_str(),
ml_strerror(ret_val));
ml_train_dataset_destroy(n_dataset);
}
ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_VALID,
tmpString.c_str());
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not add validation file %s to dataset: %s",
tmpString.c_str(), ml_strerror(ret_val));
ml_train_dataset_destroy(n_dataset);
}
ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_TEST,
tmpString.c_str());
- if (ret_val != 0) {
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not add test file %s to dataset: %s", tmpString.c_str(),
ml_strerror(ret_val));
ml_train_dataset_destroy(n_dataset);
}
}
- datasets_[next_dataset_id_] = n_dataset;
+ datasets_[next_dataset_id_] =
+ std::make_shared<NativeWrapper<ml_train_dataset_h>>(n_dataset);
id = next_dataset_id_++;
return PlatformResult();
}
// ml_train_dataset_set_property() is marked as deprecated
// temporary set same property for all modes (all data files) if possible
int ret_val = ml_train_dataset_set_property_for_mode(
- dataset, ML_TRAIN_DATASET_MODE_TRAIN, opt.c_str(), NULL);
- if (ret_val != 0) {
+ dataset->getNative(), ML_TRAIN_DATASET_MODE_TRAIN, opt.c_str(), NULL);
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set dataset property for train mode: %d (%s)", ret_val,
ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
ret_val = ml_train_dataset_set_property_for_mode(
- dataset, ML_TRAIN_DATASET_MODE_VALID, opt.c_str(), NULL);
- if (ret_val != 0) {
+ dataset->getNative(), ML_TRAIN_DATASET_MODE_VALID, opt.c_str(), NULL);
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set dataset property for validation mode: %d (%s)",
ret_val, ml_strerror(ret_val));
// MK-TODO report error for each file when extracted to separate functions
}
ret_val = ml_train_dataset_set_property_for_mode(
- dataset, ML_TRAIN_DATASET_MODE_TEST, opt.c_str(), NULL);
- if (ret_val != 0) {
+ dataset->getNative(), ML_TRAIN_DATASET_MODE_TEST, opt.c_str(), NULL);
+ if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set dataset property for test mode: %d (%s)", ret_val,
ml_strerror(ret_val));
// MK-TODO report error for each file when extracted to separate functions
return PlatformResult();
}
+// Destroys the native dataset identified by |id| and removes its wrapper
+// from datasets_. Returns NOT_FOUND_ERR for an unknown id,
+// NO_MODIFICATION_ALLOWED_ERR when the dataset is set on a model, and
+// ABORT_ERR when the native destroy call fails.
+PlatformResult TrainerManager::DatasetDispose(int id) {
+  ScopeLogger();
+
+  if (datasets_.find(id) == datasets_.end()) {
+    LoggerE("Could not find dataset with id: %d", id);
+    return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find dataset");
+  }
+
+  auto dataset = datasets_[id];
+  // Dataset set to Model cannot be destroyed individually.
+  // It will be destroyed when destroying the Model
+  // see comment in TrainerManager::ModelDispose()
+  if (dataset->isAttached()) {
+    LoggerE("Trying to dispose dataset attached to model");
+    return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+                          "Cannot dispose dataset attached to model");
+  }
+
+  int ret_val = ml_train_dataset_destroy(dataset->getNative());
+  if (ret_val != ML_ERROR_NONE) {
+    LoggerE("Could not destroy dataset: %d (%s)", ret_val,
+            ml_strerror(ret_val));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+  }
+
+  // Wrapper is only erased after a successful native destroy so a failed
+  // disposal leaves the dataset usable.
+  datasets_.erase(id);
+
+  return PlatformResult();
+}
+
} // namespace ml
} // namespace extension