}
auto& model = models_[id];
+ std::lock_guard<std::mutex> modelLock(model->instanceLock);
std::stringstream ss;
for (const auto& opt : options) {
const picojson::object& options) {
ScopeLogger();
+ // lock the models_ map operations to avoid conflicting with disposal function
+ models_map_lock_.lock();
if (models_.find(id) == models_.end()) {
LoggerE("Could not find model with id: %d", id);
+ models_map_lock_.unlock();
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find model");
}
auto& model = models_[id];
+ std::lock_guard<std::mutex> modelLock(model->instanceLock);
+ // model instance is securely locked for other operations
+ // so it's safe to unlock map now
+ models_map_lock_.unlock();
if (!model->isCompiled()) {
LoggerE("Trying to train model that is not compiled");
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find model");
}
+ auto& model = models_[id];
+ bool available = model->instanceLock.try_lock();
+ if (!available) {
+ LoggerE("Model locked - probably training in progress");
+ return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+ "Model training in progress - cannot modify");
+ }
+
if (layers_.find(layerId) == layers_.end()) {
LoggerE("Could not find layer with id: %d", id);
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find layer");
}
- auto& model = models_[id];
auto& layer = layers_[layerId];
if (model->isCompiled()) {
LoggerE("Modification of compiled model");
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::INVALID_STATE_ERR,
"Modification of compiled model not allowed");
}
if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not add layer to model: %d (%s)", ret_val,
ml_strerror(ret_val));
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
model->layerIndices.push_back(layerId);
layer->setAttached(true);
+ model->instanceLock.unlock();
return PlatformResult();
}
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find model");
}
+ auto& model = models_[id];
+ bool available = model->instanceLock.try_lock();
+ if (!available) {
+ LoggerE("Model locked - probably training in progress");
+ return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+ "Model training in progress - cannot modify");
+ }
+
if (optimizers_.find(optimizerId) == optimizers_.end()) {
LoggerE("Could not find optimizer with id: %d", id);
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find optimizer");
}
- auto& model = models_[id];
auto& optimizer = optimizers_[optimizerId];
+
if (model->isCompiled()) {
LoggerE("Modification of compiled model");
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::INVALID_STATE_ERR,
"Modification of compiled model not allowed");
}
if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set optimizer for model: %d (%s)", ret_val,
ml_strerror(ret_val));
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
model->optimizerIndex = optimizerId;
optimizer->setAttached(true);
+ model->instanceLock.unlock();
return PlatformResult();
}
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find model");
}
+ auto& model = models_[id];
+ bool available = model->instanceLock.try_lock();
+ if (!available) {
+ LoggerE("Model locked - probably training in progress");
+ return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+ "Model training in progress - cannot modify");
+ }
+
if (datasets_.find(datasetId) == datasets_.end()) {
LoggerE("Could not find dataset with id: %d", id);
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find dataset");
}
- auto& model = models_[id];
auto& dataset = datasets_[datasetId];
if (model->isCompiled()) {
LoggerE("Modification of compiled model");
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::INVALID_STATE_ERR,
"Modification of compiled model not allowed");
}
if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not set dataset for model: %d (%s)", ret_val,
ml_strerror(ret_val));
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
model->datasetIndex = datasetId;
dataset->setAttached(true);
+ model->instanceLock.unlock();
return PlatformResult();
}
}
auto& model = models_[id];
+ std::lock_guard<std::mutex> modelLock(model->instanceLock);
+
char* tmpSummary = NULL;
int ret_val =
}
auto& model = models_[id];
+ bool available = model->instanceLock.try_lock();
+ if (!available) {
+ LoggerE("Model locked - probably training in progress");
+ return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+ "Model training in progress - cannot save");
+ }
auto tmpString = path;
if (tmpString.substr(0, FILE_PATH_PREFIX.length()) == FILE_PATH_PREFIX) {
int ret_val =
ml_train_model_save(model->getNative(), tmpString.c_str(), format);
+ model->instanceLock.unlock();
+
if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not model to file: %d (%s)", ret_val, ml_strerror(ret_val));
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
PlatformResult TrainerManager::ModelDispose(int id) {
ScopeLogger();
+ // lock the models_ map operations to avoid conflicting with model training
+ std::lock_guard<std::mutex> model_lock(models_map_lock_);
+
if (models_.find(id) == models_.end()) {
LoggerE("Could not find model with id: %d", id);
return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find model");
auto model = models_[id];
+ bool available = model->instanceLock.try_lock();
+ if (!available) {
+ LoggerE("Could not lock model for disposal - probably training in progress");
+ return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+ "Model training in progress - disposal not allowed");
+ }
+
int ret_val = ml_train_model_destroy(model->getNative());
if (ret_val != ML_ERROR_NONE) {
LoggerE("Could not destroy model: %d (%s)", ret_val, ml_strerror(ret_val));
+ model->instanceLock.unlock();
return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+ // erase model from map and use a shared pointer for related objects removal
+ models_.erase(id);
+
// When model is destroyed by ml_train_model_destroy() then all attached
// handles (layers, optimizer, dataset) are also destroyed. This means that
// after Model disposal all related objects in JS/C++ layer become invalid.
LoggerD("Deleting attached optimizer: %d", model->optimizerIndex);
optimizers_.erase(model->optimizerIndex);
}
+
if (model->datasetIndex) {
LoggerD("Deleting attached dataset: %d", model->datasetIndex);
datasets_.erase(model->datasetIndex);
}
+
for (auto const& ls : model->layerIndices) {
LoggerD("Deleting attached layer: %d", ls);
layers_.erase(ls);
}
- models_.erase(id);
+ model->instanceLock.unlock();
return PlatformResult();
}
layers_[next_layer_id_] =
std::make_shared<NativeWrapper<ml_train_layer_h>>(n_layer);
id = next_layer_id_++;
+
return PlatformResult();
}
-PlatformResult TrainerManager::LayerSetProperty(int id, const std::string& name,
+PlatformResult TrainerManager::LayerSetProperty(int id,
+ const std::string& name,
const std::string& value) {
ScopeLogger("id: %d, name: %s, value: %s", id, name.c_str(), value.c_str());
}
auto layer = layers_[id];
+
std::string opt = name + "=" + value;
int ret_val =
optimizers_[next_optimizer_id_] =
std::make_shared<NativeWrapper<ml_train_optimizer_h>>(n_optimizer);
id = next_optimizer_id_++;
+
return PlatformResult();
}
}
auto optimizer = optimizers_[id];
+
std::string opt = name + "=" + value;
int ret_val = ml_train_optimizer_set_property(optimizer->getNative(),
opt.c_str(), NULL);
return PlatformResult();
}
-PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
+PlatformResult TrainerManager::CreateFileDataset(int& id,
+ const std::string train_file,
const std::string valid_file,
const std::string test_file) {
ScopeLogger();
datasets_[next_dataset_id_] =
std::make_shared<NativeWrapper<ml_train_dataset_h>>(n_dataset);
id = next_dataset_id_++;
+
return PlatformResult();
}
}
auto dataset = datasets_[id];
+
std::string opt = name + "=" + value;
int ret_val = ml_train_dataset_set_property_for_mode(dataset->getNative(),