From: Marcin Kaminski
Date: Sat, 15 Jan 2022 18:09:31 +0000 (+0100)
Subject: [ML][Training] Object destruction implementation
X-Git-Tag: submit/tizen/20220210.090823~1^2~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a519fde1bf664d9d0b5e1a6383caae80da77babf;p=platform%2Fcore%2Fapi%2Fwebapi-plugins.git

[ML][Training] Object destruction implementation

Changes:
- dispose() function added to Model, Layer, Dataset and Optimizer
- C++/C layer for dispose implemented
- additional wrapper objects for storing state/relations between objects

Change-Id: Id35d552c5bdf4840a176eeb85ad1bc4114c01ccb
---
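For orientation, here is a minimal, hypothetical sketch of how the new dispose() methods are meant to be used from an application. The tizen.ml.trainer factory functions and enum values below are assumptions based on the public Tizen ML Trainer Web API and are not part of this commit:

    // Hypothetical usage of the dispose() surface added in this commit.
    var layer = tizen.ml.trainer.createLayer('LAYER_FC');              // assumed factory
    var optimizer = tizen.ml.trainer.createOptimizer('OPTIMIZER_SGD'); // assumed factory
    var model = tizen.ml.trainer.createModel();
    model.addLayer(layer);
    model.setOptimizer(optimizer);
    // ... compile, run, save ...
    model.dispose(); // destroys the native model and everything attached to it

Disposing the model last is deliberate: as the changes below enforce, attached layers, optimizers and datasets can no longer be destroyed individually.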
diff --git a/src/ml/js/ml_trainer.js b/src/ml/js/ml_trainer.js
index 4dcc99da..643a5f6f 100755
--- a/src/ml/js/ml_trainer.js
+++ b/src/ml/js/ml_trainer.js
@@ -119,6 +119,23 @@ Layer.prototype.setProperty = function() {
     }
 };
 
+var ValidDisposeExceptions = [
+    'NoModificationAllowedError',
+    'NotFoundError',
+    'AbortError'
+];
+
+Layer.prototype.dispose = function () {
+    var result = native_.callSync('MLTrainerLayerDispose', { id: this._id });
+
+    if (native_.isFailure(result)) {
+        throw native_.getErrorObjectAndValidate(
+            result,
+            ValidDisposeExceptions,
+            AbortError
+        );
+    }
+};
+
 var Optimizer = function(id, type) {
     Object.defineProperties(this, {
         type: {
@@ -166,6 +183,18 @@ Optimizer.prototype.setProperty = function() {
     }
 };
 
+Optimizer.prototype.dispose = function () {
+    var result = native_.callSync('MLTrainerOptimizerDispose', { id: this._id });
+
+    if (native_.isFailure(result)) {
+        throw native_.getErrorObjectAndValidate(
+            result,
+            ValidDisposeExceptions,
+            AbortError
+        );
+    }
+};
+
 var Dataset = function(id, type) {
     Object.defineProperties(this, {
         type: {
@@ -213,6 +242,19 @@ Dataset.prototype.setProperty = function() {
     }
 };
 
+
+Dataset.prototype.dispose = function () {
+    var result = native_.callSync('MLTrainerDatasetDispose', { id: this._id });
+
+    if (native_.isFailure(result)) {
+        throw native_.getErrorObjectAndValidate(
+            result,
+            ValidDisposeExceptions,
+            AbortError
+        );
+    }
+};
+
 var Model = function(id) {
     Object.defineProperties(this, {
         _id: { value: id, writable: false, enumerable: false }
@@ -515,6 +557,18 @@ Model.prototype.setOptimizer = function() {
     }
 };
 
+Model.prototype.dispose = function () {
+    var result = native_.callSync('MLTrainerModelDispose', { id: this._id });
+
+    if (native_.isFailure(result)) {
+        throw native_.getErrorObjectAndValidate(
+            result,
+            ValidDisposeExceptions,
+            AbortError
+        );
+    }
+};
+
 var ValidCreateLayerExceptions = ['NotSupportedError', 'TypeMismatchError', 'AbortError'];
 
 var NO_ID = -1;
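All four dispose() wrappers above validate native errors against ValidDisposeExceptions; any other error name degrades to a plain AbortError. A sketch of the expected behavior on a double dispose, where the second call fails in the C++ layer with NOT_FOUND_ERR because the wrapper entry is already gone (same hypothetical createLayer factory as in the earlier sketch):

    var layer = tizen.ml.trainer.createLayer('LAYER_FC');
    layer.dispose(); // ok: native layer destroyed, id erased from the C++ map
    try {
        layer.dispose(); // this id is no longer known to TrainerManager
    } catch (e) {
        console.log(e.name); // expected: 'NotFoundError'
    }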
diff --git a/src/ml/ml.gyp b/src/ml/ml.gyp
index c3e775cf..9f3534c1 100644
--- a/src/ml/ml.gyp
+++ b/src/ml/ml.gyp
@@ -39,6 +39,8 @@
         'ml_single_manager.h',
         'ml_trainer_manager.cc',
         'ml_trainer_manager.h',
+        'ml_trainer_objects.cc',
+        'ml_trainer_objects.h',
         'ml_singleshot.cc',
         'ml_singleshot.h',
         'ml_utils.cc',
diff --git a/src/ml/ml_instance.cc b/src/ml/ml_instance.cc
index 5d1dfbda..d7a1ba3f 100644
--- a/src/ml/ml_instance.cc
+++ b/src/ml/ml_instance.cc
@@ -183,8 +183,10 @@ MlInstance::MlInstance()
   REGISTER_METHOD(MLTrainerLayerSetProperty);
   REGISTER_METHOD(MLTrainerLayerCreate);
+  REGISTER_METHOD(MLTrainerLayerDispose);
   REGISTER_METHOD(MLTrainerOptimizerSetProperty);
   REGISTER_METHOD(MLTrainerOptimizerCreate);
+  REGISTER_METHOD(MLTrainerOptimizerDispose);
   REGISTER_METHOD(MLTrainerModelCreate);
   REGISTER_METHOD(MLTrainerModelCompile);
   REGISTER_METHOD(MLTrainerModelAddLayer);
@@ -193,9 +195,11 @@ MlInstance::MlInstance()
   REGISTER_METHOD(MLTrainerModelSave);
   REGISTER_METHOD(MLTrainerModelSetDataset);
   REGISTER_METHOD(MLTrainerModelSetOptimizer);
+  REGISTER_METHOD(MLTrainerModelDispose);
   REGISTER_METHOD(MLTrainerDatasetCreateGenerator);
   REGISTER_METHOD(MLTrainerDatasetCreateFromFile);
   REGISTER_METHOD(MLTrainerDatasetSetProperty);
+  REGISTER_METHOD(MLTrainerDatasetDispose);
 
 #undef REGISTER_METHOD
 }
@@ -1753,6 +1757,21 @@ void MlInstance::MLTrainerLayerCreate(const picojson::value& args, picojson::object& out) {
   ReportSuccess(out);
 }
 
+void MlInstance::MLTrainerLayerDispose(const picojson::value& args,
+                                       picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kId, double, out);
+
+  auto id = static_cast<int>(args.get(kId).get<double>());
+
+  PlatformResult result = trainer_manager_.LayerDispose(id);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  ReportSuccess(out);
+}
+
 void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out) {
   ScopeLogger("args: %s", args.serialize().c_str());
   CHECK_ARGS(args, kId, double, out);
@@ -1793,6 +1812,21 @@ void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out) {
   ReportSuccess(out);
 }
 
+void MlInstance::MLTrainerOptimizerDispose(const picojson::value& args,
+                                           picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kId, double, out);
+
+  auto id = static_cast<int>(args.get(kId).get<double>());
+
+  PlatformResult result = trainer_manager_.OptimizerDispose(id);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  ReportSuccess(out);
+}
+
 void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::object& out) {
   ScopeLogger("args: %s", args.serialize().c_str());
   int id = -1;
@@ -1977,6 +2011,21 @@ void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out) {
   ReportSuccess(out);
 }
 
+void MlInstance::MLTrainerModelDispose(const picojson::value& args,
+                                       picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kId, double, out);
+
+  auto id = static_cast<int>(args.get(kId).get<double>());
+
+  PlatformResult result = trainer_manager_.ModelDispose(id);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  ReportSuccess(out);
+}
+
 void MlInstance::MLTrainerDatasetCreateGenerator(const picojson::value& args,
                                                  picojson::object& out) {
   ScopeLogger("args: %s", args.serialize().c_str());
@@ -2022,6 +2071,21 @@ void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out) {
   ReportSuccess(out);
 }
 
+void MlInstance::MLTrainerDatasetDispose(const picojson::value& args,
+                                         picojson::object& out) {
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kId, double, out);
+
+  auto id = static_cast<int>(args.get(kId).get<double>());
+
+  PlatformResult result = trainer_manager_.DatasetDispose(id);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+  ReportSuccess(out);
+}
+
 #undef CHECK_EXIST
 #undef CHECK_TYPE
 #undef CHECK_TYPE_X
diff --git a/src/ml/ml_instance.h b/src/ml/ml_instance.h
index 69bc0706..cfec0a7b 100644
--- a/src/ml/ml_instance.h
+++ b/src/ml/ml_instance.h
@@ -153,9 +153,13 @@ class MlInstance : public common::ParsedInstance {
   void MLTrainerLayerSetProperty(const picojson::value& args, picojson::object& out);
   void MLTrainerLayerCreate(const picojson::value& args, picojson::object& out);
+  void MLTrainerLayerDispose(const picojson::value& args,
+                             picojson::object& out);
   void MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out);
   void MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out);
+  void MLTrainerOptimizerDispose(const picojson::value& args,
+                                 picojson::object& out);
   void MLTrainerModelCreate(const picojson::value& args, picojson::object& out);
   void MLTrainerModelCompile(const picojson::value& args, picojson::object& out);
@@ -165,10 +169,14 @@ class MlInstance : public common::ParsedInstance {
   void MLTrainerModelSave(const picojson::value& args, picojson::object& out);
   void MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out);
   void MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out);
+  void MLTrainerModelDispose(const picojson::value& args,
+                             picojson::object& out);
   void MLTrainerDatasetCreateGenerator(const picojson::value& args, picojson::object& out);
   void MLTrainerDatasetCreateFromFile(const picojson::value& args, picojson::object& out);
   void MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out);
+  void MLTrainerDatasetDispose(const picojson::value& args,
+                               picojson::object& out);
 };
 
 }  // namespace ml
diff --git a/src/ml/ml_trainer_manager.cc b/src/ml/ml_trainer_manager.cc
index 071ccb3a..01043f99 100644
--- a/src/ml/ml_trainer_manager.cc
+++ b/src/ml/ml_trainer_manager.cc
@@ -41,12 +41,12 @@ PlatformResult TrainerManager::CreateModel(int& id) {
   ml_train_model_h n_model = NULL;
 
   int ret_val = ml_train_model_construct(&n_model);
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not create model: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
-  models_[next_model_id_] = n_model;
+  models_[next_model_id_] = std::make_shared<Model>(n_model);
   id = next_model_id_++;
 
   return PlatformResult();
@@ -58,12 +58,12 @@ PlatformResult TrainerManager::CreateModel(int& id, const std::string config) {
   ml_train_model_h n_model = NULL;
 
   int ret_val = ml_train_model_construct_with_conf(config.c_str(), &n_model);
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not create model: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
-  models_[next_model_id_] = n_model;
+  models_[next_model_id_] = std::make_shared<Model>(n_model);
   id = next_model_id_++;
 
   return PlatformResult();
@@ -99,21 +99,23 @@ PlatformResult TrainerManager::ModelCompile(int id,
   int ret_val = 0;
   auto compileOpts = ss.str();
   if (compileOpts.length() < OPTION_SEPARATOR.length()) {
-    ret_val = ml_train_model_compile(model, NULL);
+    ret_val = ml_train_model_compile(model->getNative(), NULL);
   } else {
     // remove trailing ' | ' from options string
     compileOpts = compileOpts.substr(0, compileOpts.length() - OPTION_SEPARATOR.length());
     LoggerI("Compiling model with options: %s", compileOpts.c_str());
-    ret_val = ml_train_model_compile(model, compileOpts.c_str(), NULL);
+    ret_val =
+        ml_train_model_compile(model->getNative(), compileOpts.c_str(), NULL);
   }
   ss.clear();
 
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not compile model: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
+  model->setCompiled(true);
+
   return PlatformResult();
 }
@@ -129,6 +131,12 @@ PlatformResult TrainerManager::ModelRun(int id,
   auto& model = models_[id];
 
+  if (!model->isCompiled()) {
+    LoggerE("Trying to train model that is not compiled");
+    return PlatformResult(ErrorCode::INVALID_STATE_ERR,
+                          "Cannot train model before compilation");
+  }
+
   std::stringstream ss;
   for (const auto& opt : options) {
     const auto& key = opt.first;
@@ -149,15 +157,15 @@ PlatformResult TrainerManager::ModelRun(int id,
   auto runOpts = ss.str();
   if (runOpts.length() < OPTION_SEPARATOR.length()) {
-    ret_val = ml_train_model_run(model, NULL);
+    ret_val = ml_train_model_run(model->getNative(), NULL);
   } else {
     // remove trailing ' | ' from options string
     runOpts = runOpts.substr(0, runOpts.length() - OPTION_SEPARATOR.length());
     LoggerI("Running model with options: %s", runOpts.c_str());
-    ret_val = ml_train_model_run(model, runOpts.c_str(), NULL);
+    ret_val = ml_train_model_run(model->getNative(), runOpts.c_str(), NULL);
   }
 
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not run (train) model: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::UNKNOWN_ERR, ml_strerror(ret_val));
@@ -182,13 +190,23 @@ PlatformResult TrainerManager::ModelAddLayer(int id, int layerId) {
   auto& model = models_[id];
   auto& layer = layers_[layerId];
 
-  int ret_val = ml_train_model_add_layer(model, layer);
-  if (ret_val != 0) {
+  if (model->isCompiled()) {
+    LoggerE("Modification of compiled model");
+    return PlatformResult(ErrorCode::INVALID_STATE_ERR,
+                          "Modification of compiled model not allowed");
+  }
+
+  int ret_val =
+      ml_train_model_add_layer(model->getNative(), layer->getNative());
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not add layer to model: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
+  model->layerIndices.push_back(layerId);
+  layer->setAttached(true);
+
   return PlatformResult();
 }
@@ -207,14 +225,36 @@ PlatformResult TrainerManager::ModelSetOptimizer(int id, int optimizerId) {
   auto& model = models_[id];
   auto& optimizer = optimizers_[optimizerId];
 
+  if (model->isCompiled()) {
+    LoggerE("Modification of compiled model");
+    return PlatformResult(ErrorCode::INVALID_STATE_ERR,
+                          "Modification of compiled model not allowed");
+  }
 
-  int ret_val = ml_train_model_set_optimizer(model, optimizer);
-  if (ret_val != 0) {
+  int ret_val =
+      ml_train_model_set_optimizer(model->getNative(), optimizer->getNative());
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not set optimizer for model: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
+  if (model->optimizerIndex != INVALID_INDEX) {
+    // "release" the optimizer that was set before
+    auto optPosition = optimizers_.find(model->optimizerIndex);
+    if (optPosition != optimizers_.end()) {
+      (*optPosition).second->setAttached(false);
+    } else {
+      // This should never happen, but check and log it just in case
+      LoggerE("Attached optimizer does not exist in the map - internal error");
+    }
+  }
+
+  model->optimizerIndex = optimizerId;
+  optimizer->setAttached(true);
+
   return PlatformResult();
 }
@@ -234,13 +274,35 @@ PlatformResult TrainerManager::ModelSetDataset(int id, int datasetId) {
   auto& model = models_[id];
   auto& dataset = datasets_[datasetId];
 
-  int ret_val = ml_train_model_set_dataset(model, dataset);
-  if (ret_val != 0) {
+  if (model->isCompiled()) {
+    LoggerE("Modification of compiled model");
+    return PlatformResult(ErrorCode::INVALID_STATE_ERR,
+                          "Modification of compiled model not allowed");
+  }
+
+  int ret_val =
+      ml_train_model_set_dataset(model->getNative(), dataset->getNative());
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not set dataset for model: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
+  if (model->datasetIndex != INVALID_INDEX) {
+    // "release" the dataset that was set before
+    auto datPosition = datasets_.find(model->datasetIndex);
+    if (datPosition != datasets_.end()) {
+      (*datPosition).second->setAttached(false);
+    } else {
+      // This should never happen, but check and log it just in case
+      LoggerE("Attached dataset does not exist in the map - internal error");
+    }
+  }
+
+  model->datasetIndex = datasetId;
+  dataset->setAttached(true);
+
   return PlatformResult();
 }
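The isCompiled() checks above turn previously undefined native behavior into explicit InvalidStateError results: a compiled model rejects addLayer/setOptimizer/setDataset, and run() is rejected before compile(). Assuming the JS wrappers for these methods pass the InvalidStateError name through (they are not part of this diff), the observable behavior would be roughly:

    model.compile();                // JS wrapper for ModelCompile, shown for illustration
    try {
        model.addLayer(extraLayer); // the model is already compiled
    } catch (e) {
        console.log(e.name);        // expected: 'InvalidStateError'
    }

Calling run() on a never-compiled model is expected to fail the same way, instead of letting the native library crash on a later compile()/run() retry.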
@@ -257,9 +319,10 @@ PlatformResult TrainerManager::ModelSummarize(int id,
   auto& model = models_[id];
 
   char* tmpSummary = NULL;
-  int ret_val = ml_train_model_get_summary(model, level, &tmpSummary);
+  int ret_val =
+      ml_train_model_get_summary(model->getNative(), level, &tmpSummary);
 
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not get summary for model: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
@@ -290,9 +353,10 @@ PlatformResult TrainerManager::ModelSave(int id,
   }
 
   LoggerI("Saving model to file: %s", tmpString.c_str());
-  int ret_val = ml_train_model_save(model, tmpString.c_str(), format);
+  int ret_val =
+      ml_train_model_save(model->getNative(), tmpString.c_str(), format);
 
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not save model to file: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
   return PlatformResult();
 }
@@ -300,6 +364,44 @@ PlatformResult TrainerManager::ModelSave(int id,
+PlatformResult TrainerManager::ModelDispose(int id) {
+  ScopeLogger();
+
+  if (models_.find(id) == models_.end()) {
+    LoggerE("Could not find model with id: %d", id);
+    return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find model");
+  }
+
+  auto model = models_[id];
+
+  int ret_val = ml_train_model_destroy(model->getNative());
+  if (ret_val != ML_ERROR_NONE) {
+    LoggerE("Could not destroy model: %d (%s)", ret_val, ml_strerror(ret_val));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+  }
+
+  // When a model is destroyed by ml_train_model_destroy(), all attached
+  // handles (layers, optimizer, dataset) are destroyed with it. This means
+  // that after model disposal all related objects in the JS/C++ layer become
+  // invalid. The code below removes all wrappers stored in TrainerManager,
+  // based on the identifiers kept in the Model wrapper.
+  if (model->optimizerIndex != INVALID_INDEX) {
+    LoggerD("Deleting attached optimizer: %d", model->optimizerIndex);
+    optimizers_.erase(model->optimizerIndex);
+  }
+  if (model->datasetIndex != INVALID_INDEX) {
+    LoggerD("Deleting attached dataset: %d", model->datasetIndex);
+    datasets_.erase(model->datasetIndex);
+  }
+  for (auto const& ls : model->layerIndices) {
+    LoggerD("Deleting attached layer: %d", ls);
+    layers_.erase(ls);
+  }
+  models_.erase(id);
+
+  return PlatformResult();
+}
+
 PlatformResult TrainerManager::CreateLayer(int& id,
                                            ml_train_layer_type_e type) {
   ScopeLogger();
@@ -307,12 +409,13 @@ PlatformResult TrainerManager::CreateLayer(int& id,
   ml_train_layer_h n_layer = NULL;
 
   int ret_val = ml_train_layer_create(&n_layer, type);
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not create layer: %s", ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
-  layers_[next_layer_id_] = n_layer;
+  layers_[next_layer_id_] =
+      std::make_shared<NativeWrapper<ml_train_layer_h>>(n_layer);
   id = next_layer_id_++;
   return PlatformResult();
 }
@@ -329,8 +432,9 @@ PlatformResult TrainerManager::LayerSetProperty(int id, const std::string& name,
   auto layer = layers_[id];
   std::string opt = name + "=" + value;
 
-  int ret_val = ml_train_layer_set_property(layer, opt.c_str(), NULL);
-  if (ret_val != 0) {
+  int ret_val =
+      ml_train_layer_set_property(layer->getNative(), opt.c_str(), NULL);
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not set layer property: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
@@ -338,6 +442,35 @@ PlatformResult TrainerManager::LayerSetProperty(int id, const std::string& name,
   return PlatformResult();
 }
 
+PlatformResult TrainerManager::LayerDispose(int id) {
+  ScopeLogger();
+
+  if (layers_.find(id) == layers_.end()) {
+    LoggerE("Could not find layer with id: %d", id);
+    return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find layer");
+  }
+
+  auto layer = layers_[id];
+  // A layer added to a model cannot be destroyed individually.
+  // It is destroyed together with the model;
+  // see the comment in TrainerManager::ModelDispose().
+  if (layer->isAttached()) {
+    LoggerE("Trying to dispose layer attached to model");
+    return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+                          "Cannot dispose layer attached to model");
+  }
+
+  int ret_val = ml_train_layer_destroy(layer->getNative());
+  if (ret_val != ML_ERROR_NONE) {
+    LoggerE("Could not destroy layer: %d (%s)", ret_val, ml_strerror(ret_val));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+  }
+
+  layers_.erase(id);
+
+  return PlatformResult();
+}
+
 PlatformResult TrainerManager::CreateOptimizer(int& id,
                                                ml_train_optimizer_type_e type) {
   ScopeLogger();
@@ -345,13 +478,14 @@ PlatformResult TrainerManager::CreateOptimizer(int& id,
   ml_train_optimizer_h n_optimizer = NULL;
 
   int ret_val = ml_train_optimizer_create(&n_optimizer, type);
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not create optimizer: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
-  optimizers_[next_optimizer_id_] = n_optimizer;
+  optimizers_[next_optimizer_id_] =
+      std::make_shared<NativeWrapper<ml_train_optimizer_h>>(n_optimizer);
   id = next_optimizer_id_++;
   return PlatformResult();
 }
@@ -368,8 +502,9 @@ PlatformResult TrainerManager::OptimizerSetProperty(int id,
   auto optimizer = optimizers_[id];
   std::string opt = name + "=" + value;
 
-  int ret_val = ml_train_optimizer_set_property(optimizer, opt.c_str(), NULL);
-  if (ret_val != 0) {
+  int ret_val = ml_train_optimizer_set_property(optimizer->getNative(),
+                                                opt.c_str(), NULL);
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not set optimizer property: %d (%s)", ret_val, ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
@@ -377,6 +512,36 @@ PlatformResult TrainerManager::OptimizerSetProperty(int id,
   return PlatformResult();
 }
 
+PlatformResult TrainerManager::OptimizerDispose(int id) {
+  ScopeLogger();
+
+  if (optimizers_.find(id) == optimizers_.end()) {
+    LoggerE("Could not find optimizer with id: %d", id);
+    return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find optimizer");
+  }
+
+  auto optimizer = optimizers_[id];
+  // An optimizer set on a model cannot be destroyed individually.
+  // It is destroyed together with the model;
+  // see the comment in TrainerManager::ModelDispose().
+  if (optimizer->isAttached()) {
+    LoggerE("Trying to dispose optimizer attached to model");
+    return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+                          "Cannot dispose optimizer attached to model");
+  }
+
+  int ret_val = ml_train_optimizer_destroy(optimizer->getNative());
+  if (ret_val != ML_ERROR_NONE) {
+    LoggerE("Could not destroy optimizer: %d (%s)", ret_val,
+            ml_strerror(ret_val));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+  }
+
+  optimizers_.erase(id);
+
+  return PlatformResult();
+}
+
 PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
                                                  const std::string valid_file,
                                                  const std::string test_file) {
   ScopeLogger();
@@ -385,7 +550,7 @@ PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
   ml_train_dataset_h n_dataset = NULL;
 
   int ret_val = ml_train_dataset_create(&n_dataset);
-  if (ret_val != 0) {
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not create dataset: %s", ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
@@ -399,7 +564,7 @@ PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
 
     ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_TRAIN,
                                         tmpString.c_str());
-    if (ret_val != 0) {
+    if (ret_val != ML_ERROR_NONE) {
       LoggerE("Could not add train file %s to dataset: %s", tmpString.c_str(),
              ml_strerror(ret_val));
       ml_train_dataset_destroy(n_dataset);
@@ -415,7 +580,7 @@ PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
     }
     ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_VALID,
                                         tmpString.c_str());
-    if (ret_val != 0) {
+    if (ret_val != ML_ERROR_NONE) {
       LoggerE("Could not add validation file %s to dataset: %s",
              tmpString.c_str(), ml_strerror(ret_val));
       ml_train_dataset_destroy(n_dataset);
@@ -431,7 +596,7 @@ PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
     }
     ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_TEST,
                                         tmpString.c_str());
-    if (ret_val != 0) {
+    if (ret_val != ML_ERROR_NONE) {
       LoggerE("Could not add test file %s to dataset: %s", tmpString.c_str(),
              ml_strerror(ret_val));
       ml_train_dataset_destroy(n_dataset);
@@ -439,7 +604,8 @@ PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
     }
   }
 
-  datasets_[next_dataset_id_] = n_dataset;
+  datasets_[next_dataset_id_] =
+      std::make_shared<NativeWrapper<ml_train_dataset_h>>(n_dataset);
   id = next_dataset_id_++;
   return PlatformResult();
 }
@@ -462,16 +628,16 @@ PlatformResult TrainerManager::DatasetSetProperty(int id,
 
   // ml_train_dataset_set_property() is marked as deprecated;
   // temporarily set the same property for all modes (all data files) if possible
   int ret_val = ml_train_dataset_set_property_for_mode(
-      dataset, ML_TRAIN_DATASET_MODE_TRAIN, opt.c_str(), NULL);
-  if (ret_val != 0) {
+      dataset->getNative(), ML_TRAIN_DATASET_MODE_TRAIN, opt.c_str(), NULL);
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not set dataset property for train mode: %d (%s)", ret_val,
            ml_strerror(ret_val));
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
   ret_val = ml_train_dataset_set_property_for_mode(
-      dataset, ML_TRAIN_DATASET_MODE_VALID, opt.c_str(), NULL);
-  if (ret_val != 0) {
+      dataset->getNative(), ML_TRAIN_DATASET_MODE_VALID, opt.c_str(), NULL);
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not set dataset property for validation mode: %d (%s)",
            ret_val, ml_strerror(ret_val));
     // MK-TODO report error for each file when extracted to separate functions
@@ -479,8 +645,8 @@ PlatformResult TrainerManager::DatasetSetProperty(int id,
     return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
   }
 
   ret_val = ml_train_dataset_set_property_for_mode(
-      dataset, ML_TRAIN_DATASET_MODE_TEST, opt.c_str(), NULL);
-  if (ret_val != 0) {
+      dataset->getNative(), ML_TRAIN_DATASET_MODE_TEST, opt.c_str(), NULL);
+  if (ret_val != ML_ERROR_NONE) {
     LoggerE("Could not set dataset property for test mode: %d (%s)", ret_val,
            ml_strerror(ret_val));
     // MK-TODO report error for each file when extracted to separate functions
@@ -490,5 +656,35 @@ PlatformResult TrainerManager::DatasetSetProperty(int id,
   return PlatformResult();
 }
 
+PlatformResult TrainerManager::DatasetDispose(int id) {
+  ScopeLogger();
+
+  if (datasets_.find(id) == datasets_.end()) {
+    LoggerE("Could not find dataset with id: %d", id);
+    return PlatformResult(ErrorCode::NOT_FOUND_ERR, "Could not find dataset");
+  }
+
+  auto dataset = datasets_[id];
+  // A dataset set on a model cannot be destroyed individually.
+  // It is destroyed together with the model;
+  // see the comment in TrainerManager::ModelDispose().
+  if (dataset->isAttached()) {
+    LoggerE("Trying to dispose dataset attached to model");
+    return PlatformResult(ErrorCode::NO_MODIFICATION_ALLOWED_ERR,
+                          "Cannot dispose dataset attached to model");
+  }
+
+  int ret_val = ml_train_dataset_destroy(dataset->getNative());
+  if (ret_val != ML_ERROR_NONE) {
+    LoggerE("Could not destroy dataset: %d (%s)", ret_val,
+            ml_strerror(ret_val));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+  }
+
+  datasets_.erase(id);
+
+  return PlatformResult();
+}
+
 }  // namespace ml
 }  // namespace extension
diff --git a/src/ml/ml_trainer_manager.h b/src/ml/ml_trainer_manager.h
index 72f800b4..15de4f8b 100644
--- a/src/ml/ml_trainer_manager.h
+++ b/src/ml/ml_trainer_manager.h
@@ -21,6 +21,8 @@
 
 #include "common/platform_result.h"
 
+#include "ml_trainer_objects.h"
+
 using common::PlatformResult;
 
 namespace extension {
@@ -46,19 +48,23 @@ class TrainerManager {
   PlatformResult ModelSave(int id, const std::string& path,
                            ml_train_model_format_e format);
+  PlatformResult ModelDispose(int id);
 
   PlatformResult CreateLayer(int& id, ml_train_layer_type_e type);
   PlatformResult LayerSetProperty(int id, const std::string& name,
                                   const std::string& value);
+  PlatformResult LayerDispose(int id);
 
   PlatformResult CreateOptimizer(int& id, ml_train_optimizer_type_e type);
   PlatformResult OptimizerSetProperty(int id, const std::string& name,
                                       const std::string& value);
+  PlatformResult OptimizerDispose(int id);
 
   PlatformResult CreateFileDataset(int& id, const std::string train_file,
                                    const std::string valid_file,
                                    const std::string test_file);
   PlatformResult DatasetSetProperty(int id, const std::string& name,
                                     const std::string& value);
+  PlatformResult DatasetDispose(int id);
 
  private:
   int next_model_id_ = 0;
@@ -66,10 +72,10 @@ class TrainerManager {
   int next_optimizer_id_ = 0;
   int next_dataset_id_ = 0;
 
-  std::map<int, ml_train_model_h> models_;
-  std::map<int, ml_train_optimizer_h> optimizers_;
-  std::map<int, ml_train_layer_h> layers_;
-  std::map<int, ml_train_dataset_h> datasets_;
+  std::map<int, std::shared_ptr<Model>> models_;
+  std::map<int, std::shared_ptr<NativeWrapper<ml_train_optimizer_h>>> optimizers_;
+  std::map<int, std::shared_ptr<NativeWrapper<ml_train_layer_h>>> layers_;
+  std::map<int, std::shared_ptr<NativeWrapper<ml_train_dataset_h>>> datasets_;
 };
 
 }  // namespace ml
diff --git a/src/ml/ml_trainer_objects.cc b/src/ml/ml_trainer_objects.cc
new file mode 100644
index 00000000..9a9aa32a
--- /dev/null
+++ b/src/ml/ml_trainer_objects.cc
@@ -0,0 +1,28 @@
+#include "ml_trainer_objects.h"
+
+namespace extension {
+namespace ml {
+
+Model::Model() {
+  optimizerIndex = INVALID_INDEX;
+  datasetIndex = INVALID_INDEX;
+  compiled = false;
+}
+
+Model::Model(ml_train_model_h native_handle) : Model() {
+  native = native_handle;
+}
+
+ml_train_model_h Model::getNative() {
+  return native;
+}
+
+void Model::setCompiled(bool flag) {
+  compiled = flag;
+}
+
+bool Model::isCompiled() {
+  return compiled;
+}
+
+}  // namespace ml
+}  // namespace extension
diff --git a/src/ml/ml_trainer_objects.h b/src/ml/ml_trainer_objects.h
new file mode 100644
index 00000000..e826369e
--- /dev/null
+++ b/src/ml/ml_trainer_objects.h
@@ -0,0 +1,84 @@
+#ifndef ML_ML_TRAINER_OBJECTS_H_
+#define ML_ML_TRAINER_OBJECTS_H_
+
+/**
+This file contains helper classes that track relations between objects and
+handle errors that the native API does not report properly:
+
+* a layer, optimizer or dataset added to a model cannot be destroyed
+directly - it is destroyed together with the model
+
+* a layer added to a model can still be modified using _set_property, so it
+must remain available in the trainer manager
+
+* when a model is destroyed, the manager has to remove the stored references
+to the related dataset, optimizer and layers
+
+* a compiled model cannot be modified (e.g. by adding a new layer)
+
+* a model cannot be trained (run() cannot be called) without prior
+compilation, as this returns an error; if compile() and run() are called
+again after that error, the C library crashes
+
+**/
+
+#include <nntrainer.h>
+
+#include <vector>
+
+namespace extension {
+namespace ml {
+
+static const int INVALID_INDEX = -1;
+
+class Model {
+  ml_train_model_h native;
+  bool compiled;
+
+  Model();
+
+ public:
+  Model(ml_train_model_h native_handle);
+  Model& operator=(const Model&) = delete;
+
+  ml_train_model_h getNative();
+
+  void setCompiled(bool flag);
+  bool isCompiled();
+
+  /*
+  The fields below are intentionally not encapsulated (left public).
+  They are accessed (read/write) multiple times by functions in the
+  higher layer (ml_trainer_manager.cc).
+
+  Hiding them behind getters/setters would add function calls without
+  any benefit (this object is used only internally in the plugin).
+  */
+  int datasetIndex;
+  int optimizerIndex;
+  std::vector<int> layerIndices;
+};
+
+template <typename T>
+class NativeWrapper {
+  T native;
+  bool attached;
+
+  NativeWrapper() { attached = false; }
+
+ public:
+  NativeWrapper(T nativeHandle) : NativeWrapper() { native = nativeHandle; }
+  NativeWrapper& operator=(const NativeWrapper&) = delete;
+
+  T getNative() { return native; }
+
+  void setAttached(bool flag) { attached = flag; }
+
+  bool isAttached() { return attached; }
+};
+
+}  // namespace ml
+}  // namespace extension
+
+#endif  // ML_ML_TRAINER_OBJECTS_H_
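One consequence of the attach/detach bookkeeping defined above: setting a new optimizer (or dataset) on a model detaches the previous one in TrainerManager, so the previous object becomes individually disposable again. A final sketch, under the same assumptions as the earlier examples:

    var sgd = tizen.ml.trainer.createOptimizer('OPTIMIZER_SGD');
    var adam = tizen.ml.trainer.createOptimizer('OPTIMIZER_ADAM');
    model.setOptimizer(sgd);
    model.setOptimizer(adam); // sgd is detached (setAttached(false)) in the manager
    sgd.dispose();            // allowed again; adam now lives until model.dispose()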