return result.summary
};
+/*
+Private method used for verification of training results.
+It returns true if the results match given values (with tolerance 1.0e-5), false otherwise.
+*/
+Model.prototype._checkMetrics = function (trainLoss, validLoss, validAccuracy) {
+ var callArgs = {
+ trainLoss: trainLoss, validLoss: validLoss, validAccuracy: validAccuracy,
+ id: this._id
+ }
+
+ var result = native_.callSync('MLTrainerModelCheckMetrics', callArgs);
+
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ ValidBasicExceptions,
+ AbortError
+ );
+ }
+
+ return result.result
+};
+
+
var ValidModelSaveExceptions = [
'InvalidValuesError',
'TypeMismatchError',
REGISTER_METHOD(MLTrainerModelAddLayer);
REGISTER_METHOD(MLTrainerModelRun);
REGISTER_METHOD(MLTrainerModelSummarize);
+ REGISTER_METHOD(MLTrainerModelCheckMetrics);
REGISTER_METHOD(MLTrainerModelSave);
REGISTER_METHOD(MLTrainerModelSetDataset);
REGISTER_METHOD(MLTrainerModelSetOptimizer);
ReportSuccess(out);
}
+void MlInstance::MLTrainerModelCheckMetrics(const picojson::value& args,
+                                            picojson::object& out) {
+  // Names of the numeric arguments delivered from the JS layer.
+  const std::string kTrainLoss = "trainLoss";
+  const std::string kValidLoss = "validLoss";
+  const std::string kValidAccuracy = "validAccuracy";
+  ScopeLogger("args: %s", args.serialize().c_str());
+  CHECK_ARGS(args, kId, double, out);
+  CHECK_ARGS(args, kTrainLoss, double, out);
+  CHECK_ARGS(args, kValidLoss, double, out);
+  CHECK_ARGS(args, kValidAccuracy, double, out);
+
+  const auto model_id = static_cast<int>(args.get(kId).get<double>());
+  const auto expected_train_loss = args.get(kTrainLoss).get<double>();
+  const auto expected_valid_loss = args.get(kValidLoss).get<double>();
+  const auto expected_valid_accuracy = args.get(kValidAccuracy).get<double>();
+
+  // Delegate the actual comparison to the trainer manager; the boolean
+  // outcome (metrics match within tolerance) is reported back to JS.
+  bool metrics_match = false;
+  PlatformResult ret = trainer_manager_.CheckMetrics(
+      model_id, expected_train_loss, expected_valid_loss,
+      expected_valid_accuracy, &metrics_match);
+  if (!ret) {
+    ReportError(ret, &out);
+    return;
+  }
+
+  ReportSuccess(picojson::value(metrics_match), out);
+}
+
+
void MlInstance::MLTrainerModelSave(const picojson::value& args,
picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
void MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out);
void MLTrainerModelRun(const picojson::value& args, picojson::object& out);
void MLTrainerModelSummarize(const picojson::value& args, picojson::object& out);
+ void MLTrainerModelCheckMetrics(const picojson::value& args,
+ picojson::object& out);
void MLTrainerModelSave(const picojson::value& args, picojson::object& out);
void MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out);
void MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out);
return PlatformResult();
}
-PlatformResult TrainerManager::ModelSave(int id,
- const std::string& path,
+// These values are defined in the nntrainer test header
+// nntrainer/include/nntrainer_test_util.h. The defines are copied here
+// because that header is not public.
+#define ML_TRAIN_SUMMARY_MODEL_TRAIN_LOSS 101
+#define ML_TRAIN_SUMMARY_MODEL_VALID_LOSS 102
+#define ML_TRAIN_SUMMARY_MODEL_VALID_ACCURACY 103
+
+// Compares the model's reported train loss, validation loss and validation
+// accuracy (queried via ml_train_model_get_summary) against the expected
+// values. Sets *res to true only when all three agree within |tolerance|.
+// Returns ABORT_ERR when the model id is unknown or a summary query fails.
+PlatformResult TrainerManager::CheckMetrics(int id, double train_loss,
+                                            double valid_loss,
+                                            double valid_accuracy, bool* res) {
+  ScopeLogger("Expected train_loss %f, valid_loss: %f, valid_accuracy: %f",
+              train_loss, valid_loss, valid_accuracy);
+  const double tolerance = 1.0e-5;
+
+  if (models_.find(id) == models_.end()) {
+    LoggerE("Could not find model with id: %d", id);
+    return PlatformResult(ErrorCode::ABORT_ERR, "Could not find model");
+  }
+
+  auto& model = models_[id];
+
+  // Each entry: summary type to query, the value the caller expects, and a
+  // label for logging. Folding the three identical query/parse/compare
+  // stanzas into one loop keeps the error handling in a single place.
+  const struct {
+    int type;
+    double expected;
+    const char* name;
+  } metrics[] = {
+      {ML_TRAIN_SUMMARY_MODEL_TRAIN_LOSS, train_loss, "Train loss"},
+      {ML_TRAIN_SUMMARY_MODEL_VALID_LOSS, valid_loss, "Valid loss"},
+      {ML_TRAIN_SUMMARY_MODEL_VALID_ACCURACY, valid_accuracy, "Valid accuracy"},
+  };
+
+  bool result = true;
+  for (const auto& metric : metrics) {
+    char* summary = nullptr;
+    int status = ml_train_model_get_summary(
+        model->getNative(), static_cast<ml_train_summary_type_e>(metric.type),
+        &summary);
+    if (status != ML_ERROR_NONE) {
+      LoggerE("Could not get summary for model: %d (%s)", status,
+              ml_strerror(status));
+      return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(status));
+    }
+    // strtod (not strtof) keeps the full double precision of the reported
+    // value instead of truncating it through a float.
+    double real = std::strtod(summary, nullptr);
+    free(summary);
+    LoggerE("%s value: %f", metric.name, real);
+    // Symmetric tolerance check. The previous one-sided comparison
+    // (real - expected < tolerance) accepted any value smaller than the
+    // expectation, so metrics that were far too low still "passed".
+    double diff = real - metric.expected;
+    result = result && (-tolerance < diff && diff < tolerance);
+  }
+
+  *res = result;
+  return PlatformResult();
+}
+
+PlatformResult TrainerManager::ModelSave(int id, const std::string& path,
ml_train_model_format_e format) {
ScopeLogger();
PlatformResult ModelSetDataset(int id, int datasetId);
PlatformResult ModelSummarize(int id, ml_train_summary_type_e level,
std::string& summary);
+ PlatformResult CheckMetrics(int id, double train_loss, double valid_loss,
+ double valid_accuracy, bool* result);
PlatformResult ModelSave(int id,
const std::string& path,
ml_train_model_format_e format);