[ML] Train - added a test method for trained model 88/270888/4
authorPiotr Kosko/Tizen API (PLT) /SRPOL/Engineer/Samsung Electronics <p.kosko@samsung.com>
Thu, 10 Feb 2022 14:36:51 +0000 (15:36 +0100)
committerPiotr Kosko/Tizen API (PLT) /SRPOL/Engineer/Samsung Electronics <p.kosko@samsung.com>
Fri, 18 Feb 2022 07:19:19 +0000 (08:19 +0100)
[ACR] https://code.sec.samsung.net/jira/browse/TWDAPI-285

[Verification] Code compiles without errors.
Verification method is available in JS console.

var trainsetFile = "documents/trainingSet.dat";
var validsetFile = "documents/valSet.dat";
// TODO should support virtual roots
var outputFile = "/home/owner/media/Documents/webapi_tizen_model.bin"
var m = tizen.ml.trainer.createModel()

var l1 = tizen.ml.trainer.createLayer("LAYER_IN")
l1.setProperty("input_shape", "1:1:62720")
l1.setProperty("normalization", "true")
l1.setProperty("name", "inputlayer")
m.addLayer(l1)

var l2 = tizen.ml.trainer.createLayer("LAYER_FC")
l2.setProperty("unit", "10")
l2.setProperty("activation", "softmax")
l2.setProperty("bias_initializer", "zeros")
l2.setProperty("weight_regularizer", "l2norm")
l2.setProperty("weight_regularizer_constant", "0.005")
l2.setProperty("weight_initializer", "xavier_uniform")
l2.setProperty("name", "fc1")
l2.setProperty("input_layers", "inputlayer")
m.addLayer(l2)

var opt = tizen.ml.trainer.createOptimizer("OPTIMIZER_ADAM")
opt.setProperty("learning_rate", "0.0001")
opt.setProperty("decay_rate", "0.96")
opt.setProperty("decay_steps", "1000")
opt.setProperty("beta1", "0.002")
opt.setProperty("beta2", "0.001")
opt.setProperty("epsilon", "1e-7")
m.setOptimizer(opt);

var dataset = tizen.ml.trainer.createFileDataset(trainsetFile, validsetFile /*no test file*/);
dataset.setProperty("buffer_size", "100", "MODE_TRAIN");
dataset.setProperty("buffer_size", "100", "MODE_VALID");
m.setDataset(dataset);

var compileOpts = {
    loss: "cross", batch_size: "16"
}
m.compile(compileOpts);

var runOpts = {
    epochs: "2", save_path: outputFile
}
m.run(runOpts, (s) => {
    console.log("success");
    console.log("Test result: " + m._checkMetrics(2.163000, 2.267410, 16.666700));
}, (e) => console.log("error " + JSON.stringify(e)));

Change-Id: I4760fe341f58f84c985c6e4e4b609bafe36fb4be

src/ml/js/ml_trainer.js
src/ml/ml_instance.cc
src/ml/ml_instance.h
src/ml/ml_trainer_manager.cc
src/ml/ml_trainer_manager.h

index 4c3a841..f1e0712 100755 (executable)
@@ -430,6 +430,30 @@ Model.prototype.summarize = function() {
     return result.summary
 };
 
+/**
+ * Private method used for verification of training results.
+ * Forwards the expected metrics to the native MLTrainerModelCheckMetrics
+ * handler, which compares them with the trained model's real values.
+ *
+ * @param {number} trainLoss expected training loss
+ * @param {number} validLoss expected validation loss
+ * @param {number} validAccuracy expected validation accuracy
+ * @returns {boolean} true if the results match the given values
+ *                    (with tolerance 1.0e-5), false otherwise
+ * @throws one of ValidBasicExceptions (AbortError otherwise) when the
+ *         native call fails
+ */
+Model.prototype._checkMetrics = function (trainLoss, validLoss, validAccuracy) {
+    var callArgs = {
+        trainLoss: trainLoss, validLoss: validLoss, validAccuracy: validAccuracy,
+        id: this._id
+    }
+
+    // Synchronous call - training has already finished when this is used.
+    var result = native_.callSync('MLTrainerModelCheckMetrics', callArgs);
+
+    if (native_.isFailure(result)) {
+        throw native_.getErrorObjectAndValidate(
+            result,
+            ValidBasicExceptions,
+            AbortError
+        );
+    }
+
+    return result.result
+};
+
+
 var ValidModelSaveExceptions = [
     'InvalidValuesError',
     'TypeMismatchError',
index 250eced..bfff53d 100644 (file)
@@ -193,6 +193,7 @@ MlInstance::MlInstance()
   REGISTER_METHOD(MLTrainerModelAddLayer);
   REGISTER_METHOD(MLTrainerModelRun);
   REGISTER_METHOD(MLTrainerModelSummarize);
+  REGISTER_METHOD(MLTrainerModelCheckMetrics);
   REGISTER_METHOD(MLTrainerModelSave);
   REGISTER_METHOD(MLTrainerModelSetDataset);
   REGISTER_METHOD(MLTrainerModelSetOptimizer);
@@ -1952,6 +1953,33 @@ void MlInstance::MLTrainerModelSummarize(const picojson::value& args, picojson::
   ReportSuccess(out);
 }
 
+// Native handler backing the private Model._checkMetrics() JS test helper.
+// Extracts the model id and the three expected metrics from |args|, asks the
+// trainer manager to compare them with the trained model's real values and
+// reports the boolean outcome to JS.
+void MlInstance::MLTrainerModelCheckMetrics(const picojson::value& args,
+                                            picojson::object& out) {
+  // JSON keys as sent by ml_trainer.js.
+  const std::string kTrainLoss = "trainLoss";
+  const std::string kValidLoss = "validLoss";
+  const std::string kValidAccuracy = "validAccuracy";
+  ScopeLogger("args: %s", args.serialize().c_str());
+  // NOTE(review): CHECK_ARGS presumably reports a failure into |out| and
+  // returns early on a missing/mistyped argument - confirm against the macro.
+  CHECK_ARGS(args, kId, double, out);
+  CHECK_ARGS(args, kTrainLoss, double, out);
+  CHECK_ARGS(args, kValidLoss, double, out);
+  CHECK_ARGS(args, kValidAccuracy, double, out);
+
+  // picojson stores all numbers as double; the id is narrowed back to int.
+  auto id = static_cast<int>(args.get(kId).get<double>());
+  auto train_loss = args.get(kTrainLoss).get<double>();
+  auto valid_loss = args.get(kValidLoss).get<double>();
+  auto valid_accuracy = args.get(kValidAccuracy).get<double>();
+
+  bool as_expected = false;
+  PlatformResult result = trainer_manager_.CheckMetrics(
+      id, train_loss, valid_loss, valid_accuracy, &as_expected);
+  if (!result) {
+    ReportError(result, &out);
+    return;
+  }
+
+  ReportSuccess(picojson::value(as_expected), out);
+}
+
 void MlInstance::MLTrainerModelSave(const picojson::value& args,
                                     picojson::object& out) {
   ScopeLogger("args: %s", args.serialize().c_str());
index cfec0a7..5e3095d 100644 (file)
@@ -166,6 +166,8 @@ class MlInstance : public common::ParsedInstance {
   void MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out);
   void MLTrainerModelRun(const picojson::value& args, picojson::object& out);
   void MLTrainerModelSummarize(const picojson::value& args, picojson::object& out);
+  void MLTrainerModelCheckMetrics(const picojson::value& args,
+                                  picojson::object& out);
   void MLTrainerModelSave(const picojson::value& args, picojson::object& out);
   void MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out);
   void MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out);
index 00795d5..d819d56 100644 (file)
@@ -334,8 +334,74 @@ PlatformResult TrainerManager::ModelSummarize(int id,
   return PlatformResult();
 }
 
-PlatformResult TrainerManager::ModelSave(int id,
-                                         const std::string& path,
+// These values are defined in nntrainer tests
+// nntrainer/include/nntrainer_test_util.h file. These defines are just copied
+// because mentioned header is not public.
+#define ML_TRAIN_SUMMARY_MODEL_TRAIN_LOSS 101
+#define ML_TRAIN_SUMMARY_MODEL_VALID_LOSS 102
+#define ML_TRAIN_SUMMARY_MODEL_VALID_ACCURACY 103
+
+PlatformResult TrainerManager::CheckMetrics(int id, double train_loss,
+                                            double valid_loss,
+                                            double valid_accuracy, bool* res) {
+  ScopeLogger("Expected train_loss %f, valid_loss: %f, valid_accuracy: %f",
+              train_loss, valid_loss, valid_accuracy);
+  const double tolerance = 1.0e-5;
+
+  if (models_.find(id) == models_.end()) {
+    LoggerE("Could not find model with id: %d", id);
+    return PlatformResult(ErrorCode::ABORT_ERR, "Could not find model");
+  }
+
+  auto& model = models_[id];
+
+  int status = ML_ERROR_NONE;
+  char *summary1 = nullptr, *summary2 = nullptr, *summary3 = nullptr;
+  status = ml_train_model_get_summary(
+      model->getNative(),
+      (ml_train_summary_type_e)ML_TRAIN_SUMMARY_MODEL_TRAIN_LOSS, &summary1);
+  if (status != ML_ERROR_NONE) {
+    LoggerE("Could not get summary for model: %d (%s)", status,
+            ml_strerror(status));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(status));
+  }
+  double train_loss_real = std::strtof(summary1, nullptr);
+  LoggerE("Train loss value: %f", train_loss_real);
+  bool result = (train_loss_real - train_loss) < tolerance;
+  free(summary1);
+
+  status = ml_train_model_get_summary(
+      model->getNative(),
+      (ml_train_summary_type_e)ML_TRAIN_SUMMARY_MODEL_VALID_LOSS, &summary2);
+  if (status != ML_ERROR_NONE) {
+    LoggerE("Could not get summary for model: %d (%s)", status,
+            ml_strerror(status));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(status));
+  }
+  double valid_loss_real = std::strtof(summary2, nullptr);
+  LoggerE("Valid loss value: %f", valid_loss_real);
+  result = result && ((valid_loss_real - valid_loss) < tolerance);
+  free(summary2);
+
+  status = ml_train_model_get_summary(
+      model->getNative(),
+      (ml_train_summary_type_e)ML_TRAIN_SUMMARY_MODEL_VALID_ACCURACY,
+      &summary3);
+  if (status != ML_ERROR_NONE) {
+    LoggerE("Could not get summary for model: %d (%s)", status,
+            ml_strerror(status));
+    return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(status));
+  }
+  double valid_accuracy_real = std::strtof(summary3, nullptr);
+  LoggerE("Valid accuracy value: %f", valid_accuracy_real);
+  result = result && ((valid_accuracy_real - valid_accuracy) < tolerance);
+  free(summary3);
+
+  *res = result;
+  return PlatformResult();
+};
+
+PlatformResult TrainerManager::ModelSave(int id, const std::string& path,
                                          ml_train_model_format_e format) {
   ScopeLogger();
 
index 4e38c5a..efd2fca 100644 (file)
@@ -45,6 +45,8 @@ class TrainerManager {
   PlatformResult ModelSetDataset(int id, int datasetId);
   PlatformResult ModelSummarize(int id, ml_train_summary_type_e level,
                                 std::string& summary);
+  PlatformResult CheckMetrics(int id, double train_loss, double valid_loss,
+                              double valid_accuracy, bool* result);
   PlatformResult ModelSave(int id,
                            const std::string& path,
                            ml_train_model_format_e format);