/**
 * Copyright (C) 2020 Samsung Electronics Co., Ltd. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *   http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @brief NNTrainer C-API Wrapper.
 *        This allows to construct and control NNTrainer Model.
 * @see https://github.com/nnstreamer/nntrainer
 * @author Jijoong Moon <jijoong.moon@samsung.com>
 * @author Parichay Kapoor <pk.kapoor@samsung.com>
 * @bug No known bugs except for NYI items
 */
#include <algorithm>
#include <cctype>
#include <cstdarg>
#include <cstring>
#include <functional>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include <nntrainer.h>
#include <nntrainer_internal.h>

#include <nntrainer_error.h>
#include <nntrainer_log.h>
/**
 * @brief Global lock for nntrainer C-API
 * @details This lock ensures that ml_train_model_destroy is thread safe. All
 *          other API functions use the mutex from their object handle. However
 *          for destroy, object mutex cannot be used as their handles are
 *          destroyed at destroy.
 */
/**
 * @brief Adopt an already-held lock into the current scope for the object.
 * @note The object's mutex (obj->m) must be locked before this macro runs;
 *       the created std::lock_guard only takes over the duty of unlocking
 *       when the scope exits.
 */
#define ML_TRAIN_ADOPT_LOCK(obj, obj_lock) \
  std::lock_guard<std::mutex> obj_lock(obj->m, std::adopt_lock)
52 * @brief function to wrap an exception to predefined error value
53 * @param[in] func must be wrapped inside lambda []() -> int otherwise compile
54 * error will be raised
57 template <typename F> static int nntrainer_exception_boundary(F &&func) {
58 int status = ML_ERROR_NONE;
60 /**< Exception boundary for cpp exceptions */
61 /// @note aware that some exception are inheritance of others so should be
62 /// caught before than some
65 } catch (nntrainer::exception::not_supported &e) {
66 ml_loge("%s %s", typeid(e).name(), e.what());
67 return ML_ERROR_INVALID_PARAMETER;
68 } catch (nntrainer::exception::permission_denied &e) {
69 ml_loge("%s %s", typeid(e).name(), e.what());
70 return ML_ERROR_PERMISSION_DENIED;
71 } catch (std::invalid_argument &e) {
72 ml_loge("%s %s", typeid(e).name(), e.what());
73 return ML_ERROR_INVALID_PARAMETER;
74 } catch (std::range_error &e) {
75 ml_loge("%s %s", typeid(e).name(), e.what());
76 return ML_ERROR_INVALID_PARAMETER;
77 } catch (std::out_of_range &e) {
78 ml_loge("%s %s", typeid(e).name(), e.what());
79 return ML_ERROR_INVALID_PARAMETER;
80 } catch (std::logic_error &e) {
81 ml_loge("%s %s", typeid(e).name(), e.what());
82 return ML_ERROR_INVALID_PARAMETER;
83 } catch (std::bad_alloc &e) {
84 ml_loge("%s %s", typeid(e).name(), e.what());
85 return ML_ERROR_OUT_OF_MEMORY;
86 } catch (std::exception &e) {
87 ml_loge("%s %s", typeid(e).name(), e.what());
88 return ML_ERROR_UNKNOWN;
90 ml_loge("unknown error type thrown");
91 return ML_ERROR_UNKNOWN;
94 /**< Exception boundary for specialized error code */
95 /// @todo deprecate this with #233
97 case ML_ERROR_BAD_ADDRESS:
98 return ML_ERROR_OUT_OF_MEMORY;
99 case ML_ERROR_RESULT_OUT_OF_RANGE:
100 return ML_ERROR_INVALID_PARAMETER;
/**
 * @brief Callable that returns an nntrainer error code; the unit of work
 *        passed through nntrainer_exception_boundary().
 */
using returnable = std::function<int()>;
109 * @brief std::make_shared wrapped with exception boundary
111 * @tparam Tv value type.
112 * @tparam Tp pointer type.
113 * @tparam Types args used to construct
114 * @param target pointer
116 * @return int error value. ML_ERROR_OUT_OF_MEMORY if fail
118 template <typename Tv, typename Tp, typename... Types>
119 static int exception_bounded_make_shared(Tp &target, Types... args) {
120 returnable f = [&]() {
121 target = std::make_shared<Tv>(args...);
122 return ML_ERROR_NONE;
125 return nntrainer_exception_boundary(f);
129 * @brief Create dataset with different types of train/test/valid data source
130 * @param[in] dataset dataset object to be created
131 * @param[in] type type of the dataset
132 * @param[in] train training data source
133 * @param[in] valid validation data source
134 * @param[in] test testing data source
136 template <typename T>
137 static int ml_train_dataset_create(ml_train_dataset_h *dataset,
138 ml::train::DatasetType type, T train,
140 int status = ML_ERROR_NONE;
142 check_feature_state();
143 if (dataset == NULL) {
144 return ML_ERROR_INVALID_PARAMETER;
147 ml_train_dataset *nndataset = new ml_train_dataset;
148 nndataset->magic = ML_NNTRAINER_MAGIC;
149 nndataset->in_use = false;
151 returnable f = [&]() {
152 if (train != nullptr) {
153 nndataset->dataset[ML_TRAIN_DATASET_MODE_TRAIN] =
154 ml::train::createDataset(type, train);
156 if (valid != nullptr) {
157 nndataset->dataset[ML_TRAIN_DATASET_MODE_VALID] =
158 ml::train::createDataset(type, valid);
160 if (test != nullptr) {
161 nndataset->dataset[ML_TRAIN_DATASET_MODE_TEST] =
162 ml::train::createDataset(type, test);
164 return ML_ERROR_NONE;
167 status = nntrainer_exception_boundary(f);
168 if (status != ML_ERROR_NONE) {
170 ml_loge("Error: Create dataset failed");
172 *dataset = nndataset;
179 * @brief add ml::train::Dataset to @a dataset
181 * @tparam Args args needed to create the dataset
182 * @param dataset dataset handle
183 * @param mode target mode
184 * @param type dataset type
185 * @param args args needed to create the dataset
186 * @retval #ML_ERROR_NONE Successful
187 * @retval #ML_ERROR_INVALID_PARAMETER if parameter is invalid
189 template <typename... Args>
190 static int ml_train_dataset_add_(ml_train_dataset_h dataset,
191 ml_train_dataset_mode_e mode,
192 ml::train::DatasetType type, Args &&... args) {
193 check_feature_state();
194 std::shared_ptr<ml::train::Dataset> underlying_dataset;
196 returnable f = [&]() {
198 ml::train::createDataset(type, std::forward<Args>(args)...);
199 return ML_ERROR_NONE;
202 int status = nntrainer_exception_boundary(f);
203 if (status != ML_ERROR_NONE) {
204 ml_loge("Failed to create dataset");
208 if (underlying_dataset == nullptr) {
209 return ML_ERROR_INVALID_PARAMETER;
212 ml_train_dataset *nndataset;
213 ML_TRAIN_VERIFY_VALID_HANDLE(dataset);
216 ML_TRAIN_GET_VALID_DATASET_LOCKED(nndataset, dataset);
217 ML_TRAIN_ADOPT_LOCK(nndataset, dataset_lock);
219 nndataset->dataset[mode] = underlying_dataset;
229 * @brief Function to create ml::train::Model object.
231 static int nn_object(ml_train_model_h *model) {
232 int status = ML_ERROR_NONE;
235 return ML_ERROR_INVALID_PARAMETER;
237 ml_train_model *nnmodel = new ml_train_model;
238 nnmodel->magic = ML_NNTRAINER_MAGIC;
239 nnmodel->optimizer = NULL;
240 nnmodel->dataset = NULL;
244 returnable f = [&]() {
245 nnmodel->model = ml::train::createModel(ml::train::ModelType::NEURAL_NET);
246 return ML_ERROR_NONE;
249 status = nntrainer_exception_boundary(f);
250 if (status != ML_ERROR_NONE) {
252 ml_loge("Error: creating nn object failed");
258 int ml_train_model_construct(ml_train_model_h *model) {
259 int status = ML_ERROR_NONE;
261 check_feature_state();
263 returnable f = [&]() { return nn_object(model); };
265 status = nntrainer_exception_boundary(f);
269 int ml_train_model_construct_with_conf(const char *model_conf,
270 ml_train_model_h *model) {
271 int status = ML_ERROR_NONE;
272 ml_train_model *nnmodel;
273 std::shared_ptr<ml::train::Model> m;
276 status = ml_train_model_construct(model);
277 if (status != ML_ERROR_NONE)
280 nnmodel = (ml_train_model *)(*model);
283 f = [&]() { return m->loadFromConfig(model_conf); };
284 status = nntrainer_exception_boundary(f);
285 if (status != ML_ERROR_NONE) {
286 ml_train_model_destroy(*model);
292 int ml_train_model_compile(ml_train_model_h model, ...) {
293 int status = ML_ERROR_NONE;
295 ml_train_model *nnmodel;
297 std::shared_ptr<ml::train::Model> m;
299 check_feature_state();
301 ML_TRAIN_VERIFY_VALID_HANDLE(model);
303 std::vector<std::string> arg_list;
305 va_start(arguments, model);
307 while ((data = va_arg(arguments, const char *))) {
308 arg_list.push_back(data);
313 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
314 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
319 m->setProperty(arg_list);
320 return ML_ERROR_NONE;
322 status = nntrainer_exception_boundary(f);
323 if (status != ML_ERROR_NONE)
326 f = [&]() { return m->compile(); };
327 status = nntrainer_exception_boundary(f);
328 if (status != ML_ERROR_NONE)
331 f = [&]() { return m->initialize(); };
332 status = nntrainer_exception_boundary(f);
333 if (status != ML_ERROR_NONE)
339 int ml_train_model_compile_with_single_param(ml_train_model_h model,
340 const char *single_param) {
341 ML_TRAIN_VERIFY_VALID_HANDLE(model);
343 return ml_train_model_compile(model, single_param, NULL);
346 int ml_train_model_run(ml_train_model_h model, ...) {
347 int status = ML_ERROR_NONE;
348 ml_train_model *nnmodel;
350 std::shared_ptr<ml::train::Model> m;
352 check_feature_state();
354 ML_TRAIN_VERIFY_VALID_HANDLE(model);
356 std::vector<std::string> arg_list;
358 va_start(arguments, model);
360 while ((data = va_arg(arguments, const char *))) {
361 arg_list.push_back(data);
367 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
368 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
372 returnable f = [&]() { return m->train(arg_list); };
373 status = nntrainer_exception_boundary(f);
378 int ml_train_model_run_with_single_param(ml_train_model_h model,
379 const char *single_param) {
380 ML_TRAIN_VERIFY_VALID_HANDLE(model);
382 return ml_train_model_run(model, single_param, NULL);
385 int ml_train_model_destroy(ml_train_model_h model) {
386 int status = ML_ERROR_NONE;
387 ml_train_model *nnmodel;
389 check_feature_state();
392 ML_TRAIN_GET_VALID_MODEL_LOCKED_RESET(nnmodel, model);
393 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
396 if (nnmodel->optimizer) {
397 if (nnmodel->optimizer->lr_sheduler) {
398 ML_TRAIN_RESET_VALIDATED_HANDLE(nnmodel->optimizer->lr_sheduler);
399 delete nnmodel->optimizer->lr_sheduler;
402 ML_TRAIN_RESET_VALIDATED_HANDLE(nnmodel->optimizer);
403 delete nnmodel->optimizer;
406 if (nnmodel->dataset) {
407 ML_TRAIN_RESET_VALIDATED_HANDLE(nnmodel->dataset);
408 delete nnmodel->dataset;
411 for (auto &x : nnmodel->layers_map) {
412 ML_TRAIN_RESET_VALIDATED_HANDLE(x.second);
415 nnmodel->layers_map.clear();
422 static int ml_train_model_get_summary_util(ml_train_model_h model,
423 ml_train_summary_type_e verbosity,
424 std::stringstream &ss) {
425 int status = ML_ERROR_NONE;
426 ml_train_model *nnmodel;
427 std::shared_ptr<ml::train::Model> m;
430 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
431 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
436 returnable f = [&]() {
437 m->summarize(ss, verbosity);
438 return ML_ERROR_NONE;
441 status = nntrainer_exception_boundary(f);
445 int ml_train_model_get_summary(ml_train_model_h model,
446 ml_train_summary_type_e verbosity,
448 int status = ML_ERROR_NONE;
449 std::stringstream ss;
451 check_feature_state();
453 ML_TRAIN_VERIFY_VALID_HANDLE(model);
455 if (summary == nullptr) {
456 ml_loge("summary pointer is null");
457 return ML_ERROR_INVALID_PARAMETER;
460 status = ml_train_model_get_summary_util(model, verbosity, ss);
461 if (status != ML_ERROR_NONE) {
462 ml_loge("failed make a summary: %d", status);
466 std::string str = ss.str();
467 const std::string::size_type size = str.size();
470 ml_logw("summary is empty for the model!");
473 *summary = (char *)malloc((size + 1) * sizeof(char));
474 if (*summary == nullptr) {
475 ml_loge("failed to malloc");
476 return ML_ERROR_OUT_OF_MEMORY;
478 std::memcpy(*summary, str.c_str(), size + 1);
483 int ml_train_model_add_layer(ml_train_model_h model, ml_train_layer_h layer) {
484 int status = ML_ERROR_NONE;
485 ml_train_model *nnmodel;
486 ml_train_layer *nnlayer;
488 check_feature_state();
490 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
491 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
492 ML_TRAIN_GET_VALID_LAYER_LOCKED(nnlayer, layer);
493 ML_TRAIN_ADOPT_LOCK(nnlayer, layer_lock);
495 if (nnlayer->in_use) {
496 ml_loge("Layer already in use.");
497 return ML_ERROR_INVALID_PARAMETER;
500 std::shared_ptr<ml::train::Model> m;
501 std::shared_ptr<ml::train::Layer> l;
506 if (nnmodel->layers_map.count(l->getName())) {
507 ml_loge("It is not allowed to add layer with same name: %s",
508 l->getName().c_str());
509 return ML_ERROR_INVALID_PARAMETER;
512 returnable f = [&]() { return m->addLayer(l); };
514 status = nntrainer_exception_boundary(f);
515 if (status != ML_ERROR_NONE)
518 nnmodel->layers_map.insert({l->getName(), nnlayer});
519 nnlayer->in_use = true;
523 int ml_train_model_set_optimizer(ml_train_model_h model,
524 ml_train_optimizer_h optimizer) {
525 int status = ML_ERROR_NONE;
526 ml_train_model *nnmodel;
527 ml_train_optimizer *nnopt;
529 check_feature_state();
531 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
532 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
533 ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, optimizer);
534 ML_TRAIN_ADOPT_LOCK(nnopt, opt_lock);
537 ml_loge("Optimizer already in use.");
538 return ML_ERROR_INVALID_PARAMETER;
541 std::shared_ptr<ml::train::Model> m;
542 std::shared_ptr<ml::train::Optimizer> opt;
545 opt = nnopt->optimizer;
547 returnable f = [&]() { return m->setOptimizer(opt); };
549 status = nntrainer_exception_boundary(f);
550 if (status == ML_ERROR_NONE) {
551 nnopt->in_use = true;
552 if (nnmodel->optimizer) {
553 nnmodel->optimizer->in_use = false;
555 nnmodel->optimizer = nnopt;
561 int ml_train_model_set_dataset(ml_train_model_h model,
562 ml_train_dataset_h dataset) {
563 int status = ML_ERROR_NONE;
564 ml_train_model *nnmodel;
565 ml_train_dataset *nndataset;
567 check_feature_state();
569 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
570 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
571 ML_TRAIN_GET_VALID_DATASET_LOCKED(nndataset, dataset);
572 ML_TRAIN_ADOPT_LOCK(nndataset, dataset_lock);
574 if (nndataset->in_use) {
575 ml_loge("Dataset already in use.");
576 return ML_ERROR_INVALID_PARAMETER;
579 std::shared_ptr<ml::train::Model> m;
583 returnable f = [&]() {
584 auto &[train_set, valid_set, test_set] = nndataset->dataset;
585 int status = ML_ERROR_NONE;
586 status = m->setDataset(ml::train::DatasetModeType::MODE_TRAIN, train_set);
587 if (status != ML_ERROR_NONE) {
591 if (valid_set != nullptr) {
592 status = m->setDataset(ml::train::DatasetModeType::MODE_VALID, valid_set);
593 if (status != ML_ERROR_NONE) {
598 if (test_set != nullptr) {
599 status = m->setDataset(ml::train::DatasetModeType::MODE_TEST, test_set);
600 if (status != ML_ERROR_NONE) {
607 status = nntrainer_exception_boundary(f);
608 if (status == ML_ERROR_NONE) {
609 nndataset->in_use = true;
610 if (nnmodel->dataset)
611 nnmodel->dataset->in_use = false;
612 nnmodel->dataset = nndataset;
618 int ml_train_model_get_layer(ml_train_model_h model, const char *layer_name,
619 ml_train_layer_h *layer) {
620 int status = ML_ERROR_NONE;
621 ml_train_model *nnmodel;
623 check_feature_state();
625 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
626 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
628 std::unordered_map<std::string, ml_train_layer *>::iterator layer_iter =
629 nnmodel->layers_map.find(std::string(layer_name));
630 /** if layer found in layers_map, return layer */
631 if (layer_iter != nnmodel->layers_map.end()) {
632 *layer = layer_iter->second;
637 * if layer not found in layers_map, get layer from model,
638 * wrap it in struct nnlayer, add new entry in layer_map and then return
640 std::shared_ptr<ml::train::Model> m;
641 std::shared_ptr<ml::train::Layer> l;
644 returnable f = [&]() { return m->getLayer(layer_name, &l); };
645 status = nntrainer_exception_boundary(f);
647 if (status != ML_ERROR_NONE)
650 ml_train_layer *nnlayer = new ml_train_layer;
651 nnlayer->magic = ML_NNTRAINER_MAGIC;
653 nnlayer->in_use = true;
654 nnmodel->layers_map.insert({l->getName(), nnlayer});
660 int ml_train_layer_create(ml_train_layer_h *layer, ml_train_layer_type_e type) {
661 int status = ML_ERROR_NONE;
662 ml_train_layer *nnlayer;
664 check_feature_state();
666 nnlayer = new ml_train_layer;
667 nnlayer->magic = ML_NNTRAINER_MAGIC;
668 nnlayer->in_use = false;
670 returnable f = [&]() {
671 nnlayer->layer = ml::train::createLayer((ml::train::LayerType)type);
672 return ML_ERROR_NONE;
675 status = nntrainer_exception_boundary(f);
676 if (status != ML_ERROR_NONE) {
678 ml_loge("Error: Create layer failed");
686 int ml_train_layer_destroy(ml_train_layer_h layer) {
687 int status = ML_ERROR_NONE;
688 ml_train_layer *nnlayer;
690 check_feature_state();
693 ML_TRAIN_GET_VALID_LAYER_LOCKED_RESET(nnlayer, layer);
694 ML_TRAIN_ADOPT_LOCK(nnlayer, layer_lock);
696 if (nnlayer->in_use) {
697 ml_loge("Cannot delete layer already added in a model."
698 "Delete model will delete this layer.");
699 return ML_ERROR_INVALID_PARAMETER;
708 int ml_train_layer_set_property(ml_train_layer_h layer, ...) {
709 int status = ML_ERROR_NONE;
710 ml_train_layer *nnlayer;
712 std::shared_ptr<ml::train::Layer> l;
714 check_feature_state();
716 ML_TRAIN_VERIFY_VALID_HANDLE(layer);
718 std::vector<std::string> arg_list;
720 va_start(arguments, layer);
722 while ((data = va_arg(arguments, const char *))) {
723 arg_list.push_back(data);
729 ML_TRAIN_GET_VALID_LAYER_LOCKED(nnlayer, layer);
730 ML_TRAIN_ADOPT_LOCK(nnlayer, layer_lock);
735 returnable f = [&]() {
736 l->setProperty(arg_list);
737 return ML_ERROR_NONE;
739 status = nntrainer_exception_boundary(f);
744 int ml_train_layer_set_property_with_single_param(ml_train_layer_h layer,
745 const char *single_param) {
746 ML_TRAIN_VERIFY_VALID_HANDLE(layer);
748 return ml_train_layer_set_property(layer, single_param, NULL);
751 int ml_train_optimizer_create(ml_train_optimizer_h *optimizer,
752 ml_train_optimizer_type_e type) {
753 int status = ML_ERROR_NONE;
755 check_feature_state();
757 ml_train_optimizer *nnopt = new ml_train_optimizer;
758 nnopt->magic = ML_NNTRAINER_MAGIC;
759 nnopt->in_use = false;
760 nnopt->lr_sheduler = NULL;
762 returnable f = [&]() {
764 ml::train::createOptimizer((ml::train::OptimizerType)type);
765 return ML_ERROR_NONE;
768 status = nntrainer_exception_boundary(f);
769 if (status != ML_ERROR_NONE) {
771 ml_loge("creating optimizer failed");
779 int ml_train_optimizer_destroy(ml_train_optimizer_h optimizer) {
780 int status = ML_ERROR_NONE;
781 ml_train_optimizer *nnopt;
783 check_feature_state();
786 ML_TRAIN_GET_VALID_OPT_LOCKED_RESET(nnopt, optimizer);
787 ML_TRAIN_ADOPT_LOCK(nnopt, optimizer_lock);
790 ml_loge("Cannot delete optimizer already set to a model."
791 "Delete model will delete this optimizer.");
792 return ML_ERROR_INVALID_PARAMETER;
795 if (nnopt->lr_sheduler) {
796 ML_TRAIN_RESET_VALIDATED_HANDLE(nnopt->lr_sheduler);
797 delete nnopt->lr_sheduler;
805 int ml_train_optimizer_set_property(ml_train_optimizer_h optimizer, ...) {
806 int status = ML_ERROR_NONE;
807 ml_train_optimizer *nnopt;
809 std::shared_ptr<ml::train::Optimizer> opt;
811 check_feature_state();
813 ML_TRAIN_VERIFY_VALID_HANDLE(optimizer);
815 std::vector<std::string> arg_list;
817 va_start(arguments, optimizer);
819 while ((data = va_arg(arguments, const char *))) {
820 arg_list.push_back(data);
826 ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, optimizer);
827 ML_TRAIN_ADOPT_LOCK(nnopt, optimizer_lock);
829 opt = nnopt->optimizer;
832 returnable f = [&]() {
833 opt->setProperty(arg_list);
834 return ML_ERROR_NONE;
837 status = nntrainer_exception_boundary(f);
842 int ml_train_optimizer_set_property_with_single_param(
843 ml_train_optimizer_h optimizer, const char *single_param) {
844 ML_TRAIN_VERIFY_VALID_HANDLE(optimizer);
846 return ml_train_optimizer_set_property(optimizer, single_param, NULL);
849 int ml_train_optimizer_set_lr_scheduler(ml_train_optimizer_h optimizer,
850 ml_train_lr_scheduler_h lr_scheduler) {
851 int status = ML_ERROR_NONE;
852 ml_train_optimizer *nnopt;
853 ml_train_lr_scheduler *nnlrscheduler;
855 check_feature_state();
857 ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, optimizer);
858 ML_TRAIN_ADOPT_LOCK(nnopt, opt_lock);
859 ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED(nnlrscheduler, lr_scheduler);
860 ML_TRAIN_ADOPT_LOCK(nnlrscheduler, lr_scheduler_lock);
862 if (nnlrscheduler->in_use) {
863 ml_loge("learning rate scheduler already in use.");
864 return ML_ERROR_INVALID_PARAMETER;
867 std::shared_ptr<ml::train::Optimizer> opt;
868 std::shared_ptr<ml::train::LearningRateScheduler> lr_sched;
870 opt = nnopt->optimizer;
871 lr_sched = nnlrscheduler->lr_scheduler;
873 returnable f = [&]() { return opt->setLearningRateScheduler(lr_sched); };
875 status = nntrainer_exception_boundary(f);
876 if (status == ML_ERROR_NONE) {
877 nnlrscheduler->in_use = true;
878 if (nnopt->lr_sheduler) {
879 nnopt->lr_sheduler->in_use = false;
881 nnopt->lr_sheduler = nnlrscheduler;
887 int ml_train_lr_scheduler_create(ml_train_lr_scheduler_h *lr_scheduler,
888 ml_train_lr_scheduler_type_e type) {
889 int status = ML_ERROR_NONE;
891 check_feature_state();
893 ml_train_lr_scheduler *nnlrscheduler = new ml_train_lr_scheduler;
894 nnlrscheduler->magic = ML_NNTRAINER_MAGIC;
895 nnlrscheduler->in_use = false;
897 returnable f = [&]() {
898 nnlrscheduler->lr_scheduler = ml::train::createLearningRateScheduler(
899 (ml::train::LearningRateSchedulerType)type);
900 return ML_ERROR_NONE;
903 status = nntrainer_exception_boundary(f);
904 if (status != ML_ERROR_NONE) {
905 delete nnlrscheduler;
906 ml_loge("creating optimizer failed");
908 *lr_scheduler = nnlrscheduler;
914 int ml_train_lr_scheduler_destroy(ml_train_lr_scheduler_h lr_scheduler) {
915 int status = ML_ERROR_NONE;
916 ml_train_lr_scheduler *nnlrscheduler;
918 check_feature_state();
921 ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED_RESET(nnlrscheduler, lr_scheduler);
922 ML_TRAIN_ADOPT_LOCK(nnlrscheduler, lr_scheduler_lock);
924 if (nnlrscheduler->in_use) {
926 "Cannot delete learning rate scheduler already set to a optimizer."
927 "Delete optimizer will delete this learning rate scheduler.");
928 return ML_ERROR_INVALID_PARAMETER;
932 delete nnlrscheduler;
936 int ml_train_lr_scheduler_set_property(ml_train_lr_scheduler_h lr_scheduler,
938 int status = ML_ERROR_NONE;
939 ml_train_lr_scheduler *nnlrscheduler;
941 std::shared_ptr<ml::train::LearningRateScheduler> lr_sched;
943 check_feature_state();
945 ML_TRAIN_VERIFY_VALID_HANDLE(lr_scheduler);
947 std::vector<std::string> arg_list;
949 va_start(arguments, lr_scheduler);
951 while ((data = va_arg(arguments, const char *))) {
952 arg_list.push_back(data);
958 ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED(nnlrscheduler, lr_scheduler);
959 ML_TRAIN_ADOPT_LOCK(nnlrscheduler, lr_scheduler_lock);
961 lr_sched = nnlrscheduler->lr_scheduler;
964 returnable f = [&]() {
965 lr_sched->setProperty(arg_list);
966 return ML_ERROR_NONE;
969 status = nntrainer_exception_boundary(f);
974 int ml_train_lr_scheduler_set_property_with_single_param(
975 ml_train_lr_scheduler_h lr_scheduler, const char *single_param) {
976 ML_TRAIN_VERIFY_VALID_HANDLE(lr_scheduler);
978 return ml_train_lr_scheduler_set_property(lr_scheduler, single_param, NULL);
981 int ml_train_dataset_create(ml_train_dataset_h *dataset) {
982 return ml_train_dataset_create(dataset, ml::train::DatasetType::UNKNOWN,
983 nullptr, nullptr, nullptr);
986 int ml_train_dataset_add_generator(ml_train_dataset_h dataset,
987 ml_train_dataset_mode_e mode,
988 ml_train_datagen_cb cb, void *user_data) {
989 check_feature_state();
991 return ML_ERROR_INVALID_PARAMETER;
994 return ml_train_dataset_add_(dataset, mode, ml::train::DatasetType::GENERATOR,
998 int ml_train_dataset_add_file(ml_train_dataset_h dataset,
999 ml_train_dataset_mode_e mode, const char *file) {
1000 check_feature_state();
1001 if (file == nullptr) {
1002 return ML_ERROR_INVALID_PARAMETER;
1005 return ml_train_dataset_add_(dataset, mode, ml::train::DatasetType::FILE,
1009 int ml_train_dataset_create_with_generator(ml_train_dataset_h *dataset,
1010 ml_train_datagen_cb train_cb,
1011 ml_train_datagen_cb valid_cb,
1012 ml_train_datagen_cb test_cb) {
1013 if (train_cb == nullptr) {
1014 return ML_ERROR_INVALID_PARAMETER;
1016 return ml_train_dataset_create(dataset, ml::train::DatasetType::GENERATOR,
1017 train_cb, valid_cb, test_cb);
1020 int ml_train_dataset_create_with_file(ml_train_dataset_h *dataset,
1021 const char *train_file,
1022 const char *valid_file,
1023 const char *test_file) {
1024 if (train_file == nullptr) {
1025 return ML_ERROR_INVALID_PARAMETER;
1027 return ml_train_dataset_create(dataset, ml::train::DatasetType::FILE,
1028 train_file, valid_file, test_file);
1032 * @brief set property for the specific data mode, main difference from @a
1033 * ml_train_dataset_set_property_for_mode() is that this function returns @a
1034 * ML_ERROR_NOT_SUPPORTED if dataset does not exist.
1036 * @param[in] dataset dataset
1037 * @param[in] mode mode
1038 * @param[in] args argument
1039 * @retval #ML_ERROR_NONE successful
1040 * @retval #ML_ERROR_INVALID_PARAMETER when arg is invalid
1041 * @retval #ML_ERROR_NOT_SUPPORTED when dataset did not exist
1044 ml_train_dataset_set_property_for_mode_(ml_train_dataset_h dataset,
1045 ml_train_dataset_mode_e mode,
1046 const std::vector<void *> &args) {
1047 static constexpr char USER_DATA[] = "user_data";
1048 int status = ML_ERROR_NONE;
1049 ml_train_dataset *nndataset;
1051 check_feature_state();
1053 ML_TRAIN_VERIFY_VALID_HANDLE(dataset);
1056 ML_TRAIN_GET_VALID_DATASET_LOCKED(nndataset, dataset);
1057 ML_TRAIN_ADOPT_LOCK(nndataset, dataset_lock);
1059 auto &db = nndataset->dataset[mode];
1061 returnable f = [&db, &args]() {
1062 int status_ = ML_ERROR_NONE;
1063 if (db == nullptr) {
1064 status_ = ML_ERROR_NOT_SUPPORTED;
1068 std::vector<std::string> properties;
1069 for (unsigned int i = 0; i < args.size(); ++i) {
1070 char *key_ptr = (char *)args[i];
1071 std::string key = key_ptr;
1072 std::for_each(key.begin(), key.end(),
1073 [](char &c) { c = ::tolower(c); });
1074 key.erase(std::remove_if(key.begin(), key.end(), ::isspace), key.end());
1076 /** Handle the user_data as a special case, serialize the address and
1077 * pass it to the databuffer */
1078 if (key == USER_DATA) {
1079 /** This ensures that a valid user_data element is passed by the user
1081 if (i + 1 >= args.size()) {
1082 ml_loge("key user_data expects, next value to be a pointer");
1083 status_ = ML_ERROR_INVALID_PARAMETER;
1086 std::ostringstream ss;
1087 ss << key << '=' << args[i + 1];
1088 properties.push_back(ss.str());
1090 /** As values of i+1 is consumed, increase i by 1 */
1092 } else if (key.rfind("user_data=", 0) == 0) {
1093 /** case that user tries to pass something like user_data=5, this is
1095 status_ = ML_ERROR_INVALID_PARAMETER;
1098 properties.push_back(key);
1103 db->setProperty(properties);
1107 status = nntrainer_exception_boundary(f);
1112 int ml_train_dataset_set_property(ml_train_dataset_h dataset, ...) {
1113 std::vector<void *> arg_list;
1115 va_start(arguments, dataset);
1118 while ((data = va_arg(arguments, void *))) {
1119 arg_list.push_back(data);
1123 /// having status of ML_ERROR_NOT_SUPPORTED is not an error in this call.
1124 int status = ml_train_dataset_set_property_for_mode_(
1125 dataset, ML_TRAIN_DATASET_MODE_TRAIN, arg_list);
1126 if (status != ML_ERROR_NONE && status != ML_ERROR_NOT_SUPPORTED) {
1130 status = ml_train_dataset_set_property_for_mode_(
1131 dataset, ML_TRAIN_DATASET_MODE_VALID, arg_list);
1132 if (status != ML_ERROR_NONE && status != ML_ERROR_NOT_SUPPORTED) {
1136 status = ml_train_dataset_set_property_for_mode_(
1137 dataset, ML_TRAIN_DATASET_MODE_TEST, arg_list);
1138 if (status != ML_ERROR_NONE && status != ML_ERROR_NOT_SUPPORTED) {
1142 return ML_ERROR_NONE;
1145 int ml_train_dataset_set_property_for_mode(ml_train_dataset_h dataset,
1146 ml_train_dataset_mode_e mode, ...) {
1147 std::vector<void *> arg_list;
1149 va_start(arguments, mode);
1152 while ((data = va_arg(arguments, void *))) {
1153 arg_list.push_back(data);
1157 int status = ml_train_dataset_set_property_for_mode_(dataset, mode, arg_list);
1159 return status != ML_ERROR_NONE ? ML_ERROR_INVALID_PARAMETER : ML_ERROR_NONE;
1162 int ml_train_dataset_set_property_for_mode_with_single_param(
1163 ml_train_dataset_h dataset, ml_train_dataset_mode_e mode,
1164 const char *single_param) {
1165 ML_TRAIN_VERIFY_VALID_HANDLE(dataset);
1167 return ml_train_dataset_set_property_for_mode(dataset, mode, single_param,
1171 int ml_train_dataset_destroy(ml_train_dataset_h dataset) {
1172 int status = ML_ERROR_NONE;
1173 ml_train_dataset *nndataset;
1175 check_feature_state();
1178 ML_TRAIN_GET_VALID_DATASET_LOCKED_RESET(nndataset, dataset);
1179 ML_TRAIN_ADOPT_LOCK(nndataset, dataset_lock);
1181 if (nndataset->in_use) {
1182 ml_loge("Cannot delete dataset already set to a model."
1183 "Delete model will delete this dataset.");
1184 return ML_ERROR_INVALID_PARAMETER;
1192 int ml_train_model_get_input_tensors_info(ml_train_model_h model,
1193 ml_tensors_info_h *info) {
1194 int status = ML_ERROR_NONE;
1195 ml_train_model *nnmodel;
1196 std::shared_ptr<ml::train::Model> m;
1199 check_feature_state();
1202 return ML_ERROR_INVALID_PARAMETER;
1205 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
1206 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
1209 return ML_ERROR_INVALID_PARAMETER;
1212 std::vector<ml::train::TensorDim> dims;
1214 dims = m->getInputDimension();
1215 return ML_ERROR_NONE;
1217 status = nntrainer_exception_boundary(f);
1218 if (status != ML_ERROR_NONE) {
1222 status = ml_tensors_info_create(info);
1223 if (status != ML_ERROR_NONE) {
1227 status = ml_tensors_info_set_count(*info, dims.size());
1228 if (status != ML_ERROR_NONE) {
1229 ml_tensors_info_destroy(*info);
1233 for (unsigned int i = 0; i < dims.size(); ++i) {
1234 status = ml_tensors_info_set_tensor_type(*info, i, ML_TENSOR_TYPE_FLOAT32);
1235 if (status != ML_ERROR_NONE) {
1236 ml_tensors_info_destroy(*info);
1240 std::vector<unsigned int> u_dim;
1242 for (unsigned int j = 0; j < dims[i].getNumDim(); j++)
1243 u_dim.push_back(dims[i].getDim()[j]);
1245 status = ml_tensors_info_set_tensor_dimension(*info, i, u_dim.data());
1246 if (status != ML_ERROR_NONE) {
1247 ml_tensors_info_destroy(*info);
1255 int ml_train_model_get_output_tensors_info(ml_train_model_h model,
1256 ml_tensors_info_h *info) {
1257 int status = ML_ERROR_NONE;
1258 ml_train_model *nnmodel;
1259 std::shared_ptr<ml::train::Model> m;
1262 check_feature_state();
1265 return ML_ERROR_INVALID_PARAMETER;
1268 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
1269 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
1272 return ML_ERROR_INVALID_PARAMETER;
1275 std::vector<ml::train::TensorDim> dims;
1278 dims = m->getOutputDimension();
1279 return ML_ERROR_NONE;
1281 status = nntrainer_exception_boundary(f);
1282 if (status != ML_ERROR_NONE) {
1286 status = ml_tensors_info_create(info);
1287 if (status != ML_ERROR_NONE) {
1291 status = ml_tensors_info_set_count(*info, dims.size());
1292 if (status != ML_ERROR_NONE) {
1293 ml_tensors_info_destroy(*info);
1297 for (unsigned int i = 0; i < dims.size(); ++i) {
1298 status = ml_tensors_info_set_tensor_type(*info, i, ML_TENSOR_TYPE_FLOAT32);
1299 if (status != ML_ERROR_NONE) {
1300 ml_tensors_info_destroy(*info);
1304 std::vector<unsigned int> u_dim;
1306 for (unsigned int j = 0; j < dims[i].getNumDim(); j++)
1307 u_dim.push_back(dims[i].getDim()[j]);
1309 status = ml_tensors_info_set_tensor_dimension(*info, i, u_dim.data());
1310 if (status != ML_ERROR_NONE) {
1311 ml_tensors_info_destroy(*info);
1319 int ml_train_model_save(ml_train_model_h model, const char *file_path,
1320 ml_train_model_format_e format) {
1321 int status = ML_ERROR_NONE;
1322 ml_train_model *nnmodel;
1323 std::shared_ptr<ml::train::Model> m;
1326 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
1327 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
1332 returnable f = [&]() {
1333 m->save(file_path, static_cast<ml::train::ModelFormat>(format));
1334 return ML_ERROR_NONE;
1337 status = nntrainer_exception_boundary(f);
1341 int ml_train_model_load(ml_train_model_h model, const char *file_path,
1342 ml_train_model_format_e format) {
1343 int status = ML_ERROR_NONE;
1344 ml_train_model *nnmodel;
1345 std::shared_ptr<ml::train::Model> m;
1348 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
1349 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
1354 returnable f = [&]() {
1355 m->load(file_path, static_cast<ml::train::ModelFormat>(format));
1356 return ML_ERROR_NONE;
1359 status = nntrainer_exception_boundary(f);
1363 int ml_train_model_get_weight(ml_train_model_h model, const char *layer_name,
1364 ml_tensors_data_h *weight,
1365 ml_tensors_info_h *info) {
1366 int status = ML_ERROR_NONE;
1367 ml_train_model *nnmodel;
1369 check_feature_state();
1371 ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model);
1372 ML_TRAIN_ADOPT_LOCK(nnmodel, model_lock);
1374 std::shared_ptr<ml::train::Model> m;
1375 std::shared_ptr<ml::train::Layer> l;
1376 std::vector<float *> w;
1377 std::vector<ml::train::TensorDim> dims;
1378 std::vector<std::string> weight_name;
1382 returnable f = [&]() { return m->getLayer(layer_name, &l); };
1383 status = nntrainer_exception_boundary(f);
1384 if (status != ML_ERROR_NONE)
1388 l->getWeights(w, dims);
1390 for (unsigned int i = 0; i < dims.size(); ++i)
1391 weight_name.emplace_back(l->getWeightName(i));
1393 return ML_ERROR_NONE;
1396 status = nntrainer_exception_boundary(f);
1397 if (status != ML_ERROR_NONE) {
1401 status = ml_tensors_info_create(info);
1402 if (status != ML_ERROR_NONE) {
1406 status = ml_tensors_info_set_count(*info, dims.size());
1407 if (status != ML_ERROR_NONE) {
1408 ml_tensors_info_destroy(*info);
1412 for (unsigned int i = 0; i < dims.size(); ++i) {
1413 status = ml_tensors_info_set_tensor_type(*info, i, ML_TENSOR_TYPE_FLOAT32);
1414 if (status != ML_ERROR_NONE) {
1415 ml_tensors_info_destroy(*info);
1419 std::vector<unsigned int> u_dim;
1421 for (unsigned int j = 0; j < dims[i].getNumDim(); j++)
1422 u_dim.push_back(dims[i].getDim()[j]);
1424 status = ml_tensors_info_set_tensor_dimension(*info, i, u_dim.data());
1425 if (status != ML_ERROR_NONE) {
1426 ml_tensors_info_destroy(*info);
1430 status = ml_tensors_info_set_tensor_name(*info, i, weight_name[i].c_str());
1431 if (status != ML_ERROR_NONE) {
1432 ml_tensors_info_destroy(*info);
1437 status = ml_tensors_data_create(*info, weight);
1438 if (status != ML_ERROR_NONE) {
1439 ml_tensors_data_destroy(*weight);
1443 for (unsigned int i = 0; i < dims.size(); ++i) {
1444 status = ml_tensors_data_set_tensor_data(
1445 *weight, i, w[i], dims[i].getDataLen() * sizeof(float));
1446 if (status != ML_ERROR_NONE) {
1447 ml_tensors_data_destroy(*weight);