BuildRequires: pkgconfig(nnstreamer)
BuildRequires: pkgconfig(capi-ml-inference)
BuildRequires: pkgconfig(nntrainer)
-BuildRequires: pkgconfig(ccapi-ml-training)
+BuildRequires: pkgconfig(capi-ml-training)
%endif
%if "%{?tizen_feature_badge_support}" == "1" || "%{?unified_build}" == "1"
* limitations under the License.
*/
-var MachineLearningManager = function() {
+var MachineLearningManager = function () {
Object.defineProperties(this, {
single: {
enumerable: true,
MVNC: 'MVNC',
NNFW: 'NNFW',
OPEN_VINO: 'OPEN_VINO',
+ SNAP: 'SNAP',
SNPE: 'SNPE',
TENSORFLOW: 'TENSORFLOW',
TENSORFLOW_LITE: 'TENSORFLOW_LITE',
NPU_SLSI: 'NPU_SLSI'
};
-MachineLearningManager.prototype.checkNNFWAvailability = function() {
+MachineLearningManager.prototype.checkNNFWAvailability = function () {
var args = validator_.validateArgs(arguments, [
{
name: 'nnfw',
LAYER_ADDITION: 'LAYER_ADDITION',
LAYER_CONCAT: 'LAYER_CONCAT',
LAYER_MULTIOUT: 'LAYER_MULTIOUT',
- LAYER_LOSS: 'LAYER_LOSS',
+ LAYER_LOSS_MSE: 'LAYER_LOSS_MSE',
+ LAYER_LOSS_CROSS_ENTROPY_SIGMOID: 'LAYER_LOSS_CROSS_ENTROPY_SIGMOID',
+ LAYER_LOSS_CROSS_ENTROPY_SOFTMAX: 'LAYER_LOSS_CROSS_ENTROPY_SOFTMAX',
LAYER_BACKBONE_NNSTREAMER: 'LAYER_BACKBONE_NNSTREAMER',
- LAYER_BACKBONE_TFLITE: 'LAYER_BACKBONE_TFLITE',
LAYER_EMBEDDING: 'LAYER_EMBEDDING',
LAYER_RNN: 'LAYER_RNN',
LAYER_UNKNOWN: 'LAYER_UNKNOWN'
'ml_trainer_manager.h',
'ml_singleshot.cc',
'ml_singleshot.h',
- 'ml_trainer.cc',
- 'ml_trainer.h',
'ml_utils.cc',
'ml_utils.h',
],
'nnstreamer',
'capi-ml-inference',
'nntrainer',
- 'ccapi-ml-training',
+ 'capi-ml-training',
]
},
}],
CHECK_ARGS(args, kType, std::string, out);
int id = -1;
- LayerType layer_type = LayerType::LAYER_UNKNOWN;
- PlatformResult result =
- types::LayerTypeEnum.getValue(args.get(kType).get<std::string>(), &layer_type);
+ ml_train_layer_type_e layer_type = ML_TRAIN_LAYER_TYPE_UNKNOWN;
+ PlatformResult result = types::LayerTypeEnum.getValue(
+ args.get(kType).get<std::string>(), &layer_type);
if (!result) {
LogAndReportError(result, &out);
return;
CHECK_ARGS(args, kType, std::string, out);
int id = -1;
- OptimizerType optimizer_type = OptimizerType::UNKNOWN;
- PlatformResult result =
- types::OptimizerTypeEnum.getValue(args.get(kType).get<std::string>(), &optimizer_type);
+ ml_train_optimizer_type_e optimizer_type = ML_TRAIN_OPTIMIZER_TYPE_UNKNOWN;
+ PlatformResult result = types::OptimizerTypeEnum.getValue(
+ args.get(kType).get<std::string>(), &optimizer_type);
if (!result) {
LogAndReportError(result, &out);
return;
void MlInstance::MLTrainerModelCompile(const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
- CHECK_ARGS(args, kOptions, picojson::object, out);
- auto options = args.get(kOptions).get<picojson::object>();
auto id = static_cast<int>(args.get(kId).get<double>());
- PlatformResult result = trainer_manager_.ModelCompile(id, options);
+ PlatformResult result = trainer_manager_.ModelCompile(id);
if (!result) {
ReportError(result, &out);
#include "ml_trainer_manager.h"
#include "common/tools.h"
-#include "nntrainer/model.h"
using common::ErrorCode;
using common::PlatformResult;
-namespace train = ml::train;
-
namespace extension {
namespace ml {
PlatformResult TrainerManager::CreateModel(int& id) {
ScopeLogger();
- try {
- auto model = train::createModel(train::ModelType::NEURAL_NET);
- models_[next_model_id_] = std::move(model);
- id = next_model_id_++;
- return PlatformResult();
- } catch (const std::exception& e) {
- // TODO: Add errors handling
- LoggerE("Could not create model: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ // Construct a raw nntrainer model handle through the C API (replaces the
+ // former C++ ccapi train::createModel() path removed above).
+ ml_train_model_h n_model = NULL;
+
+ int ret_val = ml_train_model_construct(&n_model);
+ if (ret_val != 0) {
+ LoggerE("Could not create model: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+
+ // Register the handle under a fresh integer id returned to the caller.
+ // NOTE(review): no matching ml_train_model_destroy() is visible in this
+ // chunk — confirm the handle lifecycle elsewhere in the manager.
+ models_[next_model_id_] = n_model;
+ id = next_model_id_++;
+
+ return PlatformResult();
}
PlatformResult TrainerManager::CreateModel(int& id, const std::string config) {
ScopeLogger();
- try {
- auto model = train::createModel(train::ModelType::NEURAL_NET);
- model->loadFromConfig(config);
- models_[next_model_id_] = std::move(model);
- id = next_model_id_++;
- return PlatformResult();
- } catch (const std::exception& e) {
- // TODO: Add errors handling
- LoggerE("Could not create model: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ // Overload that builds the model directly from an INI configuration file
+ // path, replacing the removed createModel() + loadFromConfig() sequence
+ // with the single C-API call below.
+ ml_train_model_h n_model = NULL;
+
+ int ret_val = ml_train_model_construct_with_conf(config.c_str(), &n_model);
+ if (ret_val != 0) {
+ LoggerE("Could not create model: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+
+ // Same id-registration scheme as the config-less overload.
+ models_[next_model_id_] = n_model;
+ id = next_model_id_++;
+
+ return PlatformResult();
}
-PlatformResult TrainerManager::ModelCompile(int id, const picojson::object& options) {
+PlatformResult TrainerManager::ModelCompile(int id) {
ScopeLogger();
if (models_.find(id) == models_.end()) {
}
auto& model = models_[id];
- std::stringstream ss;
- for (const auto& opt : options) {
- const auto& key = opt.first;
- const auto& value = opt.second.get<std::string>();
- ss << key << "=" << value;
- try {
- model->setProperty({ss.str()});
- } catch (const std::exception& e) {
- LoggerE("Could not create set property: %s", e.what());
- return PlatformResult(ErrorCode::INVALID_VALUES_ERR, e.what());
- }
- ss.clear();
- }
- try {
- model->compile();
- return PlatformResult();
- } catch (const std::exception& e) {
- // TODO: Add errors handling
- LoggerE("Could not create model: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ int ret_val = ml_train_model_compile(model, NULL);
+ if (ret_val != 0) {
+ LoggerE("Could not compile model: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+
+ return PlatformResult();
}
-PlatformResult TrainerManager::ModelRun(int id, const picojson::object& options) {
+// Runs (trains) a previously compiled model. The picojson options parameter
+// is dropped from the signature; training options are not forwarded yet
+// (NULL is passed to ml_train_model_run below).
+PlatformResult TrainerManager::ModelRun(int id) {
ScopeLogger();
+
+ // The removed implementation returned success unconditionally; this adds
+ // the missing existence check for the model id.
+ if (models_.find(id) == models_.end()) {
+ LoggerE("Could not find model with id: %d", id);
+ return PlatformResult(ErrorCode::ABORT_ERR, "Could not find model");
+ }
+
+ auto& model = models_[id];
+
+ // NOTE(review): ml_train_model_run() trains synchronously — presumably this
+ // blocks the calling thread; confirm it is invoked off the main thread.
+ int ret_val = ml_train_model_run(model, NULL);
+ if (ret_val != 0) {
+ LoggerE("Could not run model: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+ }
+
return PlatformResult();
}
-PlatformResult TrainerManager::CreateLayer(int& id, train::LayerType type) {
+// Creates a layer handle of the given C-API layer type and hands back an
+// integer id for it (type migrated from ccapi train::LayerType).
+PlatformResult TrainerManager::CreateLayer(int& id,
+ ml_train_layer_type_e type) {
ScopeLogger();
- try {
- auto layer = train::createLayer(type);
- layers_[next_layer_id_] = std::move(layer);
- id = next_layer_id_++;
- return PlatformResult();
- } catch (const std::exception& e) {
- LoggerE("Could not create layer: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ ml_train_layer_h n_layer = NULL;
+
+ int ret_val = ml_train_layer_create(&n_layer, type);
+ if (ret_val != 0) {
+ LoggerE("Could not create layer: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+
+ // Same registration pattern as models/optimizers: map id -> raw handle.
+ layers_[next_layer_id_] = n_layer;
+ id = next_layer_id_++;
+ return PlatformResult();
}
PlatformResult TrainerManager::LayerSetProperty(int& id, const std::string& name,
LoggerE("Could not find layer with id: %d", id);
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find layer");
}
+
auto layer = layers_[id];
- std::stringstream ss;
- ss << name << '=' << value;
- try {
- layer->setProperty({ss.str()});
- return PlatformResult();
- } catch (const std::exception& e) {
- LoggerE("Failed to set property for layer: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ std::string opt = name + "=" + value;
+
+ int ret_val = ml_train_layer_set_property(layer, opt.c_str(), NULL);
+ if (ret_val != 0) {
+ LoggerE("Could not set layer property: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+ return PlatformResult();
}
-PlatformResult TrainerManager::CreateOptimizer(int& id, train::OptimizerType type) {
+// Creates an optimizer handle (ADAM/SGD per OptimizerTypeEnum) and returns
+// its integer id to the caller.
+PlatformResult TrainerManager::CreateOptimizer(int& id,
+ ml_train_optimizer_type_e type) {
ScopeLogger();
- try {
- auto optimizer = train::createOptimizer(type);
- optimizers_[next_optimizer_id_] = std::move(optimizer);
- id = next_optimizer_id_++;
- return PlatformResult();
- } catch (const std::exception& e) {
- LoggerE("Could not create optimizer: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ ml_train_optimizer_h n_optimizer = NULL;
+
+ int ret_val = ml_train_optimizer_create(&n_optimizer, type);
+ if (ret_val != 0) {
+ LoggerE("Could not create optimizer: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+
+ // Register the handle under a fresh id, mirroring CreateModel/CreateLayer.
+ optimizers_[next_optimizer_id_] = n_optimizer;
+ id = next_optimizer_id_++;
+ return PlatformResult();
}
PlatformResult TrainerManager::OptimizerSetProperty(int& id, const std::string& name,
LoggerE("Could not find optimizer with id: %d", id);
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find optimizer");
}
+
auto optimizer = optimizers_[id];
- std::stringstream ss;
- ss << name << '=' << value;
- try {
- optimizer->setProperty({ss.str()});
- return PlatformResult();
- } catch (const std::exception& e) {
- LoggerE("Failed to set property for optimizer: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ std::string opt = name + "=" + value;
+ int ret_val = ml_train_optimizer_set_property(optimizer, opt.c_str(), NULL);
+ if (ret_val != 0) {
+ LoggerE("Could not set optimizer property: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+ return PlatformResult();
}
PlatformResult TrainerManager::CreateFileDataset(int& id, const std::string train_file,
const std::string valid_file,
const std::string test_file) {
ScopeLogger();
- try {
- auto dataset = train::createDataset(train::DatasetType::FILE, train_file.c_str(),
- valid_file.c_str(), test_file.c_str());
- datasets_[next_dataset_id_] = std::move(dataset);
- id = next_layer_id_++;
- return PlatformResult();
- } catch (const std::exception& e) {
- LoggerE("Failed to set property for dataset: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ // Create an empty dataset handle, then attach each provided (non-empty)
+ // data file for its corresponding mode. Empty paths are simply skipped, so
+ // callers may supply any subset of train/valid/test files.
+ ml_train_dataset_h n_dataset = NULL;
+
+ int ret_val = ml_train_dataset_create(&n_dataset);
+ if (ret_val != 0) {
+ LoggerE("Could not create dataset: %s", ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+
+ if (!train_file.empty()) {
+ ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_TRAIN,
+ train_file.c_str());
+ if (ret_val != 0) {
+ LoggerE("Could not add train file %s to dataset: %s", train_file.c_str(),
+ ml_strerror(ret_val));
+ // Destroy the partially built handle so the failure does not leak it.
+ ml_train_dataset_destroy(n_dataset);
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+ }
+ }
+
+ if (!valid_file.empty()) {
+ ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_VALID,
+ valid_file.c_str());
+ if (ret_val != 0) {
+ LoggerE("Could not add validation file %s to dataset: %s",
+ valid_file.c_str(), ml_strerror(ret_val));
+ ml_train_dataset_destroy(n_dataset);
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+ }
+ }
+
+ if (!test_file.empty()) {
+ ret_val = ml_train_dataset_add_file(n_dataset, ML_TRAIN_DATASET_MODE_TEST,
+ test_file.c_str());
+ if (ret_val != 0) {
+ LoggerE("Could not add test file %s to dataset: %s", test_file.c_str(),
+ ml_strerror(ret_val));
+ ml_train_dataset_destroy(n_dataset);
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+ }
+ }
+
+ // Fixes the removed code's copy-paste bug: ids now come from
+ // next_dataset_id_ rather than next_layer_id_.
+ datasets_[next_dataset_id_] = n_dataset;
+ id = next_dataset_id_++;
+ return PlatformResult();
}
+// TODO(MK): Add support for creating a Dataset backed by a generator callback
+
PlatformResult TrainerManager::DatasetSetProperty(int& id, const std::string& name,
const std::string& value) {
ScopeLogger("id: %d, name: %s, value: %s", id, name.c_str(), value.c_str());
LoggerE("Could not find dataset with id: %d", id);
return PlatformResult(ErrorCode::ABORT_ERR, "Could not find dataset");
}
+
auto dataset = datasets_[id];
- std::stringstream ss;
- ss << name << '=' << value;
- try {
- dataset->setProperty({ss.str()});
- return PlatformResult();
- } catch (const std::exception& e) {
- LoggerE("Could not create layer: %s", e.what());
- return PlatformResult(ErrorCode::ABORT_ERR, e.what());
+ std::string opt = name + "=" + value;
+
+ // ml_train_dataset_set_property() is marked as deprecated
+ // temporary set same property for all modes (all data files)
+ int ret_val = ml_train_dataset_set_property_for_mode(
+ dataset, ML_TRAIN_DATASET_MODE_TRAIN, opt.c_str(), NULL);
+ if (ret_val != 0) {
+ LoggerE("Could not set dataset property for train mode: %s",
+ ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
}
+
+ ret_val = ml_train_dataset_set_property_for_mode(
+ dataset, ML_TRAIN_DATASET_MODE_VALID, opt.c_str(), NULL);
+ if (ret_val != 0) {
+ LoggerE("Could not set dataset property for validation mode: %s",
+ ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+ }
+
+ ret_val = ml_train_dataset_set_property_for_mode(
+ dataset, ML_TRAIN_DATASET_MODE_TEST, opt.c_str(), NULL);
+ if (ret_val != 0) {
+ LoggerE("Could not set dataset property for test mode: %s",
+ ml_strerror(ret_val));
+ return PlatformResult(ErrorCode::ABORT_ERR, ml_strerror(ret_val));
+ }
+
+ return PlatformResult();
}
} // namespace ml
#ifndef ML_ML_TRAINER_MANAGER_H_
#define ML_ML_TRAINER_MANAGER_H_
-#include <nntrainer/dataset.h>
-#include <nntrainer/layer.h>
-#include <nntrainer/model.h>
-#include <nntrainer/optimizer.h>
-
-#include <mutex>
+#include <nntrainer/nntrainer.h>
#include "common/platform_result.h"
-#include "ml_trainer.h"
using common::PlatformResult;
-namespace train = ml::train;
-
namespace extension {
namespace ml {
PlatformResult CreateModel(int& id);
PlatformResult CreateModel(int& id, const std::string config);
- PlatformResult ModelCompile(int id, const picojson::object& options);
- PlatformResult ModelRun(int id, const picojson::object& options);
+ PlatformResult ModelCompile(int id);
+ PlatformResult ModelRun(int id);
- PlatformResult CreateLayer(int& id, train::LayerType type);
- PlatformResult LayerSetProperty(int& id, const std::string& name, const std::string& value);
+ PlatformResult CreateLayer(int& id, ml_train_layer_type_e type);
+ PlatformResult LayerSetProperty(int& id, const std::string& name,
+ const std::string& value);
- PlatformResult CreateOptimizer(int& id, train::OptimizerType type);
+ PlatformResult CreateOptimizer(int& id, ml_train_optimizer_type_e type);
PlatformResult OptimizerSetProperty(int& id, const std::string& name, const std::string& value);
PlatformResult CreateFileDataset(int& id, const std::string train_file,
int next_layer_id_ = 0;
int next_optimizer_id_ = 0;
int next_dataset_id_ = 0;
- std::map<int, std::unique_ptr<train::Model>> models_;
- std::map<int, std::shared_ptr<train::Optimizer>> optimizers_;
- std::map<int, std::shared_ptr<train::Layer>> layers_;
- std::map<int, std::shared_ptr<train::Dataset>> datasets_;
+
+ std::map<int, ml_train_model_h> models_;
+ std::map<int, ml_train_optimizer_h> optimizers_;
+ std::map<int, ml_train_layer_h> layers_;
+ std::map<int, ml_train_dataset_h> datasets_;
};
} // namespace ml
* limitations under the License.
*/
+#include "ml_utils.h"
+
#include <memory>
#include "common/logger.h"
-#include "ml_utils.h"
namespace extension {
namespace ml {
namespace types {
-const PlatformEnum<ml_nnfw_hw_e> HWTypeEnum{{"ANY", ML_NNFW_HW_ANY},
- {"AUTO", ML_NNFW_HW_AUTO},
- {"CPU", ML_NNFW_HW_CPU},
- {"CPU_NEON", ML_NNFW_HW_CPU_NEON},
- {"CPU_SIMD", ML_NNFW_HW_CPU_SIMD},
- {"GPU", ML_NNFW_HW_GPU},
- {"NPU", ML_NNFW_HW_NPU},
- {"NPU_EDGE_TPU", ML_NNFW_HW_NPU_EDGE_TPU},
- {"NPU_MOVIDIUS", ML_NNFW_HW_NPU_MOVIDIUS},
- {"NPU_SLSI", ML_NNFW_HW_NPU_SLSI},
- {"NPU_SR", ML_NNFW_HW_NPU_SR},
- {"NPU_VIVANTE", ML_NNFW_HW_NPU_VIVANTE}};
+const PlatformEnum<ml_nnfw_hw_e> HWTypeEnum{
+ {"ANY", ML_NNFW_HW_ANY},
+ {"AUTO", ML_NNFW_HW_AUTO},
+ {"CPU", ML_NNFW_HW_CPU},
+ {"CPU_NEON", ML_NNFW_HW_CPU_NEON},
+ {"CPU_SIMD", ML_NNFW_HW_CPU_SIMD},
+ {"GPU", ML_NNFW_HW_GPU},
+ {"NPU", ML_NNFW_HW_NPU},
+ {"NPU_EDGE_TPU", ML_NNFW_HW_NPU_EDGE_TPU},
+ {"NPU_MOVIDIUS", ML_NNFW_HW_NPU_MOVIDIUS},
+ {"NPU_SLSI", ML_NNFW_HW_NPU_SLSI},
+ {"NPU_SR", ML_NNFW_HW_NPU_SR},
+ {"NPU_VIVANTE", ML_NNFW_HW_NPU_VIVANTE}};
-const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum{{"ANY", ML_NNFW_TYPE_ANY},
- {"ARM_NN", ML_NNFW_TYPE_ARMNN},
- {"CUSTOM_FILTER", ML_NNFW_TYPE_CUSTOM_FILTER},
- {"EDGE_TPU", ML_NNFW_TYPE_EDGE_TPU},
- {"MVNC", ML_NNFW_TYPE_MVNC},
- {"NNFW", ML_NNFW_TYPE_NNFW},
- {"NNTR_INF", ML_NNFW_TYPE_NNTR_INF},
- {"OPEN_VINO", ML_NNFW_TYPE_OPENVINO},
- {"PYTORCH", ML_NNFW_TYPE_PYTORCH},
- {"SNPE", ML_NNFW_TYPE_SNPE},
- {"TRIX_ENGINE", ML_NNFW_TYPE_TRIX_ENGINE},
- {"TENSORFLOW", ML_NNFW_TYPE_TENSORFLOW},
- {"TENSORFLOW_LITE", ML_NNFW_TYPE_TENSORFLOW_LITE},
- {"VD_AIFW", ML_NNFW_TYPE_VD_AIFW},
- {"VIVANTE", ML_NNFW_TYPE_VIVANTE}};
+const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum{
+ {"ANY", ML_NNFW_TYPE_ANY},
+ {"ARM_NN", ML_NNFW_TYPE_ARMNN},
+ {"CUSTOM_FILTER", ML_NNFW_TYPE_CUSTOM_FILTER},
+ {"EDGE_TPU", ML_NNFW_TYPE_EDGE_TPU},
+ {"MVNC", ML_NNFW_TYPE_MVNC},
+ {"NNFW", ML_NNFW_TYPE_NNFW},
+ {"NNTR_INF", ML_NNFW_TYPE_NNTR_INF},
+ {"OPEN_VINO", ML_NNFW_TYPE_OPENVINO},
+ {"PYTORCH", ML_NNFW_TYPE_PYTORCH},
+ {"SNAP", ML_NNFW_TYPE_SNAP},
+ {"SNPE", ML_NNFW_TYPE_SNPE},
+ {"TRIX_ENGINE", ML_NNFW_TYPE_TRIX_ENGINE},
+ {"TENSORFLOW", ML_NNFW_TYPE_TENSORFLOW},
+ {"TENSORFLOW_LITE", ML_NNFW_TYPE_TENSORFLOW_LITE},
+ {"VD_AIFW", ML_NNFW_TYPE_VD_AIFW},
+ {"VIVANTE", ML_NNFW_TYPE_VIVANTE}};
const PlatformEnum<ml_tensor_type_e> TensorTypeEnum{
{"INT8", ML_TENSOR_TYPE_INT8}, {"UINT8", ML_TENSOR_TYPE_UINT8},
{"INT64", ML_TENSOR_TYPE_INT64}, {"UINT64", ML_TENSOR_TYPE_UINT64},
{"UNKNOWN", ML_TENSOR_TYPE_UNKNOWN}};
-// const PlatformEnum<DatasetType> DatasetTypeEnum{{"DATASET_GENERATOR", DatasetType::GENERATOR},
-// {"DATASET_FILE", DatasetType::FILE},
-// {"DATASET_UNKNOWN", DatasetType::UNKNOWN}};
+// const PlatformEnum<TODO> DatasetTypeEnum{{"DATASET_GENERATOR", TODO},
+// {"DATASET_FILE", TODO},
+// {"DATASET_UNKNOWN",TODO}};
-const PlatformEnum<OptimizerType> OptimizerTypeEnum{{"OPTIMIZER_ADAM", OptimizerType::ADAM},
- {"OPTIMIZER_SGD", OptimizerType::SGD},
- {"OPTIMIZER_UNKNOWN", OptimizerType::UNKNOWN}};
+const PlatformEnum<ml_train_optimizer_type_e> OptimizerTypeEnum{
+ {"OPTIMIZER_ADAM", ML_TRAIN_OPTIMIZER_TYPE_ADAM},
+ {"OPTIMIZER_SGD", ML_TRAIN_OPTIMIZER_TYPE_SGD},
+ {"OPTIMIZER_UNKNOWN", ML_TRAIN_OPTIMIZER_TYPE_UNKNOWN}};
-const PlatformEnum<LayerType> LayerTypeEnum{
- {"LAYER_IN", LayerType::LAYER_IN},
- {"LAYER_FC", LayerType::LAYER_FC},
- {"LAYER_BN", LayerType::LAYER_BN},
- {"LAYER_CONV2D", LayerType::LAYER_CONV2D},
- {"LAYER_POOLING2D", LayerType::LAYER_POOLING2D},
- {"LAYER_FLATTEN", LayerType::LAYER_FLATTEN},
- {"LAYER_ACTIVATION", LayerType::LAYER_ACTIVATION},
- {"LAYER_ADDITION", LayerType::LAYER_ADDITION},
- {"LAYER_CONCAT", LayerType::LAYER_CONCAT},
- {"LAYER_MULTIOUT", LayerType::LAYER_MULTIOUT},
- {"LAYER_LOSS", LayerType::LAYER_LOSS},
- {"LAYER_BACKBONE_NNSTREAMER", LayerType::LAYER_BACKBONE_NNSTREAMER},
- {"LAYER_BACKBONE_TFLITE", LayerType::LAYER_BACKBONE_TFLITE},
- {"LAYER_EMBEDDING", LayerType::LAYER_EMBEDDING},
- {"LAYER_RNN", LayerType::LAYER_RNN},
- {"LAYER_UNKNOWN", LayerType::LAYER_UNKNOWN}};
+const PlatformEnum<ml_train_layer_type_e> LayerTypeEnum{
+ {"LAYER_IN", ML_TRAIN_LAYER_TYPE_INPUT},
+ {"LAYER_FC", ML_TRAIN_LAYER_TYPE_FC},
+ {"LAYER_BN", ML_TRAIN_LAYER_TYPE_BN},
+ {"LAYER_CONV2D", ML_TRAIN_LAYER_TYPE_CONV2D},
+ {"LAYER_POOLING2D", ML_TRAIN_LAYER_TYPE_POOLING2D},
+ {"LAYER_FLATTEN", ML_TRAIN_LAYER_TYPE_FLATTEN},
+ {"LAYER_ACTIVATION", ML_TRAIN_LAYER_TYPE_ACTIVATION},
+ {"LAYER_ADDITION", ML_TRAIN_LAYER_TYPE_ADDITION},
+ {"LAYER_CONCAT", ML_TRAIN_LAYER_TYPE_CONCAT},
+ {"LAYER_MULTIOUT", ML_TRAIN_LAYER_TYPE_MULTIOUT},
+ {"LAYER_EMBEDDING", ML_TRAIN_LAYER_TYPE_EMBEDDING},
+ {"LAYER_RNN", ML_TRAIN_LAYER_TYPE_RNN},
+ {"LAYER_LOSS_MSE", ML_TRAIN_LAYER_TYPE_LOSS_MSE},
+ {"LAYER_LOSS_CROSS_ENTROPY_SIGMOID",
+ ML_TRAIN_LAYER_TYPE_LOSS_CROSS_ENTROPY_SIGMOID},
+ {"LAYER_LOSS_CROSS_ENTROPY_SOFTMAX",
+ ML_TRAIN_LAYER_TYPE_LOSS_CROSS_ENTROPY_SOFTMAX},
+ {"LAYER_BACKBONE_NNSTREAMER", ML_TRAIN_LAYER_TYPE_BACKBONE_NNSTREAMER},
+ {"LAYER_UNKNOWN", ML_TRAIN_LAYER_TYPE_UNKNOWN}};
-} // types
+} // namespace types
namespace util {
-PlatformResult ToPlatformResult(int ml_error_code, const std::string& error_message_beginning) {
- ScopeLogger("ml_error_code: [%d] (%s)", ml_error_code, get_error_message(ml_error_code));
+PlatformResult ToPlatformResult(int ml_error_code,
+ const std::string& error_message_beginning) {
+ ScopeLogger("ml_error_code: [%d] (%s)", ml_error_code,
+ get_error_message(ml_error_code));
switch (ml_error_code) {
case ML_ERROR_NONE:
return PlatformResult{ErrorCode::INVALID_STATE_ERR,
error_message_beginning + ": invalid state"};
case ML_ERROR_TIMED_OUT:
- return PlatformResult{ErrorCode::TIMEOUT_ERR, error_message_beginning + ": timeout"};
+ return PlatformResult{ErrorCode::TIMEOUT_ERR,
+ error_message_beginning + ": timeout"};
case ML_ERROR_NOT_SUPPORTED:
return PlatformResult{ErrorCode::NOT_SUPPORTED_ERR,
error_message_beginning + ": not supported"};
case ML_ERROR_UNKNOWN:
case ML_ERROR_OUT_OF_MEMORY:
default:
- return PlatformResult{ErrorCode::ABORT_ERR,
- error_message_beginning + ": an unknown error occurred"};
+ return PlatformResult{
+ ErrorCode::ABORT_ERR,
+ error_message_beginning + ": an unknown error occurred"};
}
}
PlatformResult result = types::NNFWTypeEnum.getValue(nnfw, &nnfw_e);
if (!result) {
- LoggerE("NNFWTypeEnum.getValue() failed, error: %s", result.message().c_str());
+ LoggerE("NNFWTypeEnum.getValue() failed, error: %s",
+ result.message().c_str());
return false;
}
result = types::HWTypeEnum.getValue(hw, &hw_e);
if (!result) {
- LoggerE("HWTypeEnum.getValue() failed, error: %s", result.message().c_str());
+ LoggerE("HWTypeEnum.getValue() failed, error: %s",
+ result.message().c_str());
return false;
}
- const char* customRequirementPtr = customRequirement ? customRequirement->c_str() : nullptr;
+ const char* customRequirementPtr =
+ customRequirement ? customRequirement->c_str() : nullptr;
bool available = false;
- int ret = ml_check_nnfw_availability_full(nnfw_e, hw_e, customRequirementPtr, &available);
+ int ret = ml_check_nnfw_availability_full(nnfw_e, hw_e, customRequirementPtr,
+ &available);
if (ML_ERROR_NONE != ret) {
- LoggerE("ml_check_nnfw_availability_full failed: %d (%s)", ret, get_error_message(ret));
+ LoggerE("ml_check_nnfw_availability_full failed: %d (%s)", ret,
+ get_error_message(ret));
return false;
}
return available;
}
-PlatformResult GetDimensionsFromJsonArray(const picojson::array& dim,
- unsigned int dimensions[ML_TENSOR_RANK_LIMIT]) {
+PlatformResult GetDimensionsFromJsonArray(
+ const picojson::array& dim, unsigned int dimensions[ML_TENSOR_RANK_LIMIT]) {
ScopeLogger();
bool foundValidValue = false;
unsigned int validDimensions[ML_TENSOR_RANK_LIMIT];
for (int i = dimSize - 1; i >= 0; i--) {
auto& d = dim[i];
if (!d.is<double>()) {
- LoggerE("dimensions array contains an invalid value: %s", d.serialize().c_str());
+ LoggerE("dimensions array contains an invalid value: %s",
+ d.serialize().c_str());
return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
"dimensions array contains an invalid value");
}
return PlatformResult(ErrorCode::NO_ERROR);
}
-PlatformResult GetLocationFromJsonArray(const picojson::array& array,
- unsigned int location[ML_TENSOR_RANK_LIMIT]) {
+PlatformResult GetLocationFromJsonArray(
+ const picojson::array& array, unsigned int location[ML_TENSOR_RANK_LIMIT]) {
if (array.size() > ML_TENSOR_RANK_LIMIT) {
LoggerD("Provided size array is bigger than supported");
}
num = a.get<double>();
}
if (num < 0) {
- LoggerE("location array contains negative value: %s", a.serialize().c_str());
+ LoggerE("location array contains negative value: %s",
+ a.serialize().c_str());
return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
"location array contains negative value");
}
return PlatformResult(ErrorCode::NO_ERROR);
}
-PlatformResult GetSizeFromJsonArray(const picojson::array& array,
- unsigned int location[ML_TENSOR_RANK_LIMIT],
- unsigned int dimensions[ML_TENSOR_RANK_LIMIT],
- unsigned int size[ML_TENSOR_RANK_LIMIT]) {
+PlatformResult GetSizeFromJsonArray(
+ const picojson::array& array, unsigned int location[ML_TENSOR_RANK_LIMIT],
+ unsigned int dimensions[ML_TENSOR_RANK_LIMIT],
+ unsigned int size[ML_TENSOR_RANK_LIMIT]) {
if (array.size() > ML_TENSOR_RANK_LIMIT) {
LoggerD("Provided size array is bigger than supported");
}
}
if (num == 0) {
LoggerE("size array contains zero value: %s", a.serialize().c_str());
- return PlatformResult(ErrorCode::INVALID_VALUES_ERR, "size array contains zero value");
+ return PlatformResult(ErrorCode::INVALID_VALUES_ERR,
+ "size array contains zero value");
} else if (num > 0) {
size[i] = static_cast<unsigned int>(num);
} else {
- // in case of negative value, size becomes size from location to end of axis
+ // in case of negative value, size becomes size from location to end of
+ // axis
size[i] = dimensions[i] - location[i];
}
i++;
return PlatformResult(ErrorCode::NO_ERROR);
}
-} // util
-} // ml
-} // extension
+} // namespace util
+} // namespace ml
+} // namespace extension
#define ML_ML_UTILS_H_
#include <nnstreamer/nnstreamer.h>
-#include <nntrainer/dataset.h>
-#include <nntrainer/layer.h>
-#include <nntrainer/optimizer.h>
+#include <nntrainer/nntrainer-api-common.h>
#if __cplusplus > 201402L
#include <optional>
using common::PlatformEnum;
using common::PlatformResult;
-using ml::train::DatasetType;
-using ml::train::LayerType;
-using ml::train::OptimizerType;
-
namespace extension {
namespace ml {
extern const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum;
extern const PlatformEnum<ml_tensor_type_e> TensorTypeEnum;
-// extern const PlatformEnum<DatasetType> DatasetTypeEnum;
-extern const PlatformEnum<OptimizerType> OptimizerTypeEnum;
-extern const PlatformEnum<LayerType> LayerTypeEnum;
+// MK-TODO implement internal enum or remove from API design if not needed
+// extern const PlatformEnum<TODO> DatasetTypeEnum;
+extern const PlatformEnum<ml_train_optimizer_type_e> OptimizerTypeEnum;
+extern const PlatformEnum<ml_train_layer_type_e> LayerTypeEnum;
} // namespace types