return 1;
}
- /// creating array of layers same as in `custom_layer_client.ini`
- std::vector<std::shared_ptr<ml::train::Layer>> layers{
- ml::train::layer::Input({"name=inputlayer", "input_shape=1:1:100"}),
- ml::train::createLayer(
- "pow", {"name=powlayer", "exponent=3", "input_layers=inputlayer"}),
- ml::train::layer::FullyConnected(
- {"name=outputlayer", "input_layers=powlayer", "unit=10",
- "bias_initializer=zeros", "activation=softmax"})};
+ std::vector<std::shared_ptr<ml::train::Layer>> layers;
+
+ try {
+ /// creating array of layers same as in `custom_layer_client.ini`
+ layers = std::vector<std::shared_ptr<ml::train::Layer>>{
+ ml::train::layer::Input({"name=inputlayer", "input_shape=1:1:100"}),
+ ml::train::createLayer(
+ "pow", {"name=powlayer", "exponent=3", "input_layers=inputlayer"}),
+ ml::train::layer::FullyConnected(
+ {"name=outputlayer", "input_layers=powlayer", "unit=10",
+ "bias_initializer=zeros", "activation=softmax"})};
+ } catch (nntrainer::exception::not_supported &e) {
+ std::cerr << "creating model failed";
+ return 1;
+ }
for (auto &layer : layers) {
model->addLayer(layer);
/**
* @brief Data buffer Create & Initialization
*/
- std::shared_ptr<ml::train::Dataset> dataset =
- createDataset(ml::train::DatasetType::GENERATOR);
- dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_TRAIN,
- getBatch_train);
- dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_VAL, getBatch_val);
+ std::shared_ptr<ml::train::Dataset> dataset;
+ try {
+ dataset = createDataset(ml::train::DatasetType::GENERATOR);
+ dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_TRAIN,
+ getBatch_train);
+ dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_VAL,
+ getBatch_val);
+ } catch (...) {
+ std::cerr << "Error creating dataset";
+ return 1;
+ }
- /**
- * @brief Neural Network Create & Initialization
- */
- std::unique_ptr<ml::train::Model> model =
- createModel(ml::train::ModelType::NEURAL_NET);
+ std::unique_ptr<ml::train::Model> model;
try {
+ /**
+ * @brief Neural Network Create & Initialization
+ */
+ model = createModel(ml::train::ModelType::NEURAL_NET);
model->loadFromConfig(config);
} catch (...) {
std::cerr << "Error during loadFromConfig" << std::endl;
- return 0;
+ return 1;
}
try {
model->compile();
model->initialize();
+ model->readModel();
+ model->setDataset(dataset);
} catch (...) {
std::cerr << "Error during init" << std::endl;
- return 0;
+ return 1;
}
- model->readModel();
- model->setDataset(dataset);
#if defined(APP_VALIDATE)
status = model->setProperty({"epochs=5"});
if (status != ML_ERROR_NONE) {
image[i] = ((float)in[i]) / 255.0;
}
- free(in);
+ delete[] in;
}
/**
getBatch_train);
dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_VAL, getBatch_val);
+ std::unique_ptr<ml::train::Model> model;
/**
* @brief Neural Network Create & Initialization
*/
- std::unique_ptr<ml::train::Model> model =
- createModel(ml::train::ModelType::NEURAL_NET);
-
try {
+ model = createModel(ml::train::ModelType::NEURAL_NET);
model->loadFromConfig(config);
} catch (...) {
std::cerr << "Error during loadFromConfig" << std::endl;
}
if (INPUT_SIZE != input_img_size) {
- delete in;
+ delete[] in;
throw std::runtime_error("Input size does not match the required size");
}
float featureVector[INPUT_SIZE];
status = getInputFeature_c(test_file_path, featureVector);
+ free(test_file_path);
if (status != ML_ERROR_NONE)
goto fail_info_release;
std::string data_path = args[1];
/// @todo add capi version of this
- nntrainer::AppContext::Global().setWorkingDirectory(data_path);
+ try {
+ nntrainer::AppContext::Global().setWorkingDirectory(data_path);
+ } catch (std::invalid_argument &e) {
+ std::cerr << "setting data_path failed, pwd is used instead";
+ }
srand(time(NULL));
return 0;
}
- NN.readModel();
- NN.setDataBuffer((DB));
-
try {
+ NN.readModel();
+ NN.setDataBuffer((DB));
NN.train();
training_loss = NN.getTrainingLoss();
validation_loss = NN.getValidationLoss();
if (!alloc_train) {
duplicate = (bool *)malloc(sizeof(bool) * data_size);
+ if (duplicate == nullptr) {
+      ml_loge("[test_util] allocating memory failed");
+ alloc_train = false;
+ *last = false;
+ F.close();
+ return ML_ERROR_BAD_ADDRESS;
+ }
+
for (unsigned int i = 0; i < data_size; ++i) {
duplicate[i] = false;
}
if (!alloc_val) {
valduplicate = (bool *)malloc(sizeof(bool) * data_size);
+ if (valduplicate == nullptr) {
+      ml_loge("[test_util] allocating memory failed");
+ alloc_val = false;
+ *last = false;
+ F.close();
+ return ML_ERROR_BAD_ADDRESS;
+ }
for (unsigned int i = 0; i < data_size; ++i) {
valduplicate[i] = false;
}