Update exception handling for applications.
Signed-off-by: Seungbaek Hong <sb92.hong@samsung.com>
}
#endif
int main(int argc, char *argv[]) {
- // Setting locale
- std::locale::global(std::locale("ko_KR.UTF-8"));
+ try {
+ // Setting locale
+ std::locale::global(std::locale("ko_KR.UTF-8"));
#if defined(ENABLE_ENCODER)
- // Getting arguments From terminal
- std::wstring input;
- std::getline(std::wcin, input);
- std::wstring test = decodeUnicodeEscape(input);
- std::wstring_convert<std::codecvt_utf16<wchar_t>> converter;
- std::string text = converter.to_bytes(test);
+ // Getting arguments From terminal
+ std::wstring input;
+ std::getline(std::wcin, input);
+ std::wstring test = decodeUnicodeEscape(input);
+ std::wstring_convert<std::codecvt_utf16<wchar_t>> converter;
+ std::string text = converter.to_bytes(test);
#else
- std::string text = "This is smaple input for LLaMA.";
+ std::string text = "This is sample input for LLaMA.";
#endif
- auto &app_context = nntrainer::AppContext::Global();
- try {
- app_context.registerFactory(nntrainer::createLayer<custom::SwiGLULayer>);
- } catch (std::invalid_argument &e) {
- std::cerr << "failed to register factory, reason: " << e.what()
- << std::endl;
- return 1;
- }
+ auto &app_context = nntrainer::AppContext::Global();
+ try {
+ app_context.registerFactory(nntrainer::createLayer<custom::SwiGLULayer>);
+ } catch (std::exception &e) {
+ std::cerr << "failed to register factory, reason: " << e.what()
+ << std::endl;
+ return 1;
+ }
- try {
- app_context.registerFactory(nntrainer::createLayer<custom::RMSNormLayer>);
- } catch (std::invalid_argument &e) {
- std::cerr << "failed to register factory, reason: " << e.what()
- << std::endl;
- return 1;
- }
+ try {
+ app_context.registerFactory(nntrainer::createLayer<custom::RMSNormLayer>);
+ } catch (std::exception &e) {
+ std::cerr << "failed to register factory, reason: " << e.what()
+ << std::endl;
+ return 1;
+ }
- try {
- const std::vector<std::string> args(argv + 1, argv + argc);
+ try {
+ const std::vector<std::string> args(argv + 1, argv + argc);
- bool apply_temp = (strcasecmp("true", args[1].c_str()) == 0);
+ bool apply_temp = (strcasecmp("true", args[1].c_str()) == 0);
- createAndRun(epoch, batch_size);
+ createAndRun(epoch, batch_size);
- run(text, apply_temp);
+ run(text, apply_temp);
+ } catch (const std::exception &e) {
+ std::cerr << "uncaught error while running! details: " << e.what()
+ << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ int status = EXIT_SUCCESS;
+ return status;
} catch (const std::exception &e) {
- std::cerr << "uncaught error while running! details: " << e.what()
- << std::endl;
- return EXIT_FAILURE;
+ std::cerr << "uncaught error while running! details: " << e.what() << "\n";
+ return 1;
}
-
- int status = EXIT_SUCCESS;
- return status;
}
* in each row represents user id, product id, rating (0 to 10)
*/
int main(int argc, char *argv[]) {
- if (argc < 4) {
- std::cout << "./Embedding train (| inference) Config.ini data.txt\n";
- exit(1);
- }
-
- std::string weight_path = "product_ratings_model.bin";
-
- const std::vector<std::string> args(argv + 1, argv + argc);
- std::string config = args[1];
- data_file = args[2];
-
- if (!args[0].compare("train"))
- training = true;
-
- train_idxes.resize(total_train_data_size);
- std::iota(train_idxes.begin(), train_idxes.end(), 0);
- rng.seed(SEED);
-
- std::shared_ptr<ml::train::Dataset> dataset_train, dataset_val;
- try {
- dataset_train =
- createDataset(ml::train::DatasetType::GENERATOR, getSample_train);
- dataset_val =
- createDataset(ml::train::DatasetType::GENERATOR, getSample_train);
- } catch (std::exception &e) {
- std::cerr << "Error creating dataset " << e.what() << std::endl;
- return 1;
- }
-
- /**
- * @brief Create NN
- */
- nntrainer::NeuralNetwork NN;
- /**
- * @brief Initialize NN with configuration file path
- */
-
try {
- auto status = NN.loadFromConfig(config);
- if (status != 0) {
- std::cerr << "Error during loading" << std::endl;
- return 1;
+ if (argc < 4) {
+ std::cout << "./Embedding train (| inference) Config.ini data.txt\n";
+ exit(1);
}
- status = NN.compile();
- if (status != 0) {
- std::cerr << "Error during compile" << std::endl;
- return 1;
- }
- status = NN.initialize();
- if (status != 0) {
- std::cerr << "Error during initialize" << std::endl;
- return 1;
- }
+ std::string weight_path = "product_ratings_model.bin";
- std::cout << "Input dimension: " << NN.getInputDimension()[0];
+ const std::vector<std::string> args(argv + 1, argv + argc);
+ std::string config = args[1];
+ data_file = args[2];
- } catch (std::exception &e) {
- std::cerr << "Unexpected Error during init " << e.what() << std::endl;
- return 1;
- }
+ if (!args[0].compare("train"))
+ training = true;
+
+ train_idxes.resize(total_train_data_size);
+ std::iota(train_idxes.begin(), train_idxes.end(), 0);
+ rng.seed(SEED);
- if (training) {
- NN.setDataset(ml::train::DatasetModeType::MODE_TRAIN, dataset_train);
- NN.setDataset(ml::train::DatasetModeType::MODE_VALID, dataset_val);
+ std::shared_ptr<ml::train::Dataset> dataset_train, dataset_val;
try {
- NN.train({"batch_size=" + std::to_string(batch_size)});
+ dataset_train =
+ createDataset(ml::train::DatasetType::GENERATOR, getSample_train);
+ dataset_val =
+ createDataset(ml::train::DatasetType::GENERATOR, getSample_train);
} catch (std::exception &e) {
- std::cerr << "Error during train " << e.what() << std::endl;
+ std::cerr << "Error creating dataset " << e.what() << std::endl;
return 1;
}
+ /**
+ * @brief Create NN
+ */
+ nntrainer::NeuralNetwork NN;
+ /**
+ * @brief Initialize NN with configuration file path
+ */
+
try {
- /****** testing with a golden data if any ********/
- nntrainer::Tensor golden(1, 1, 15, 8);
+ auto status = NN.loadFromConfig(config);
+ if (status != 0) {
+ std::cerr << "Error during loading" << std::endl;
+ return 1;
+ }
+
+ status = NN.compile();
+ if (status != 0) {
+ std::cerr << "Error during compile" << std::endl;
+ return 1;
+ }
+ status = NN.initialize();
+ if (status != 0) {
+ std::cerr << "Error during initialize" << std::endl;
+ return 1;
+ }
- loadFile("embedding_weight_golden.out", golden);
- golden.print(std::cout);
+ std::cout << "Input dimension: " << NN.getInputDimension()[0];
- nntrainer::Tensor weight_out_fc(1, 1, 32, 1);
- loadFile("fc_weight_golden.out", weight_out_fc);
- weight_out_fc.print(std::cout);
- } catch (...) {
- std::cerr << "Warning: during loading golden data\n";
- }
- } else {
- try {
- NN.load(weight_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
} catch (std::exception &e) {
- std::cerr << "Error during loading weights: " << e.what() << "\n";
+ std::cerr << "Unexpected Error during init " << e.what() << std::endl;
return 1;
}
- std::ifstream dataFile(data_file);
- int cn = 0;
- for (unsigned int j = 0; j < total_val_data_size; ++j) {
- nntrainer::Tensor d;
- std::vector<float> o;
- std::vector<float> l;
- o.resize(feature_size);
- l.resize(1);
- getData(dataFile, o.data(), l.data(), j);
+ if (training) {
+ NN.setDataset(ml::train::DatasetModeType::MODE_TRAIN, dataset_train);
+ NN.setDataset(ml::train::DatasetModeType::MODE_VALID, dataset_val);
+ try {
+ NN.train({"batch_size=" + std::to_string(batch_size)});
+ } catch (std::exception &e) {
+ std::cerr << "Error during train " << e.what() << std::endl;
+ return 1;
+ }
try {
- float answer =
- NN.inference({MAKE_SHARED_TENSOR(nntrainer::Tensor({o}, nntrainer::TensorDim::TensorType()))})[0]
- ->apply<float>(stepFunction)
- .getValue(0, 0, 0, 0);
+ /****** testing with a golden data if any ********/
+ nntrainer::Tensor golden(1, 1, 15, 8);
+
+ loadFile("embedding_weight_golden.out", golden);
+ golden.print(std::cout);
- std::cout << answer << " : " << l[0] << std::endl;
- cn += answer == l[0];
+ nntrainer::Tensor weight_out_fc(1, 1, 32, 1);
+ loadFile("fc_weight_golden.out", weight_out_fc);
+ weight_out_fc.print(std::cout);
} catch (...) {
- std::cerr << "Error during forwarding the model" << std::endl;
+ std::cerr << "Warning: during loading golden data\n";
+ }
+ } else {
+ try {
+ NN.load(weight_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
+ } catch (std::exception &e) {
+ std::cerr << "Error during loading weights: " << e.what() << "\n";
return 1;
}
+ std::ifstream dataFile(data_file);
+ int cn = 0;
+ for (unsigned int j = 0; j < total_val_data_size; ++j) {
+ nntrainer::Tensor d;
+ std::vector<float> o;
+ std::vector<float> l;
+ o.resize(feature_size);
+ l.resize(1);
+
+ getData(dataFile, o.data(), l.data(), j);
+
+ try {
+ float answer = NN.inference({MAKE_SHARED_TENSOR(
+ nntrainer::Tensor({o}, nntrainer::TensorDim::TensorType()))})[0]
+ ->apply<float>(stepFunction)
+ .getValue(0, 0, 0, 0);
+
+ std::cout << answer << " : " << l[0] << std::endl;
+ cn += answer == l[0];
+ } catch (...) {
+ std::cerr << "Error during forwarding the model" << std::endl;
+ return 1;
+ }
+ }
+ std::cout << "[ Accuracy ] : "
+ << ((float)(cn) / total_val_data_size) * 100.0 << "%"
+ << std::endl;
}
- std::cout << "[ Accuracy ] : "
- << ((float)(cn) / total_val_data_size) * 100.0 << "%"
- << std::endl;
- }
- /**
- * @brief Finalize NN
- */
- return 0;
+ /**
+ * @brief Finalize NN
+ */
+ return 0;
+ } catch (const std::exception &e) {
+ std::cerr << "uncaught error while running! details: " << e.what() << "\n";
+ return 1;
+ }
}
* @param[in] arg 2 : resource path
*/
int main(int argc, char *argv[]) {
- if (argc < 3) {
- std::cout << "./nntrainer_classification Config.ini resources\n";
- exit(0);
- }
- const vector<string> args(argv + 1, argv + argc);
- std::string config = args[0];
- data_path = args[1] + '/';
-
- /// @todo add api version of this
try {
- nntrainer::AppContext::Global().setWorkingDirectory(data_path);
- } catch (std::invalid_argument &e) {
- std::cerr << "setting data_path failed, pwd is used instead";
- }
+ if (argc < 3) {
+ std::cout << "./nntrainer_classification Config.ini resources\n";
+ exit(0);
+ }
+ const vector<string> args(argv + 1, argv + argc);
+ std::string config = args[0];
+ data_path = args[1] + '/';
- seed = time(NULL);
- srand(seed);
+ /// @todo add api version of this
+ try {
+ nntrainer::AppContext::Global().setWorkingDirectory(data_path);
+ } catch (std::invalid_argument &e) {
+ std::cerr << "setting data_path failed, pwd is used instead";
+ }
- std::vector<std::vector<float>> inputVector, outputVector;
- std::vector<std::vector<float>> inputValVector, outputValVector;
- std::vector<std::vector<float>> inputTestVector, outputTestVector;
+ seed = time(NULL);
+ srand(seed);
+
+ std::vector<std::vector<float>> inputVector, outputVector;
+ std::vector<std::vector<float>> inputValVector, outputValVector;
+ std::vector<std::vector<float>> inputTestVector, outputTestVector;
+
+ if (!read(inputVector, outputVector, "training")) {
+ /**
+ * @brief Extract Feature
+ */
+ std::string filename = data_path + "trainingSet.dat";
+ std::ofstream f(filename, std::ios::out | std::ios::binary);
+ try {
+ ExtractFeatures(data_path, inputVector, outputVector, "training", f);
+ } catch (...) {
+ std::cerr << "Error during open file: " << filename << std::endl;
+ return 1;
+ }
+ f.close();
+ }
- if (!read(inputVector, outputVector, "training")) {
- /**
- * @brief Extract Feature
- */
- std::string filename = data_path + "trainingSet.dat";
- std::ofstream f(filename, std::ios::out | std::ios::binary);
- try {
- ExtractFeatures(data_path, inputVector, outputVector, "training", f);
- } catch (...) {
- std::cerr << "Error during open file: " << filename << std::endl;
- return 1;
+ if (!read(inputValVector, outputValVector, "val")) {
+ /**
+ * @brief Extract Feature
+ */
+ std::string filename = data_path + "valSet.dat";
+ std::ofstream f(filename, std::ios::out | std::ios::binary);
+ try {
+ ExtractFeatures(data_path, inputValVector, outputValVector, "val", f);
+ } catch (...) {
+ std::cerr << "Error during open file: " << filename << std::endl;
+ return 1;
+ }
+ f.close();
+ }
+
+ if (!read(inputTestVector, outputTestVector, "test")) {
+ /**
+ * @brief Extract Feature
+ */
+ std::string filename = data_path + "testSet.dat";
+ std::ofstream f(filename, std::ios::out | std::ios::binary);
+ try {
+ ExtractFeatures(data_path, inputTestVector, outputTestVector, "test",
+ f);
+ } catch (...) {
+ std::cerr << "Error during open file: " << filename << std::endl;
+ return 1;
+ }
+ f.close();
}
- f.close();
- }
- if (!read(inputValVector, outputValVector, "val")) {
/**
- * @brief Extract Feature
+ * @brief Neural Network Create & Initialization
*/
- std::string filename = data_path + "valSet.dat";
- std::ofstream f(filename, std::ios::out | std::ios::binary);
+ nntrainer::NeuralNetwork NN;
+ int status = ML_ERROR_NONE;
try {
- ExtractFeatures(data_path, inputValVector, outputValVector, "val", f);
+ NN.load(config, ml::train::ModelFormat::MODEL_FORMAT_INI);
+ // NN.load(weight_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
+
+ status = NN.compile();
+ if (status != ML_ERROR_NONE)
+ return status;
+
+ status = NN.initialize();
+ if (status != ML_ERROR_NONE)
+ return status;
} catch (...) {
- std::cerr << "Error during open file: " << filename << std::endl;
+ std::cerr << "Error during init" << std::endl;
return 1;
}
- f.close();
- }
- if (!read(inputTestVector, outputTestVector, "test")) {
- /**
- * @brief Extract Feature
- */
- std::string filename = data_path + "testSet.dat";
- std::ofstream f(filename, std::ios::out | std::ios::binary);
try {
- ExtractFeatures(data_path, inputTestVector, outputTestVector, "test", f);
+ NN.train();
} catch (...) {
- std::cerr << "Error during open file: " << filename << std::endl;
+ std::cerr << "Error during train" << std::endl;
return 1;
}
- f.close();
- }
- /**
- * @brief Neural Network Create & Initialization
- */
- nntrainer::NeuralNetwork NN;
- int status = ML_ERROR_NONE;
- try {
- NN.load(config, ml::train::ModelFormat::MODEL_FORMAT_INI);
- // NN.load(weight_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
-
- status = NN.compile();
- if (status != ML_ERROR_NONE)
- return status;
-
- status = NN.initialize();
- if (status != ML_ERROR_NONE)
- return status;
- } catch (...) {
- std::cerr << "Error during init" << std::endl;
- return 1;
- }
+ if (!TRAINING) {
+ std::string img = data_path;
+ std::vector<float> featureVector, resultVector;
+ featureVector.resize(feature_size);
+ getFeature(img, featureVector);
+
+ nntrainer::Tensor X;
+ try {
+ X = nntrainer::Tensor({featureVector}, {nntrainer::Tformat::NCHW,
+ nntrainer::Tdatatype::FP32});
+ NN.forwarding({MAKE_SHARED_TENSOR(X)})[0]->apply<float>(stepFunction);
+ } catch (...) {
+ std::cerr << "Error while forwarding the model" << std::endl;
+ return 1;
+ }
+ }
- try {
- NN.train();
- } catch (...) {
- std::cerr << "Error during train" << std::endl;
+ return 0;
+ } catch (const std::exception &e) {
+ std::cerr << "uncaught error while running! details: " << e.what() << "\n";
return 1;
}
-
- if (!TRAINING) {
- std::string img = data_path;
- std::vector<float> featureVector, resultVector;
- featureVector.resize(feature_size);
- getFeature(img, featureVector);
-
- nntrainer::Tensor X;
- try {
- X = nntrainer::Tensor({featureVector}, {nntrainer::Tformat::NCHW,
- nntrainer::Tdatatype::FP32});
- NN.forwarding({MAKE_SHARED_TENSOR(X)})[0]->apply<float>(stepFunction);
- } catch (...) {
- std::cerr << "Error while forwarding the model" << std::endl;
- return 1;
- }
- }
-
- return 0;
}
* @param[in] arg 2 : resource path
*/
int main(int argc, char *argv[]) {
- if (argc < 3) {
- std::cout << "./nntrainer_classification Config.ini resources\n";
- exit(0);
- }
- const vector<string> args(argv + 1, argv + argc);
- std::string config = args[0];
- data_path = args[1] + "/";
-
- /// @todo add api version of this
try {
- nntrainer::AppContext::Global().setWorkingDirectory(data_path);
- } catch (std::invalid_argument &e) {
- std::cerr << "setting data_path failed, pwd is used instead";
- }
-
- srand(SEED);
- std::vector<std::vector<float>> inputVector, outputVector;
- std::vector<std::vector<float>> inputValVector, outputValVector;
- std::vector<std::vector<float>> inputTestVector, outputTestVector;
-
- /* This is to check duplication of data */
- memset(duplicate, 0, sizeof(bool) * total_label_size * total_train_data_size);
- memset(valduplicate, 0,
- sizeof(bool) * total_label_size * total_val_data_size);
+ if (argc < 3) {
+ std::cout << "./nntrainer_classification Config.ini resources\n";
+ exit(0);
+ }
+ const vector<string> args(argv + 1, argv + argc);
+ std::string config = args[0];
+ data_path = args[1] + "/";
+
+ /// @todo add api version of this
+ try {
+ nntrainer::AppContext::Global().setWorkingDirectory(data_path);
+ } catch (std::exception &e) {
+ std::cerr << "setting data_path failed. details: " << e.what() << "\n";
+ return 1;
+ }
- /**
- * @brief Data buffer Create & Initialization
- */
- std::shared_ptr<ml::train::Dataset> dataset_train, dataset_val;
- try {
- dataset_train =
- createDataset(ml::train::DatasetType::GENERATOR, getBatch_train);
- dataset_val =
- createDataset(ml::train::DatasetType::GENERATOR, getBatch_val);
- } catch (...) {
- std::cerr << "Error creating dataset" << std::endl;
- return 1;
- }
+ srand(SEED);
+ std::vector<std::vector<float>> inputVector, outputVector;
+ std::vector<std::vector<float>> inputValVector, outputValVector;
+ std::vector<std::vector<float>> inputTestVector, outputTestVector;
+
+ /* This is to check duplication of data */
+ memset(duplicate, 0,
+ sizeof(bool) * total_label_size * total_train_data_size);
+ memset(valduplicate, 0,
+ sizeof(bool) * total_label_size * total_val_data_size);
+
+ /**
+ * @brief Data buffer Create & Initialization
+ */
+ std::shared_ptr<ml::train::Dataset> dataset_train, dataset_val;
+ try {
+ dataset_train =
+ createDataset(ml::train::DatasetType::GENERATOR, getBatch_train);
+ dataset_val =
+ createDataset(ml::train::DatasetType::GENERATOR, getBatch_val);
+ } catch (...) {
+ std::cerr << "Error creating dataset" << std::endl;
+ return 1;
+ }
- std::unique_ptr<ml::train::Model> model;
- /**
- * @brief Neural Network Create & Initialization
- */
- try {
- model = createModel(ml::train::ModelType::NEURAL_NET);
- model->load(config, ml::train::ModelFormat::MODEL_FORMAT_INI_WITH_BIN);
- } catch (...) {
- std::cerr << "Error during loadFromConfig" << std::endl;
- return 1;
- }
- try {
- model->compile();
- model->initialize();
- } catch (...) {
- std::cerr << "Error during init" << std::endl;
- return 1;
- }
- model->setDataset(ml::train::DatasetModeType::MODE_TRAIN, dataset_train);
- model->setDataset(ml::train::DatasetModeType::MODE_VALID, dataset_val);
+ std::unique_ptr<ml::train::Model> model;
+ /**
+ * @brief Neural Network Create & Initialization
+ */
+ try {
+ model = createModel(ml::train::ModelType::NEURAL_NET);
+ model->load(config, ml::train::ModelFormat::MODEL_FORMAT_INI_WITH_BIN);
+ } catch (...) {
+ std::cerr << "Error during loadFromConfig" << std::endl;
+ return 1;
+ }
+ try {
+ model->compile();
+ model->initialize();
+ } catch (...) {
+ std::cerr << "Error during init" << std::endl;
+ return 1;
+ }
+ model->setDataset(ml::train::DatasetModeType::MODE_TRAIN, dataset_train);
+ model->setDataset(ml::train::DatasetModeType::MODE_VALID, dataset_val);
+
+ /**
+ * @brief Neural Network Train & validation
+ */
+ try {
+ model->train();
+ } catch (...) {
+ std::cerr << "Error during train" << std::endl;
+ return 1;
+ }
- /**
- * @brief Neural Network Train & validation
- */
- try {
- model->train();
- } catch (...) {
- std::cerr << "Error during train" << std::endl;
+ /**
+ * @brief Finalize NN
+ */
+ return 0;
+ } catch (const std::exception &e) {
+ std::cerr << "uncaught error while running! details: " << e.what() << "\n";
return 1;
}
-
- /**
- * @brief Finalize NN
- */
- return 0;
}
}
int main(int argc, char *argv[]) {
- // print start time
- auto start = std::chrono::system_clock::now();
- std::time_t start_time = std::chrono::system_clock::to_time_t(start);
- std::cout << "started computation at " << std::ctime(&start_time)
- << std::endl;
-
- // set training config and print it
- std::cout << "batch_size: " << BATCH_SIZE << " epochs: " << EPOCHS
- << std::endl;
-
try {
- auto &app_context = nntrainer::AppContext::Global();
- app_context.registerFactory(nntrainer::createLayer<custom::UpsampleLayer>);
- } catch (std::invalid_argument &e) {
- std::cerr << "failed to register factory, reason: " << e.what()
+ // print start time
+ auto start = std::chrono::system_clock::now();
+ std::time_t start_time = std::chrono::system_clock::to_time_t(start);
+ std::cout << "started computation at " << std::ctime(&start_time)
<< std::endl;
- return 1;
- }
- try {
- auto &app_context = nntrainer::AppContext::Global();
- app_context.registerFactory(
- nntrainer::createLayer<custom::YoloV3LossLayer>);
- } catch (std::invalid_argument &e) {
- std::cerr << "failed to register yolov3 loss, reason: " << e.what()
+ // set training config and print it
+ std::cout << "batch_size: " << BATCH_SIZE << " epochs: " << EPOCHS
<< std::endl;
- return 1;
- }
-
- try {
- // create YOLOv3 model
- ModelHandle model = YOLOv3();
- model->setProperty({withKey("batch_size", BATCH_SIZE),
- withKey("epochs", EPOCHS),
- withKey("save_path", "darknet53.bin")});
- // create optimizer
- auto optimizer = ml::train::createOptimizer(
- "adam", {"learning_rate=0.000001", "epsilon=1e-8", "torch_ref=true"});
- model->setOptimizer(std::move(optimizer));
-
- // compile and initialize model
- model->compile();
- model->initialize();
-
- model->summarize(std::cout,
- ml_train_summary_type_e::ML_TRAIN_SUMMARY_MODEL);
-
- // create train and validation data
- std::array<UserDataType, 1> user_datas;
- user_datas = createDetDataGenerator(TRAIN_DIR_PATH, MAX_OBJECT_NUMBER, 3,
- IMAGE_HEIGHT_SIZE, IMAGE_WIDTH_SIZE);
- auto &[train_user_data] = user_datas;
-
- auto dataset_train = ml::train::createDataset(
- ml::train::DatasetType::GENERATOR, trainData_cb, train_user_data.get());
-
- model->setDataset(ml::train::DatasetModeType::MODE_TRAIN,
- std::move(dataset_train));
-
- model->train();
+ try {
+ auto &app_context = nntrainer::AppContext::Global();
+ app_context.registerFactory(
+ nntrainer::createLayer<custom::UpsampleLayer>);
+ } catch (std::exception &e) {
+ std::cerr << "failed to register factory, reason: " << e.what()
+ << std::endl;
+ return 1;
+ }
+
+ try {
+ auto &app_context = nntrainer::AppContext::Global();
+ app_context.registerFactory(
+ nntrainer::createLayer<custom::YoloV3LossLayer>);
+ } catch (std::exception &e) {
+ std::cerr << "failed to register yolov3 loss, reason: " << e.what()
+ << std::endl;
+ return 1;
+ }
+
+ try {
+ // create YOLOv3 model
+ ModelHandle model = YOLOv3();
+ model->setProperty({withKey("batch_size", BATCH_SIZE),
+ withKey("epochs", EPOCHS),
+ withKey("save_path", "darknet53.bin")});
+
+ // create optimizer
+ auto optimizer = ml::train::createOptimizer(
+ "adam", {"learning_rate=0.000001", "epsilon=1e-8", "torch_ref=true"});
+ model->setOptimizer(std::move(optimizer));
+
+ // compile and initialize model
+ model->compile();
+ model->initialize();
+
+ model->summarize(std::cout,
+ ml_train_summary_type_e::ML_TRAIN_SUMMARY_MODEL);
+
+ // create train and validation data
+ std::array<UserDataType, 1> user_datas;
+ user_datas = createDetDataGenerator(TRAIN_DIR_PATH, MAX_OBJECT_NUMBER, 3,
+ IMAGE_HEIGHT_SIZE, IMAGE_WIDTH_SIZE);
+ auto &[train_user_data] = user_datas;
+
+ auto dataset_train = ml::train::createDataset(
+ ml::train::DatasetType::GENERATOR, trainData_cb, train_user_data.get());
+
+ model->setDataset(ml::train::DatasetModeType::MODE_TRAIN,
+ std::move(dataset_train));
+
+ model->train();
+ } catch (const std::exception &e) {
+ std::cerr << "uncaught error while running! details: " << e.what()
+ << std::endl;
+ return EXIT_FAILURE;
+ }
+
+ // print end time and duration
+ auto end = std::chrono::system_clock::now();
+ std::chrono::duration<double> elapsed_seconds = end - start;
+ std::time_t end_time = std::chrono::system_clock::to_time_t(end);
+ std::cout << "finished computation at " << std::ctime(&end_time)
+ << "elapsed time: " << elapsed_seconds.count() << "s\n";
} catch (const std::exception &e) {
- std::cerr << "uncaught error while running! details: " << e.what()
- << std::endl;
- return EXIT_FAILURE;
+ std::cerr << "uncaught error while running! details: " << e.what() << "\n";
+ return 1;
}
-
- // print end time and duration
- auto end = std::chrono::system_clock::now();
- std::chrono::duration<double> elapsed_seconds = end - start;
- std::time_t end_time = std::chrono::system_clock::to_time_t(end);
- std::cout << "finished computation at " << std::ctime(&end_time)
- << "elapsed time: " << elapsed_seconds.count() << "s\n";
}
return [sz, input_dims, this](unsigned int idx, std::vector<Tensor> &inputs,
std::vector<Tensor> &labels) {
- NNTR_THROW_IF(idx >= sz, std::range_error)
+ NNTR_THROW_IF(idx >= sz, std::range_error)
<< "given index is out of bound, index: " << idx << " size: " << sz;
std::string file_name = data_list[idx].second;
#include <chrono>
#include <exception>
#include <future>
+#include <iostream>
#include <memory>
#include <stdexcept>
#include <tuple>
TaskExecutor::~TaskExecutor() {
run_thread = false;
-
- task_thread.join();
+ try {
+ task_thread.join();
+ } catch (const std::exception &e) {
+ std::cerr << "An unexpected exception occurred in destructor of "
+ "TaskExecutor. details: "
+ << e.what() << "\n";
+ }
}
int TaskExecutor::run(std::shared_ptr<Task> task) {