1 // SPDX-License-Identifier: Apache-2.0
3 * Copyright (C) 2021 Jijoong Moon <jijoong.moon@samsung.com>
7 * @see https://github.com/nnstreamer/nntrainer
8 * @author Jijoong Moon <jijoong.moon@samsung.com>
9 * @bug No known bugs except for NYI items
10 * @brief This is a simple recommendation system Example
12 * Training set (embedding_input.txt) : 4 column data + result (1.0
13 * or 0.0) Configuration file : ../../res/Embedding.ini
25 #include <ml-api-common.h>
26 #include <neuralnet.h>
// Path of the text data file; assigned from argv in main() — TODO confirm
29 std::string data_file;
// Number of rows consumed for training
31 const unsigned int total_train_data_size = 25;
// Cursor into the training rows; advanced by getBatch_train() after each batch
33 unsigned int train_count = 0;
// Samples per mini-batch handed to the generator callback
35 const unsigned int batch_size = 20;
// Features per sample (per the header comment: user id and product id)
37 const unsigned int feature_size = 2;
// Number of rows used for the validation/accuracy pass in main()
39 const unsigned int total_val_data_size = 25;
// True when the binary is invoked in "train" mode (set in main)
41 bool training = false;
/**
 * @brief     step function - binarize a model score around 0.5
 * @param[in] x value to be distinguished
 * @retval    1.0f when x > 0.5, 0.0f when x < 0.5, and x itself at
 *            exactly 0.5 (ambiguous score is passed through unchanged)
 * @note float literals are used throughout to avoid implicit
 *       float -> double promotion in the comparisons.
 */
float stepFunction(float x) {
  if (x > 0.5f) {
    return 1.0f;
  }

  if (x < 0.5f) {
    return 0.0f;
  }

  return x;
}
/**
 * @brief     get the id-th data row from the text data file
 * @param[in] F file stream; the whole file is rescanned on every call
 * @param[out] outVec feature data (caller is expected to have resized it
 *             to feature_size — see the callers in this file)
 * @param[out] outLabel label data (first element receives the label)
 * @param[in] id zero-based index of the row to read
 * @retval boolean true if there is no error
 * @note NOTE(review): several body lines are elided in this excerpt;
 *       comments below describe only the visible skeleton.
 */
68 bool getData(std::ifstream &F, std::vector<float> &outVec,
69 std::vector<float> &outLabel, unsigned int id) {
// Rewind so row `id` can be located by scanning from the top of the file
72 F.seekg(0, std::ios_base::beg);
// Skip characters until `id` rows have passed — presumably the (elided)
// loop body counts '\n' occurrences; TODO confirm
75 while (F.get(c) && i < id)
// Read the requested row itself; fail when the file has fewer rows
81 if (!std::getline(F, temp)) {
// Tokenize the row; the loop copies feature_size values into outVec
85 std::istringstream buffer(temp);
87 for (unsigned int j = 0; j < feature_size; ++j) {
/**
 * @brief     Load binary data from @a filename into @a t
 * @param[in] filename path of the file to read
 * @param[out] t destination object exposing read(std::ifstream &)
 *             (e.g. nntrainer::Tensor)
 * @throws    std::runtime_error when the file cannot be opened for reading
 * @note The visible excerpt showed the throw without its guard; the stream
 *       is validated with good() so the error is raised only on open failure.
 */
template <typename T> void loadFile(const char *filename, T &t) {
  std::ifstream file(filename);
  if (!file.good()) {
    throw std::runtime_error("could not read, check filename");
  }
  t.read(file);
  file.close();
}
/**
 * @brief get Data as much as batch size
 * @param[out] outVec feature data; outVec[0] is filled row-major as
 *             batch_size x feature_size floats
 * @param[out] outLabel label data; outLabel[0] receives one label per sample
 * @param[out] last end of data — signalled when fewer than batch_size
 *             samples remain (handling elided in this excerpt)
 * @param[in] user_data user data (unused here)
 * @retval int 0 if there is no error
 * @note NOTE(review): the second parameter line and several body lines are
 *       elided below; comments describe the visible skeleton only.
 */
114 int getBatch_train(float **outVec, float **outLabel, bool *last,
// Reopen the data file each invocation; getData() rescans it from the top
116 std::ifstream dataFile(data_file);
117 unsigned int data_size = total_train_data_size;
118 unsigned int count = 0;
// Not enough samples left for a full batch — the (elided) branch presumably
// sets *last and resets train_count; TODO confirm
120 if (data_size - train_count < batch_size) {
// Scratch buffers for one sample's features and label
126 std::vector<float> o;
127 std::vector<float> l;
128 o.resize(feature_size);
// Copy the next batch_size samples into the caller-provided flat buffers
131 for (unsigned int i = train_count; i < train_count + batch_size; ++i) {
132 if (!getData(dataFile, o, l, i)) {
136 for (unsigned int j = 0; j < feature_size; ++j) {
137 outVec[0][count * feature_size + j] = o[j];
139 outLabel[0][count] = l[0];
// Advance the data cursor so the next call yields the following batch
146 train_count += batch_size;
/**
 * @brief main function: runs training (with back propagation) or inference
 *        of the embedding NN depending on the command line
 * @param[in] arg 1 : train / inference
 * @param[in] arg 2 : configuration file path
 * @param[in] arg 3 : resource path (data) with below format
 *            (int) (int) (float) #first data
 * @note each row represents user id, product id, rating (0 to 10)
 * @note NOTE(review): many control-flow lines (argc guard, try blocks,
 *       returns) are elided in this excerpt; comments annotate what is
 *       visible only.
 */
160 int main(int argc, char *argv[]) {
// Usage hint; the surrounding argc check is elided in this excerpt
162 std::cout << "./Embedding train (| inference) Config.ini data.txt\n";
// args[0] = mode, args[1] = config path, args[2] = data path
166 const std::vector<std::string> args(argv + 1, argv + argc);
167 std::string config = args[1];
// compare() returns 0 on equality, hence the negation: mode == "train"
170 if (!args[0].compare("train"))
// Generator-backed datasets; both feed from getBatch_train in this example
175 std::shared_ptr<ml::train::Dataset> dataset_train, dataset_val;
178 createDataset(ml::train::DatasetType::GENERATOR, getBatch_train);
180 createDataset(ml::train::DatasetType::GENERATOR, getBatch_train);
181 } catch (std::exception &e) {
182 std::cerr << "Error creating dataset" << e.what() << std::endl;
// Network object configured from the .ini file given on the command line
189 std::vector<std::vector<float>> inputVector, outputVector;
190 nntrainer::NeuralNetwork NN;
/**
 * @brief Initialize NN with configuration file path
 */
196 auto status = NN.loadFromConfig(config);
197 if (status != ML_ERROR_NONE) {
198 std::cerr << "Error during loading" << std::endl;
// compile then initialize; each step reports an ML_ERROR_* status code
202 status = NN.compile();
203 if (status != ML_ERROR_NONE) {
204 std::cerr << "Error during compile" << std::endl;
207 status = NN.initialize();
208 if (status != ML_ERROR_NONE) {
209 std::cerr << "Error during initialize" << std::endl;
214 std::cout << "Input dimension: " << NN.getInputDimension()[0];
216 } catch (std::exception &e) {
217 std::cerr << "Unexpected Error during init " << e.what() << std::endl;
// Attach datasets, then train with the globally configured batch size
222 NN.setDataset(ml::train::DatasetDataUsageType::DATA_TRAIN, dataset_train);
223 NN.setDataset(ml::train::DatasetDataUsageType::DATA_VAL, dataset_val);
225 NN.train({"batch_size=" + std::to_string(batch_size)});
226 } catch (std::exception &e) {
227 std::cerr << "Error during train " << e.what() << std::endl;
232 /****** testing with a golden data if any ********/
// Golden tensors: 15x8 embedding weights and 32x1 fully-connected weights
233 nntrainer::Tensor golden(1, 1, 15, 8);
235 loadFile("embedding_weight_golden.out", golden);
236 golden.print(std::cout);
238 nntrainer::Tensor weight_out_fc(1, 1, 32, 1);
239 loadFile("fc_weight_golden.out", weight_out_fc);
240 weight_out_fc.print(std::cout);
// Golden files are optional; their absence is only a warning
242 std::cerr << "Warning: during loading golden data\n";
247 } catch (std::exception &e) {
248 std::cerr << "Error during readModel: " << e.what() << "\n";
// Validation pass: run per-sample inference and compare with the label
251 std::ifstream dataFile(data_file);
253 for (unsigned int j = 0; j < total_val_data_size; ++j) {
255 std::vector<float> o;
256 std::vector<float> l;
257 o.resize(feature_size);
// Fetch the j-th validation sample (label-buffer resize elided in excerpt)
260 getData(dataFile, o, l, j);
// Forward the sample and binarize the score with stepFunction
264 NN.inference({MAKE_SHARED_TENSOR(nntrainer::Tensor({o}))})[0]
265 ->apply(stepFunction)
266 .getValue(0, 0, 0, 0);
// Count correct predictions (answer is presumably the binarized score)
268 std::cout << answer << " : " << l[0] << std::endl;
269 cn += answer == l[0];
271 std::cerr << "Error during forwarding the model" << std::endl;
// Report accuracy as a percentage over the validation set
275 std::cout << "[ Accuracy ] : "
276 << ((float)(cn) / total_val_data_size) * 100.0 << "%"