This patch fixes inconsistent batch size between source file and model
configuration.
Also found a bug #1238, which will be dealt with separately.
**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
unsigned int train_count = 0;
-const unsigned int batch_size = 10;
+const unsigned int batch_size = 20;
const unsigned int feature_size = 2;
return -1;
}
- for (unsigned int j = 0; j < feature_size; ++j)
+ for (unsigned int j = 0; j < feature_size; ++j) {
outVec[0][count * feature_size + j] = o[j];
+ }
outLabel[0][count] = l[0];
count++;
if (training) {
NN.setDataset(dataset);
try {
- NN.train();
- } catch (...) {
- std::cerr << "Error during train" << std::endl;
+ NN.train({"batch_size=" + std::to_string(batch_size)});
+ } catch (std::exception &e) {
+ std::cerr << "Error during train " << e.what() << std::endl;
return 1;
}