From 709c9b5eb2a6eaf35a59757180c95496c0680b83 Mon Sep 17 00:00:00 2001
From: Parichay Kapoor
Date: Tue, 21 Jul 2020 15:46:12 +0900
Subject: [PATCH] [neuralNet] Moving batch size property to compile time

Batch size property should be set before compiling, for now.
Initializing the layers allocates memory for them.
Setting the batch size property with the training call requires the
layers to be re-initialized, which isn't supported yet.

Signed-off-by: Parichay Kapoor
---
 nntrainer/src/neuralnet.cpp             | 25 ++++++++++++-------------
 test/tizen_capi/unittest_tizen_capi.cpp |  5 ++---
 2 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/nntrainer/src/neuralnet.cpp b/nntrainer/src/neuralnet.cpp
index e852a95..e4cdf3a 100644
--- a/nntrainer/src/neuralnet.cpp
+++ b/nntrainer/src/neuralnet.cpp
@@ -392,6 +392,18 @@ int NeuralNetwork::setProperty(std::vector<std::string> values) {
     unsigned int type = parseNetProperty(key);
 
     switch (static_cast<PropertyType>(type)) {
+    case PropertyType::batch_size: {
+      status = setInt(batch_size, value);
+      NN_RETURN_STATUS();
+      for (unsigned int i = 0; i < layers.size(); ++i) {
+        if (layers[i]->getTensorDim().batch() !=
+            static_cast<unsigned int>(batch_size)) {
+          ml_logw("Warning: Batch Size is changing!! : %d -> %d",
+                  layers[i]->getTensorDim().batch(), batch_size);
+          layers[i]->getTensorDim().batch(batch_size);
+        }
+      }
+    } break;
     case PropertyType::cost:
     case PropertyType::loss: {
       cost = (CostType)parseType(value, TOKEN_COST);
@@ -417,20 +429,7 @@ int NeuralNetwork::setTrainConfig(std::vector<std::string> values) {
 
     unsigned int type = parseNetProperty(key);
 
-    /** TODO: disable this batch size */
     switch (static_cast<PropertyType>(type)) {
-    case PropertyType::batch_size: {
-      status = setInt(batch_size, value);
-      NN_RETURN_STATUS();
-      for (unsigned int i = 0; i < layers.size(); ++i) {
-        if (layers[i]->getTensorDim().batch() !=
-            static_cast<unsigned int>(batch_size)) {
-          ml_logw("Warning: Batch Size is changing!! : %d -> %d",
-                  layers[i]->getTensorDim().batch(), batch_size);
-          layers[i]->getTensorDim().batch(batch_size);
-        }
-      }
-    } break;
     case PropertyType::epochs: {
       int e;
       status = setInt(e, value);
diff --git a/test/tizen_capi/unittest_tizen_capi.cpp b/test/tizen_capi/unittest_tizen_capi.cpp
index b3740ba..f83212b 100644
--- a/test/tizen_capi/unittest_tizen_capi.cpp
+++ b/test/tizen_capi/unittest_tizen_capi.cpp
@@ -658,11 +658,10 @@ TEST(nntrainer_capi_nnmodel, train_with_file_01_p) {
   status = ml_train_model_set_dataset(model, dataset);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
-  status = ml_train_model_compile(model, "loss=cross", NULL);
+  status = ml_train_model_compile(model, "loss=cross", "batch_size=16", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
-  status = ml_train_model_run(model, "epochs=2", "batch_size=16",
-                              "model_file=model.bin", NULL);
+  status = ml_train_model_run(model, "epochs=2", "model_file=model.bin", NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   status = ml_train_model_destroy(model);
-- 
2.7.4
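
For reference, a minimal sketch of the updated call order against the Tizen ML
training C API after this patch, assuming `model` and `dataset` handles have
already been prepared as in the test above (error checks trimmed):

    /* batch_size now accompanies the other compile-time properties. */
    int status;

    status = ml_train_model_set_dataset(model, dataset);

    /* Layers are initialized at compile time, so the batch dimension
     * must be known here. */
    status = ml_train_model_compile(model, "loss=cross", "batch_size=16", NULL);

    /* The training call no longer takes batch_size; layers are already
     * allocated with the compile-time batch dimension. */
    status = ml_train_model_run(model, "epochs=2", "model_file=model.bin", NULL);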