[svace] Error handling for applications/test
author Jihoon Lee <jhoon.it.lee@samsung.com>
Wed, 16 Dec 2020 07:41:44 +0000 (16:41 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Thu, 17 Dec 2020 04:50:49 +0000 (13:50 +0900)
1. Fix inconsistent alloc/dealloc pairs (`new[]` buffers released with `free`/plain `delete`)
2. Add try/catch around statements that can throw
3. Fix a memory leak from `asprintf`

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
Applications/Custom/LayerClient/jni/main.cpp
Applications/MNIST/jni/main.cpp
Applications/TransferLearning/CIFAR_Classification/jni/main_func.cpp
Applications/TransferLearning/Draw_Classification/jni/main.cpp
Applications/VGG/jni/main.cpp
test/nntrainer_test_util.cpp

index 8a32082..584446c 100644 (Applications/Custom/LayerClient/jni/main.cpp)
@@ -127,14 +127,21 @@ int api_model_run() {
     return 1;
   }
 
-  /// creating array of layers same as in `custom_layer_client.ini`
-  std::vector<std::shared_ptr<ml::train::Layer>> layers{
-    ml::train::layer::Input({"name=inputlayer", "input_shape=1:1:100"}),
-    ml::train::createLayer(
-      "pow", {"name=powlayer", "exponent=3", "input_layers=inputlayer"}),
-    ml::train::layer::FullyConnected(
-      {"name=outputlayer", "input_layers=powlayer", "unit=10",
-       "bias_initializer=zeros", "activation=softmax"})};
+  std::vector<std::shared_ptr<ml::train::Layer>> layers;
+
+  try {
+    /// creating array of layers same as in `custom_layer_client.ini`
+    layers = std::vector<std::shared_ptr<ml::train::Layer>>{
+      ml::train::layer::Input({"name=inputlayer", "input_shape=1:1:100"}),
+      ml::train::createLayer(
+        "pow", {"name=powlayer", "exponent=3", "input_layers=inputlayer"}),
+      ml::train::layer::FullyConnected(
+        {"name=outputlayer", "input_layers=powlayer", "unit=10",
+         "bias_initializer=zeros", "activation=softmax"})};
+  } catch (nntrainer::exception::not_supported &e) {
+    std::cerr << "creating model failed";
+    return 1;
+  }
 
   for (auto &layer : layers) {
     model->addLayer(layer);
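
The hunk above is the error-handling pattern in miniature: declare the container before the `try`, build the layers inside it, and turn the exception into a non-zero return. A self-contained sketch of the same shape (`Layer`, `makeLayer`, and `buildLayers` are hypothetical stand-ins, not nntrainer API):

```cpp
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

struct Layer {}; // stand-in for ml::train::Layer

// Stand-in factory; the real factories can throw (e.g.
// nntrainer::exception::not_supported) when a layer type or
// property string is not supported.
std::shared_ptr<Layer> makeLayer(const std::string &type) {
  if (type != "input" && type != "fully_connected")
    throw std::invalid_argument("unknown layer type: " + type);
  return std::make_shared<Layer>();
}

int buildLayers(std::vector<std::shared_ptr<Layer>> &out) {
  try {
    out = {makeLayer("input"), makeLayer("fully_connected")};
  } catch (const std::exception &e) {
    std::cerr << "creating model failed: " << e.what() << '\n';
    return 1; // non-zero so callers and CI see the failure
  }
  return 0;
}
```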
index 49e331e..e7b0fba 100644 (Applications/MNIST/jni/main.cpp)
@@ -271,34 +271,40 @@ int main(int argc, char *argv[]) {
   /**
    * @brief     Data buffer Create & Initialization
    */
-  std::shared_ptr<ml::train::Dataset> dataset =
-    createDataset(ml::train::DatasetType::GENERATOR);
-  dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_TRAIN,
-                            getBatch_train);
-  dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_VAL, getBatch_val);
+  std::shared_ptr<ml::train::Dataset> dataset;
+  try {
+    dataset = createDataset(ml::train::DatasetType::GENERATOR);
+    dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_TRAIN,
+                              getBatch_train);
+    dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_VAL,
+                              getBatch_val);
+  } catch (...) {
+    std::cerr << "Error creating dataset";
+    return 1;
+  }
 
-  /**
-   * @brief     Neural Network Create & Initialization
-   */
-  std::unique_ptr<ml::train::Model> model =
-    createModel(ml::train::ModelType::NEURAL_NET);
+  std::unique_ptr<ml::train::Model> model;
   try {
+    /**
+     * @brief     Neural Network Create & Initialization
+     */
+    model = createModel(ml::train::ModelType::NEURAL_NET);
     model->loadFromConfig(config);
   } catch (...) {
     std::cerr << "Error during loadFromConfig" << std::endl;
-    return 0;
+    return 1;
   }
 
   try {
     model->compile();
     model->initialize();
+    model->readModel();
+    model->setDataset(dataset);
   } catch (...) {
     std::cerr << "Error during init" << std::endl;
-    return 0;
+    return 1;
   }
 
-  model->readModel();
-  model->setDataset(dataset);
 #if defined(APP_VALIDATE)
   status = model->setProperty({"epochs=5"});
   if (status != ML_ERROR_NONE) {
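
Note that `catch (...)` above discards the error detail. A possible refinement (a sketch only, not part of this patch) is to catch `std::exception` by const reference first, so the message reaches the log, and keep `catch (...)` as the fallback:

```cpp
#include <iostream>
#include <stdexcept>

// Dummy stand-in for the model calls that may throw during init.
void initialize() { throw std::runtime_error("weight file not found"); }

int main() {
  try {
    initialize();
  } catch (const std::exception &e) {
    // Catching by const reference preserves the error message.
    std::cerr << "Error during init: " << e.what() << std::endl;
    return 1;
  } catch (...) {
    std::cerr << "Error during init: unknown exception" << std::endl;
    return 1;
  }
  return 0;
}
```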
index 0c54560..628b6f8 100644 (Applications/TransferLearning/CIFAR_Classification/jni/main_func.cpp)
@@ -123,7 +123,7 @@ void getImage(const string filename, float *image) {
     image[i] = ((float)in[i]) / 255.0;
   }
 
-  free(in);
+  delete[] in;
 }
 
 /**
@@ -271,13 +271,12 @@ int main(int argc, char *argv[]) {
                             getBatch_train);
   dataset->setGeneratorFunc(ml::train::DatasetDataType::DATA_VAL, getBatch_val);
 
+  std::unique_ptr<ml::train::Model> model;
   /**
    * @brief     Neural Network Create & Initialization
    */
-  std::unique_ptr<ml::train::Model> model =
-    createModel(ml::train::ModelType::NEURAL_NET);
-
   try {
+    model = createModel(ml::train::ModelType::NEURAL_NET);
     model->loadFromConfig(config);
   } catch (...) {
     std::cerr << "Error during loadFromConfig" << std::endl;
index 5931f29..a87a48e 100644 (Applications/TransferLearning/Draw_Classification/jni/main.cpp)
@@ -111,7 +111,7 @@ void getInputFeature(const std::string &filename, float *feature_input) {
   }
 
   if (INPUT_SIZE != input_img_size) {
-    delete in;
+    delete[] in;
     throw std::runtime_error("Input size does not match the required size");
   }
 
@@ -331,6 +331,7 @@ int testModel(const char *data_path, const char *model) {
 
     float featureVector[INPUT_SIZE];
     status = getInputFeature_c(test_file_path, featureVector);
+    free(test_file_path);
     if (status != ML_ERROR_NONE)
       goto fail_info_release;
 
@@ -422,7 +423,11 @@ int main(int argc, char *argv[]) {
   std::string data_path = args[1];
 
   /// @todo add capi version of this
-  nntrainer::AppContext::Global().setWorkingDirectory(data_path);
+  try {
+    nntrainer::AppContext::Global().setWorkingDirectory(data_path);
+  } catch (std::invalid_argument &e) {
+    std::cerr << "setting data_path failed, pwd is used instead";
+  }
 
   srand(time(NULL));
 
index ca576e4..372524b 100644 (Applications/VGG/jni/main.cpp)
@@ -423,10 +423,9 @@ int main(int argc, char *argv[]) {
     return 0;
   }
 
-  NN.readModel();
-  NN.setDataBuffer((DB));
-
   try {
+    NN.readModel();
+    NN.setDataBuffer((DB));
     NN.train();
     training_loss = NN.getTrainingLoss();
     validation_loss = NN.getValidationLoss();
index c35d223..8321962 100644 (test/nntrainer_test_util.cpp)
@@ -136,6 +136,14 @@ int getBatch_train(float **outVec, float **outLabel, bool *last,
 
   if (!alloc_train) {
     duplicate = (bool *)malloc(sizeof(bool) * data_size);
+    if (duplicate == nullptr) {
+      ml_loge("[test_util] allocationg memory failed");
+      alloc_train = false;
+      *last = false;
+      F.close();
+      return ML_ERROR_BAD_ADDRESS;
+    }
+
     for (unsigned int i = 0; i < data_size; ++i) {
       duplicate[i] = false;
     }
@@ -213,6 +221,13 @@ int getBatch_val(float **outVec, float **outLabel, bool *last,
 
   if (!alloc_val) {
     valduplicate = (bool *)malloc(sizeof(bool) * data_size);
+    if (valduplicate == nullptr) {
+      ml_loge("[test_util] allocationg memory failed");
+      alloc_val = false;
+      *last = false;
+      F.close();
+      return ML_ERROR_BAD_ADDRESS;
+    }
     for (unsigned int i = 0; i < data_size; ++i) {
       valduplicate[i] = false;
     }
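
Both guards above share one shape: check `malloc`'s return, log, reset the allocation flag, close the stream, and bail out with an error code. As a self-contained sketch of that pattern (the function name is hypothetical), with the note that `calloc` would also zero the flags and make the explicit false-filling loop unnecessary:

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

bool *allocFlags(std::size_t n) {
  // calloc zero-initializes, so the duplicate[i] = false loop goes away
  bool *flags = static_cast<bool *>(calloc(n, sizeof(bool)));
  if (flags == nullptr) {
    std::fprintf(stderr, "[test_util] allocating memory failed\n");
    return nullptr; // caller translates this into ML_ERROR_BAD_ADDRESS
  }
  return flags;
}
```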