[tct] fix coverity issues
author    Seungbaek Hong <sb92.hong@samsung.com>
          Fri, 21 Apr 2023 08:33:43 +0000 (17:33 +0900)
committer jijoong.moon <jijoong.moon@samsung.com>
          Fri, 21 Apr 2023 13:51:20 +0000 (22:51 +0900)
Fix some Coverity issues:

- narrow the lock scopes in ml_train_optimizer_destroy() and
  ml_train_optimizer_set_lr_scheduler() so the handle locks are not held
  longer than needed,
- check the result of tf_to_layer.find() before dereferencing it in the
  TFLite interpreter,
- perform the profiler name-lookup check unconditionally instead of only
  in DEBUG builds,
- pass the handle itself (not its address) in the negative LR scheduler
  destroy test.

This PR is still a work in progress.

**Self evaluation:**
1. Build test:  [X]Passed [ ]Failed [ ]Skipped
2. Run test:  [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Seungbaek Hong <sb92.hong@samsung.com>
api/capi/src/nntrainer.cpp
nntrainer/compiler/tflite_interpreter.cpp
nntrainer/utils/profiler.cpp
test/tizen_capi/unittest_tizen_capi_lr_scheduler.cpp

api/capi/src/nntrainer.cpp
index 2e90275..6915bf4 100644 (file)
@@ -785,17 +785,17 @@ int ml_train_optimizer_destroy(ml_train_optimizer_h optimizer) {
   {
     ML_TRAIN_GET_VALID_OPT_LOCKED_RESET(nnopt, optimizer);
     ML_TRAIN_ADOPT_LOCK(nnopt, optimizer_lock);
+  }
 
-    if (nnopt->in_use) {
-      ml_loge("Cannot delete optimizer already set to a model."
-              "Delete model will delete this optimizer.");
-      return ML_ERROR_INVALID_PARAMETER;
-    }
+  if (nnopt->in_use) {
+    ml_loge("Cannot delete optimizer already set to a model."
+            "Delete model will delete this optimizer.");
+    return ML_ERROR_INVALID_PARAMETER;
+  }
 
-    if (nnopt->lr_sheduler) {
-      ML_TRAIN_RESET_VALIDATED_HANDLE(nnopt->lr_sheduler);
-      delete nnopt->lr_sheduler;
-    }
+  if (nnopt->lr_sheduler) {
+    ML_TRAIN_RESET_VALIDATED_HANDLE(nnopt->lr_sheduler);
+    delete nnopt->lr_sheduler;
   }
 
   delete nnopt;
@@ -854,22 +854,26 @@ int ml_train_optimizer_set_lr_scheduler(ml_train_optimizer_h optimizer,
 
   check_feature_state();
 
-  ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, optimizer);
-  ML_TRAIN_ADOPT_LOCK(nnopt, opt_lock);
-  ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED(nnlrscheduler, lr_scheduler);
-  ML_TRAIN_ADOPT_LOCK(nnlrscheduler, lr_scheduler_lock);
+  std::shared_ptr<ml::train::Optimizer> opt;
+  std::shared_ptr<ml::train::LearningRateScheduler> lr_sched;
+
+  {
+    ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, optimizer);
+    ML_TRAIN_ADOPT_LOCK(nnopt, opt_lock);
+    opt = nnopt->optimizer;
+  }
+
+  {
+    ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED(nnlrscheduler, lr_scheduler);
+    ML_TRAIN_ADOPT_LOCK(nnlrscheduler, lr_scheduler_lock);
+    lr_sched = nnlrscheduler->lr_scheduler;
+  }
 
   if (nnlrscheduler->in_use) {
     ml_loge("learning rate scheduler already in use.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
-  std::shared_ptr<ml::train::Optimizer> opt;
-  std::shared_ptr<ml::train::LearningRateScheduler> lr_sched;
-
-  opt = nnopt->optimizer;
-  lr_sched = nnlrscheduler->lr_scheduler;
-
   returnable f = [&]() { return opt->setLearningRateScheduler(lr_sched); };
 
   status = nntrainer_exception_boundary(f);
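
The restructuring above copies the optimizer and scheduler shared_ptrs out of short-lived locked scopes, so the handle locks are released before opt->setLearningRateScheduler() is actually invoked. A minimal standalone sketch of that pattern follows; the Handle/Resource types and the plain std::mutex are hypothetical stand-ins for the ML_TRAIN_*_LOCKED / ML_TRAIN_ADOPT_LOCK machinery, not the real nntrainer types.

```cpp
// Minimal standalone sketch of the scoped-lock copy-out pattern used above.
// Handle, Resource and the plain std::mutex are hypothetical stand-ins for
// the ML_TRAIN_*_LOCKED / ML_TRAIN_ADOPT_LOCK machinery in nntrainer.cpp.
#include <iostream>
#include <memory>
#include <mutex>

struct Resource {
  void use() { std::cout << "using resource\n"; }
};

struct Handle {
  std::mutex m;
  std::shared_ptr<Resource> resource = std::make_shared<Resource>();
};

void use_resource(Handle &handle) {
  std::shared_ptr<Resource> res;
  {
    // Hold the handle lock only long enough to copy the shared_ptr out,
    // instead of keeping it locked across the call into the resource.
    std::lock_guard<std::mutex> lock(handle.m);
    res = handle.resource;
  } // lock released here
  res->use(); // the copied shared_ptr keeps the resource alive without the lock
}

int main() {
  Handle h;
  use_resource(h);
  return 0;
}
```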
nntrainer/compiler/tflite_interpreter.cpp
index 67872f6..be18a67 100644 (file)
@@ -360,7 +360,10 @@ TfOpNodes buildOpNodes(const GraphRepresentation &representation,
   /// set arity of TfOpNodes
   for (auto &n : nodes) {
     auto tf_node = n.get();
-    auto layer_node = tf_to_layer.find(tf_node)->second;
+    auto searched_layer = tf_to_layer.find(tf_node);
+    if (searched_layer == tf_to_layer.end())
+      throw std::runtime_error("Cannot find layer for TfOpNode");
+    auto layer_node = searched_layer->second;
     auto layer_node_inputs = layer_node->getInputConnections();
 
     /// assume that the TfOpNode and the LayerNode have a one-to-one
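
The change above guards the std::map::find() lookup that Coverity flags when the returned iterator is dereferenced without an end() check. A small self-contained sketch of the checked-lookup pattern, using an illustrative map rather than the real TfOpNode/LayerNode types:

```cpp
// Standalone sketch of the checked-lookup pattern: dereferencing the iterator
// returned by std::map::find() without comparing it to end() is the defect
// Coverity reports. The map contents below are illustrative only.
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

int main() {
  std::map<std::string, int> tf_to_layer = {{"conv", 1}, {"relu", 2}};

  auto searched_layer = tf_to_layer.find("conv");
  if (searched_layer == tf_to_layer.end())
    throw std::runtime_error("Cannot find layer for TfOpNode");

  // Safe to dereference only after the end() check above.
  std::cout << searched_layer->second << '\n';
  return 0;
}
```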
nntrainer/utils/profiler.cpp
index aaa84d7..f5afb9d 100644 (file)
@@ -161,10 +161,8 @@ void GenericProfileListener::report(std::ostream &out) const {
       auto &sum_ = std::get<GenericProfileListener::SUM>(time.second);
 
       auto title = names.find(time.first);
-#ifdef DEBUG
       if (title == names.end())
         throw std::runtime_error("Couldn't find name. it's already removed.");
-#endif
 
       if (warmups >= cnt_) {
         out_ << std::left << std::setw(total_col_size) << title->second
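
Dropping the #ifdef DEBUG guard makes the end() check part of release builds too; otherwise the subsequent title->second dereference is undefined behavior whenever a profiled entry has already been removed. A standalone sketch of the unconditional-check pattern, with illustrative names in place of the profiler internals:

```cpp
// Sketch of why the DEBUG-only guard was removed: with #ifdef DEBUG the check
// compiles away in release builds and title->second dereferences end(), which
// is undefined behavior. Names below are illustrative, not the real profiler.
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

std::string lookup_title(const std::map<int, std::string> &names, int key) {
  auto title = names.find(key);
  // Unconditional check: the error path must also exist in release builds,
  // because entries can be removed at runtime.
  if (title == names.end())
    throw std::runtime_error("Couldn't find name. it's already removed.");
  return title->second;
}

int main() {
  const std::map<int, std::string> names = {{0, "forward"}, {1, "backward"}};
  std::cout << lookup_title(names, 1) << '\n';
  return 0;
}
```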
test/tizen_capi/unittest_tizen_capi_lr_scheduler.cpp
index 4dfe75d..7d5f5f2 100644 (file)
@@ -70,7 +70,7 @@ TEST(nntrainer_capi_lr_scheduler, create_destruct_03_p) {
 TEST(nntrainer_capi_lr_scheduler, create_destruct_04_n) {
   ml_train_lr_scheduler_h handle = NULL;
   int status;
-  status = ml_train_lr_scheduler_destroy(&handle);
+  status = ml_train_lr_scheduler_destroy(handle);
   EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
 }
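
The test fix passes the NULL handle itself rather than its address: &handle is the valid, non-NULL address of the local variable, so it bypasses the invalid-parameter check this negative test is meant to exercise. A minimal sketch of that distinction, using a mock destroy function rather than the real Tizen C API:

```cpp
// Minimal sketch of the negative-test intent: the destroy call must receive
// the NULL handle itself; &handle is the non-NULL address of the local
// variable and bypasses the invalid-parameter check. mock_destroy and the
// MOCK_* constants are illustrative, not the real Tizen C API.
#include <cassert>
#include <cstddef>

typedef void *mock_handle_h;

constexpr int MOCK_ERROR_NONE = 0;
constexpr int MOCK_ERROR_INVALID_PARAMETER = -22;

int mock_destroy(mock_handle_h handle) {
  if (handle == NULL)
    return MOCK_ERROR_INVALID_PARAMETER; // the path the negative test targets
  // A real implementation would free the object referenced by handle here.
  return MOCK_ERROR_NONE;
}

int main() {
  mock_handle_h handle = NULL;
  // Correct: pass the (NULL) handle by value and hit the error path.
  assert(mock_destroy(handle) == MOCK_ERROR_INVALID_PARAMETER);
  // Bug pattern: &handle is a valid pointer, so the check is never exercised.
  assert(mock_destroy(&handle) == MOCK_ERROR_NONE);
  return 0;
}
```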