From: Seungbaek Hong
Date: Fri, 21 Apr 2023 08:33:43 +0000 (+0900)
Subject: [tct] fix coverity issues
X-Git-Tag: accepted/tizen/unified/20230425.130129~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1b57ca5ae373ddcc004ad4050cc1995dcf6dc8cb;p=platform%2Fcore%2Fml%2Fnntrainer.git

[tct] fix coverity issues

Fix some coverity issues.
This PR is still a work in progress.

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Seungbaek Hong
---

diff --git a/api/capi/src/nntrainer.cpp b/api/capi/src/nntrainer.cpp
index 2e90275..6915bf4 100644
--- a/api/capi/src/nntrainer.cpp
+++ b/api/capi/src/nntrainer.cpp
@@ -785,17 +785,17 @@ int ml_train_optimizer_destroy(ml_train_optimizer_h optimizer) {
   {
     ML_TRAIN_GET_VALID_OPT_LOCKED_RESET(nnopt, optimizer);
     ML_TRAIN_ADOPT_LOCK(nnopt, optimizer_lock);
+  }
 
-    if (nnopt->in_use) {
-      ml_loge("Cannot delete optimizer already set to a model."
-              "Delete model will delete this optimizer.");
-      return ML_ERROR_INVALID_PARAMETER;
-    }
+  if (nnopt->in_use) {
+    ml_loge("Cannot delete optimizer already set to a model."
+            "Delete model will delete this optimizer.");
+    return ML_ERROR_INVALID_PARAMETER;
+  }
 
-    if (nnopt->lr_sheduler) {
-      ML_TRAIN_RESET_VALIDATED_HANDLE(nnopt->lr_sheduler);
-      delete nnopt->lr_sheduler;
-    }
+  if (nnopt->lr_sheduler) {
+    ML_TRAIN_RESET_VALIDATED_HANDLE(nnopt->lr_sheduler);
+    delete nnopt->lr_sheduler;
   }
 
   delete nnopt;
@@ -854,22 +854,26 @@ int ml_train_optimizer_set_lr_scheduler(ml_train_optimizer_h optimizer,
 
   check_feature_state();
 
-  ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, optimizer);
-  ML_TRAIN_ADOPT_LOCK(nnopt, opt_lock);
-  ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED(nnlrscheduler, lr_scheduler);
-  ML_TRAIN_ADOPT_LOCK(nnlrscheduler, lr_scheduler_lock);
+  std::shared_ptr<ml::train::Optimizer> opt;
+  std::shared_ptr<ml::train::LearningRateScheduler> lr_sched;
+
+  {
+    ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, optimizer);
+    ML_TRAIN_ADOPT_LOCK(nnopt, opt_lock);
+    opt = nnopt->optimizer;
+  }
+
+  {
+    ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED(nnlrscheduler, lr_scheduler);
+    ML_TRAIN_ADOPT_LOCK(nnlrscheduler, lr_scheduler_lock);
+    lr_sched = nnlrscheduler->lr_scheduler;
+  }
 
   if (nnlrscheduler->in_use) {
     ml_loge("learning rate scheduler already in use.");
     return ML_ERROR_INVALID_PARAMETER;
   }
 
-  std::shared_ptr<ml::train::Optimizer> opt;
-  std::shared_ptr<ml::train::LearningRateScheduler> lr_sched;
-
-  opt = nnopt->optimizer;
-  lr_sched = nnlrscheduler->lr_scheduler;
-
   returnable f = [&]() { return opt->setLearningRateScheduler(lr_sched); };
 
   status = nntrainer_exception_boundary(f);
diff --git a/nntrainer/compiler/tflite_interpreter.cpp b/nntrainer/compiler/tflite_interpreter.cpp
index 67872f6..be18a67 100644
--- a/nntrainer/compiler/tflite_interpreter.cpp
+++ b/nntrainer/compiler/tflite_interpreter.cpp
@@ -360,7 +360,10 @@ TfOpNodes buildOpNodes(const GraphRepresentation &representation,
   /// set arity of TfOpNodes
   for (auto &n : nodes) {
     auto tf_node = n.get();
-    auto layer_node = tf_to_layer.find(tf_node)->second;
+    auto searched_layer = tf_to_layer.find(tf_node);
+    if (searched_layer == tf_to_layer.end())
+      throw std::runtime_error("Cannot find layer for TfOpNode");
+    auto layer_node = searched_layer->second;
     auto layer_node_inputs = layer_node->getInputConnections();
 
     /// assume that the TfOpNode and the LayerNode have a one-to-one
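Note on the `ml_train_optimizer_destroy` hunk: the closing brace moves up so the adopted lock is released before the handle is checked and deleted; otherwise the lock guard would still hold a mutex that lives inside the object being freed. A minimal sketch of the pattern, with a hypothetical `opt_handle` type standing in for the real `ml_train_optimizer` wrapper and the `ML_TRAIN_*` macros:

```cpp
#include <mutex>

// Hypothetical stand-in for the ml_train_optimizer handle; the real
// wrapper embeds its own mutex, which is what makes the scoping matter.
struct opt_handle {
  std::mutex m;
  bool in_use = false;
};

int destroy_handle(opt_handle *h) {
  if (h == nullptr)
    return -1;
  {
    // Hold the handle's own lock only while validating/unregistering it.
    std::lock_guard<std::mutex> guard(h->m);
  } // guard's destructor releases h->m here
  if (h->in_use)
    return -1; // still attached to a model; refuse to delete
  // Safe now: no guard in this frame references h->m when the object
  // (and the mutex inside it) is destroyed.
  delete h;
  return 0;
}
```

Scoping the guard this tightly is the usual RAII answer: the destructor runs at the closing brace, so no manual unlock is needed on any early-return path.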
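The `ml_train_optimizer_set_lr_scheduler` hunk applies the same idea: each handle's lock is taken in its own block, just long enough to copy out the underlying `shared_ptr`, so the two locks are never held at the same time and the copies keep both objects alive for the rest of the call. A sketch under simplified types (`opt_wrapper` and `sched_wrapper` are hypothetical stand-ins, with `int` in place of the real optimizer and scheduler classes):

```cpp
#include <memory>
#include <mutex>

struct opt_wrapper {
  std::mutex m;
  std::shared_ptr<int> optimizer; // stand-in for ml::train::Optimizer
};
struct sched_wrapper {
  std::mutex m;
  std::shared_ptr<int> scheduler; // stand-in for the LR scheduler
};

int set_scheduler(opt_wrapper *opt_h, sched_wrapper *sched_h) {
  std::shared_ptr<int> opt;
  std::shared_ptr<int> sched;
  {
    std::lock_guard<std::mutex> g(opt_h->m); // first lock: copy, release
    opt = opt_h->optimizer;
  }
  {
    std::lock_guard<std::mutex> g(sched_h->m); // second lock: copy, release
    sched = sched_h->scheduler;
  }
  // From here on we work with the shared_ptr copies; the two handle locks
  // were never held simultaneously, so there is no lock-order risk.
  return (opt && sched) ? 0 : -1;
}
```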
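The `tflite_interpreter.cpp` change is the standard guard for `std::map::find`: compare the returned iterator against `end()` before touching `->second`, since dereferencing `end()` is undefined behavior (the unchecked-iterator pattern Coverity flags). A self-contained illustration:

```cpp
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

int main() {
  std::map<int, std::string> tf_to_layer{{1, "conv"}, {2, "fc"}};

  // Unsafe: if the key is absent, find() returns end(), and
  // tf_to_layer.find(3)->second dereferences end() -- undefined behavior.

  // Safe: keep the iterator and compare against end() first.
  auto it = tf_to_layer.find(2);
  if (it == tf_to_layer.end())
    throw std::runtime_error("Cannot find layer for TfOpNode");
  std::cout << it->second << '\n'; // prints "fc"
  return 0;
}
```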
diff --git a/nntrainer/utils/profiler.cpp b/nntrainer/utils/profiler.cpp
index aaa84d7..f5afb9d 100644
--- a/nntrainer/utils/profiler.cpp
+++ b/nntrainer/utils/profiler.cpp
@@ -161,10 +161,8 @@ void GenericProfileListener::report(std::ostream &out) const {
     auto &sum_ = std::get<GenericProfileListener::SUM>(time.second);
     auto title = names.find(time.first);
 
-#ifdef DEBUG
     if (title == names.end())
       throw std::runtime_error("Couldn't find name. it's already removed.");
-#endif
 
     if (warmups >= cnt_) {
       out_ << std::left << std::setw(total_col_size) << title->second
diff --git a/test/tizen_capi/unittest_tizen_capi_lr_scheduler.cpp b/test/tizen_capi/unittest_tizen_capi_lr_scheduler.cpp
index 4dfe75d..7d5f5f2 100644
--- a/test/tizen_capi/unittest_tizen_capi_lr_scheduler.cpp
+++ b/test/tizen_capi/unittest_tizen_capi_lr_scheduler.cpp
@@ -70,7 +70,7 @@ TEST(nntrainer_capi_lr_scheduler, create_destruct_03_p) {
 
 TEST(nntrainer_capi_lr_scheduler, create_destruct_04_n) {
   ml_train_lr_scheduler_h handle = NULL;
   int status;
-  status = ml_train_lr_scheduler_destroy(&handle);
+  status = ml_train_lr_scheduler_destroy(handle);
   EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
 }
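Context for the `profiler.cpp` hunk: with the `#ifdef DEBUG` guard, release builds skipped the `end()` check entirely, and `title->second` a few lines later could dereference an `end()` iterator. The check is cheap, so it is now compiled unconditionally. A minimal sketch of the corrected lookup (the `lookup_name` helper is hypothetical, not the listener's real structure):

```cpp
#include <map>
#include <stdexcept>
#include <string>

// The validity check is always compiled in, not wrapped in #ifdef DEBUG,
// because dereferencing an end() iterator is undefined behavior in any
// build configuration, not just debug ones.
std::string lookup_name(const std::map<int, std::string> &names, int key) {
  auto title = names.find(key);
  if (title == names.end())
    throw std::runtime_error("Couldn't find name. it's already removed.");
  return title->second;
}
```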
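The test fix follows from the CAPI signature: `ml_train_lr_scheduler_destroy` takes the handle by value, so the negative test must pass the NULL handle itself. `&handle` is the address of a stack variable, which is never NULL, so the invalid-parameter path was not actually exercised. Roughly, assuming the test file's usual includes (the test name below is illustrative):

```cpp
#include <gtest/gtest.h>
#include <nntrainer.h>

TEST(nntrainer_capi_lr_scheduler, destroy_null_handle_n) {
  ml_train_lr_scheduler_h handle = NULL;
  // Pass the handle by value: a NULL handle must be rejected with
  // ML_ERROR_INVALID_PARAMETER. Passing &handle would hand the API a
  // valid (non-NULL) address and skip the negative path entirely.
  int status = ml_train_lr_scheduler_destroy(handle);
  EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
}
```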