[AHub] Fix AHub Defect
author DongHak Park <donghak.park@samsung.com>
Thu, 6 Apr 2023 07:06:39 +0000 (16:06 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Mon, 10 Apr 2023 04:49:42 +0000 (13:49 +0900)
Fix AHub Defect
- add exception-handling statements for failed map lookups and zero iteration counts (see the guard sketch after the file list)
- change auto element -> auto &element in range-based for loops to avoid copies (see the sketch below)

Signed-off-by: DongHak Park <donghak.park@samsung.com>
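
The auto -> auto &element change matters because a plain "auto layer" makes an extra copy of each element per iteration (for the shared_ptr layer handles in main.cpp, a needless refcount bump; likewise for each call-chain entry in lazy_tensor.cpp), while "auto &layer" binds to the stored element directly. A minimal standalone sketch of the pattern, using a hypothetical LayerHandle alias rather than the actual nntrainer types:

#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-in for the layer handles iterated in main.cpp.
using LayerHandle = std::shared_ptr<std::string>;

std::vector<LayerHandle> createGraph() {
  return {std::make_shared<std::string>("conv1"),
          std::make_shared<std::string>("pool1")};
}

int main() {
  // "auto layer"  : copies the shared_ptr on each iteration
  // "auto &layer" : binds to the element in place, no copy
  for (auto &layer : createGraph()) {
    (void)layer; // e.g. model->addLayer(layer);
  }
  return 0;
}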
Applications/Resnet/jni/main.cpp
nntrainer/compiler/tflite_interpreter.cpp
nntrainer/models/neuralnet.cpp
nntrainer/tensor/lazy_tensor.cpp
nntrainer/utils/profiler.cpp
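
The exception-statement changes below all follow the same defensive pattern: verify that a map or graph lookup did not return end() before dereferencing it, and skip the division when the iteration count is zero. A minimal sketch of that pattern, with illustrative names rather than the actual nntrainer symbols:

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

// averageLoss is a hypothetical helper; it mirrors the guards added
// to neuralnet.cpp and profiler.cpp below.
float averageLoss(const std::map<std::string, float> &loss_sum,
                  const std::string &key, unsigned int num_iterations) {
  auto it = loss_sum.find(key);
  if (it == loss_sum.end()) {
    // dereferencing end() is undefined behavior, so fail loudly instead
    throw std::runtime_error("Couldn't find key: " + key);
  }
  if (num_iterations == 0) {
    // guard the division instead of producing inf/NaN
    std::cerr << "num_iterations is 0" << std::endl;
    return 0.0f;
  }
  return it->second / static_cast<float>(num_iterations);
}

Note also that the #ifdef DEBUG guards removed in profiler.cpp mean the lookup checks there now run in release builds as well, not only in debug builds.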

diff --git a/Applications/Resnet/jni/main.cpp b/Applications/Resnet/jni/main.cpp
index 47660f9..9282c67 100644
@@ -207,7 +207,7 @@ ModelHandle createResnet18() {
                                              {withKey("loss", "cross")});
 #endif
 
-  for (auto layer : createResnet18Graph()) {
+  for (auto &layer : createResnet18Graph()) {
     model->addLayer(layer);
   }
 
diff --git a/nntrainer/compiler/tflite_interpreter.cpp b/nntrainer/compiler/tflite_interpreter.cpp
index 8e53e20..67872f6 100644
@@ -373,9 +373,12 @@ TfOpNodes buildOpNodes(const GraphRepresentation &representation,
         [&input_layer_name](std::shared_ptr<nntrainer::LayerNode> node) {
           return istrequal(node.get()->getName(), input_layer_name);
         });
+
       if (input_later_node_iterator != representation.end()) {
         auto input_layer_node = input_later_node_iterator->get();
-        tf_node->setArg(index, layer_to_tf.find(input_layer_node)->second);
+        if (layer_to_tf.find(input_layer_node) != layer_to_tf.end()) {
+          tf_node->setArg(index, layer_to_tf.find(input_layer_node)->second);
+        }
       }
     }
   }
diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp
index 5334d5a..09d2ed8 100644
@@ -335,7 +335,8 @@ void NeuralNetwork::backwarding(int iteration,
         node->calcGradient();
 
       /**
-       * If optimization off, or gradient must be applied, then this will be true
+       * If optimization off, or gradient must be applied, then this will be
+       * true
        * @todo This apply gradient should be passed to the each weight and later
        * be queried when updating gradient at once. (after moving apply_gradient
        * out of this function)
@@ -343,8 +344,8 @@ void NeuralNetwork::backwarding(int iteration,
        */
       // auto &layer = node->getObject();
       // apply_gradient = dynamic_training_opt.checkIfApply(
-      //   layer->getWeightsRef(), layer->net_input[0], layer->net_hidden[0], opt,
-      //   iteration);
+      //   layer->getWeightsRef(), layer->net_input[0], layer->net_hidden[0],
+      //   opt, iteration);
 
       /** If gradient must be applied and its not gradient mode, calculate
        * gradient
@@ -838,7 +839,12 @@ int NeuralNetwork::train_run(std::function<bool(void *userdata)> stop_cb,
 
   auto train_epoch_end = [this, stop_cb, user_data](RunStats &stat,
                                                     DataBuffer &buffer) {
-    stat.loss /= static_cast<float>(stat.num_iterations);
+    if (stat.num_iterations != 0) {
+      stat.loss /= static_cast<float>(stat.num_iterations);
+    } else {
+      std::cerr << "stat.num_iterations is 0" << std::endl;
+      return;
+    }
     auto &save_path = std::get<props::SavePath>(model_flex_props);
     if (!stop_cb(user_data)) {
       if (!save_path.empty()) {
@@ -880,7 +886,12 @@ int NeuralNetwork::train_run(std::function<bool(void *userdata)> stop_cb,
   auto eval_epoch_end = [this, batch_size, max_acc = 0.0f,
                          min_loss = std::numeric_limits<float>::max()](
                           RunStats &stat, DataBuffer &buffer) mutable {
-    stat.loss /= static_cast<float>(stat.num_iterations);
+    if (stat.num_iterations != 0) {
+      stat.loss /= static_cast<float>(stat.num_iterations);
+    } else {
+      std::cerr << "stat.num_iterations is 0" << std::endl;
+      return;
+    }
     stat.accuracy = stat.num_correct_predictions /
                     static_cast<float>(stat.num_iterations * batch_size) *
                     100.0f;
diff --git a/nntrainer/tensor/lazy_tensor.cpp b/nntrainer/tensor/lazy_tensor.cpp
index 17f9641..cc74d7c 100644
@@ -226,7 +226,7 @@ LazyTensor &LazyTensor::average() {
  */
 Tensor LazyTensor::run() {
   int status;
-  for (auto item : call_chain) {
+  for (auto &item : call_chain) {
     status = item(target);
     if (status != ML_ERROR_NONE) {
       throw std::runtime_error("Error: evaluation failed");
diff --git a/nntrainer/utils/profiler.cpp b/nntrainer/utils/profiler.cpp
index f064cad..aaa84d7 100644
@@ -34,6 +34,9 @@ void GenericProfileListener::onNotifyTimeEvent(
   if (time_iter == time_taken.end()) {
     reset(time_item, str);
     time_iter = time_taken.find(time_item);
+    if (time_iter == time_taken.end()) {
+      throw std::runtime_error("Couldn't find time_iter.");
+    }
   }
 
   auto &cnt_ = std::get<GenericProfileListener::CNT>(time_iter->second);
@@ -117,10 +120,8 @@ void GenericProfileListener::report(std::ostream &out) const {
 
   for (auto &[item, time] : time_taken) {
     auto title = names.find(item);
-#ifdef DEBUG
     if (title == names.end())
       throw std::runtime_error("Couldn't find name. it's already removed.");
-#endif
     column_size[0] =
       std::max(column_size[0], static_cast<unsigned int>(title->second.size()));
   }
@@ -380,10 +381,8 @@ void Profiler::dealloc(const void *ptr, const std::string &policy, bool swap) {
   auto end = std::chrono::steady_clock::now();
   auto found = allocates.find(ptr);
 
-#ifdef DEBUG
   if (found == allocates.end())
     throw std::invalid_argument("memory profiler didn't allocated");
-#endif
 
   auto duration = std::chrono::duration_cast<std::chrono::microseconds>(
     end - std::get<timepoint>(found->second));