From 3802eed7cd206fcd00bbd91f05de509b326b4588 Mon Sep 17 00:00:00 2001
From: DongHak Park
Date: Thu, 6 Apr 2023 16:06:39 +0900
Subject: [PATCH] [AHub] Fix AHub Defect

Fix AHub Defect
- add some exception handling statements
- change auto element -> auto &element

Signed-off-by: DongHak Park
---
 Applications/Resnet/jni/main.cpp          |  2 +-
 nntrainer/compiler/tflite_interpreter.cpp |  5 ++++-
 nntrainer/models/neuralnet.cpp            | 21 ++++++++++++++++-----
 nntrainer/tensor/lazy_tensor.cpp          |  2 +-
 nntrainer/utils/profiler.cpp              |  7 +++----
 5 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/Applications/Resnet/jni/main.cpp b/Applications/Resnet/jni/main.cpp
index 47660f9..9282c67 100644
--- a/Applications/Resnet/jni/main.cpp
+++ b/Applications/Resnet/jni/main.cpp
@@ -207,7 +207,7 @@ ModelHandle createResnet18() {
                    {withKey("loss", "cross")});
 #endif
 
-  for (auto layer : createResnet18Graph()) {
+  for (auto &layer : createResnet18Graph()) {
     model->addLayer(layer);
   }
 
diff --git a/nntrainer/compiler/tflite_interpreter.cpp b/nntrainer/compiler/tflite_interpreter.cpp
index 8e53e20..67872f6 100644
--- a/nntrainer/compiler/tflite_interpreter.cpp
+++ b/nntrainer/compiler/tflite_interpreter.cpp
@@ -373,9 +373,12 @@ TfOpNodes buildOpNodes(const GraphRepresentation &representation,
         [&input_layer_name](std::shared_ptr<LayerNode> node) {
           return istrequal(node.get()->getName(), input_layer_name);
         });
 
+      if (input_later_node_iterator != representation.end()) {
         auto input_layer_node = input_later_node_iterator->get();
-        tf_node->setArg(index, layer_to_tf.find(input_layer_node)->second);
+        if (layer_to_tf.find(input_layer_node) != layer_to_tf.end()) {
+          tf_node->setArg(index, layer_to_tf.find(input_layer_node)->second);
+        }
       }
     }
   }
diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp
index 5334d5a..09d2ed8 100644
--- a/nntrainer/models/neuralnet.cpp
+++ b/nntrainer/models/neuralnet.cpp
@@ -335,7 +335,8 @@ void NeuralNetwork::backwarding(int iteration,
       node->calcGradient();
 
     /**
-     * If optimization off, or gradient must be applied, then this will be true
+     * If optimization off, or gradient must be applied, then this will be
+     * true
      * @todo This apply gradient should be passed to the each weight and later
      * be queried when updating gradient at once. (after moving apply_gradient
      * out of this function)
@@ -343,8 +344,8 @@
      */
     // auto &layer = node->getObject();
     // apply_gradient = dynamic_training_opt.checkIfApply(
-    //   layer->getWeightsRef(), layer->net_input[0], layer->net_hidden[0], opt,
-    //   iteration);
+    //   layer->getWeightsRef(), layer->net_input[0], layer->net_hidden[0],
+    //   opt, iteration);
 
     /** If gradient must be applied and its not gradient mode, calculate
      * gradient
@@ -838,7 +839,12 @@ int NeuralNetwork::train_run(std::function<bool(void *)> stop_cb,
 
   auto train_epoch_end = [this, stop_cb, user_data](RunStats &stat,
                                                     DataBuffer &buffer) {
-    stat.loss /= static_cast<float>(stat.num_iterations);
+    if (stat.num_iterations != 0) {
+      stat.loss /= static_cast<float>(stat.num_iterations);
+    } else {
+      std::cerr << "stat.num_iterations is 0" << std::endl;
+      return;
+    }
     auto &save_path = std::get<props::SavePath>(model_flex_props);
     if (!stop_cb(user_data)) {
       if (!save_path.empty()) {
@@ -880,7 +886,12 @@ int NeuralNetwork::train_run(std::function<bool(void *)> stop_cb,
   auto eval_epoch_end = [this, batch_size, max_acc = 0.0f,
                          min_loss = std::numeric_limits<float>::max()](
                           RunStats &stat, DataBuffer &buffer) mutable {
-    stat.loss /= static_cast<float>(stat.num_iterations);
+    if (stat.num_iterations != 0) {
+      stat.loss /= static_cast<float>(stat.num_iterations);
+    } else {
+      std::cerr << "stat.num_iterations is 0" << std::endl;
+      return;
+    }
     stat.accuracy = stat.num_correct_predictions /
                     static_cast<float>(stat.num_iterations * batch_size) *
                     100.0f;
diff --git a/nntrainer/tensor/lazy_tensor.cpp b/nntrainer/tensor/lazy_tensor.cpp
index 17f9641..cc74d7c 100644
--- a/nntrainer/tensor/lazy_tensor.cpp
+++ b/nntrainer/tensor/lazy_tensor.cpp
@@ -226,7 +226,7 @@ LazyTensor &LazyTensor::average() {
  */
 Tensor LazyTensor::run() {
   int status;
-  for (auto item : call_chain) {
+  for (auto &item : call_chain) {
     status = item(target);
     if (status != ML_ERROR_NONE) {
       throw std::runtime_error("Error: evaluation failed");
diff --git a/nntrainer/utils/profiler.cpp b/nntrainer/utils/profiler.cpp
index f064cad..aaa84d7 100644
--- a/nntrainer/utils/profiler.cpp
+++ b/nntrainer/utils/profiler.cpp
@@ -34,6 +34,9 @@ void GenericProfileListener::onNotifyTimeEvent(
   if (time_iter == time_taken.end()) {
     reset(time_item, str);
     time_iter = time_taken.find(time_item);
+    if (time_iter == time_taken.end()) {
+      throw std::runtime_error("Couldn't find time_iter.");
+    }
   }
 
   auto &cnt_ = std::get<GenericProfileListener::CNT>(time_iter->second);
@@ -117,10 +120,8 @@ void GenericProfileListener::report(std::ostream &out) const {
   for (auto &[item, time] : time_taken) {
     auto title = names.find(item);
 
-#ifdef DEBUG
     if (title == names.end())
       throw std::runtime_error("Couldn't find name. it's already removed.");
-#endif
     column_size[0] =
       std::max(column_size[0], static_cast<unsigned int>(title->second.size()));
   }
@@ -380,10 +381,8 @@ void Profiler::dealloc(const void *ptr, const std::string &policy, bool swap) {
   auto end = std::chrono::steady_clock::now();
   auto found = allocates.find(ptr);
 
-#ifdef DEBUG
   if (found == allocates.end())
     throw std::invalid_argument("memory profiler didn't allocated");
-#endif
 
   auto duration = std::chrono::duration_cast<std::chrono::microseconds>(
     end - std::get(found->second));
-- 
2.7.4
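
Background for reviewers: the defects fixed above fall into two recurring
patterns, range-for loops that copy every element and unchecked lookups or
divisions. Below is a minimal standalone sketch of both patterns; it is
illustrative only, and all names in it are hypothetical rather than taken
from the nntrainer sources.

    // ahub_patterns.cpp - illustrative sketch, not nntrainer code.
    // Build: g++ -std=c++17 ahub_patterns.cpp
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> layers = {"conv1", "bn1", "relu1"};

      // `for (auto layer : layers)` copies each std::string per iteration;
      // binding by reference (`auto &`) iterates over the elements themselves.
      for (auto &layer : layers) {
        std::cout << layer << '\n';
      }

      // Guarded lookup: test find() against end() before dereferencing the
      // iterator, as the tflite_interpreter.cpp and profiler.cpp hunks now do.
      std::map<std::string, int> layer_ids = {{"conv1", 0}};
      auto it = layer_ids.find("bn1");
      if (it != layer_ids.end()) {
        std::cout << it->second << '\n';
      }

      // Guarded division: divide only when the denominator is nonzero,
      // mirroring the stat.num_iterations check in neuralnet.cpp.
      float loss_sum = 1.5f;
      int num_iterations = 0;
      if (num_iterations != 0) {
        loss_sum /= static_cast<float>(num_iterations);
      } else {
        std::cerr << "num_iterations is 0" << std::endl;
      }
      return 0;
    }

Note that for move-only element types such as std::unique_ptr, the
reference form is not merely cheaper but required for the loop to compile,
which is one reason static analyzers flag `auto element` in range-based
for loops.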