[neurun] Favor `map::emplace` over `map::insert` (#6355)
author이한종/On-Device Lab(SR)/Engineer/삼성전자 <hanjoung.lee@samsung.com>
Thu, 8 Aug 2019 01:46:33 +0000 (10:46 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Thu, 8 Aug 2019 01:46:33 +0000 (10:46 +0900)
Favor `map::emplace` over `map::insert` for every occurrence in neurun.

Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
runtimes/neurun/backend/cpu/TensorBuilder.cc
runtimes/neurun/core/src/backend/BackendManager.cc
runtimes/neurun/core/src/backend/ExecTime.cc
runtimes/neurun/core/src/compiler/Scheduler.cc
runtimes/neurun/core/src/exec/interp/ExecEnv.h
runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.cc

index bf8511d..a10ed8c 100644 (file)
@@ -150,8 +150,8 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTen
 {
   assert(_mem_mgr->tensors().size() == 0);
 
-  _tensor_info_map.insert({ind, info});
-  _apply_dim_correction_map.insert({ind, true});
+  _tensor_info_map.emplace(ind, info);
+  _apply_dim_correction_map.emplace(ind, true);
   _layout = layout;
 
   assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
@@ -164,8 +164,8 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerSub
 {
   assert(_mem_mgr->tensors().size() == 0);
 
-  _subtensor_info_map.insert({ind, info});
-  _apply_dim_correction_map.insert({ind, true});
+  _subtensor_info_map.emplace(ind, info);
+  _apply_dim_correction_map.emplace(ind, true);
 
   assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
   _first_uses_visit[ind] = false;
index 17115e5..4d6a535 100644 (file)
@@ -35,7 +35,7 @@ TensorBuilder::TensorBuilder() : _mem_mgr{new MemoryManager()}
 void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
                                        const model::OperandInfo &info, model::Layout)
 {
-  _tensor_info_map.insert({ind, info});
+  _tensor_info_map.emplace(ind, info);
   // TODO set the layout
 }
 
index e92b5b3..155f7f5 100644 (file)
@@ -93,7 +93,7 @@ void BackendManager::loadBackend(const std::string &backend)
   }
 
   // Save backend handle (avoid warning by handle lost without dlclose())
-  _handle_map.insert({backend, handle});
+  _handle_map.emplace(backend, handle);
 }
 
 BackendManager::BackendManager()
index b2812ee..d5aa679 100644 (file)
@@ -107,7 +107,7 @@ void ExecTime::updateOperationExecTime(const Backend *backend, const std::string
   }
   else
   {
-    auto it = _measurements[backend][operation][quant].insert({op_size, time});
+    auto it = _measurements[backend][operation][quant].emplace(op_size, time);
     if (!it.second)
     {
       // affect of the last measurement is bigger than the previous ones:
index 78736d1..ed6c7fc 100644 (file)
@@ -434,7 +434,7 @@ void Scheduler::scheduleNode(const model::OperationIndex &index)
   }
 
   _parents_eft[index] = eft;
-  _backends_avail_time[chosen_backend].insert({eft, eft - selected_exec_time});
+  _backends_avail_time[chosen_backend].emplace(eft, eft - selected_exec_time);
   _backend_resolver->setBackend(index, chosen_backend);
 
   VERBOSE(Scheduler::scheduleNode) << "backend for " << node.getName() << " is "
@@ -497,7 +497,7 @@ Scheduler::ESTAndExecTime(const backend::Backend *backend, const model::Operatio
 
     max_pred_eft = std::max(max_pred_eft, prev_op_ft + it.second);
 
-    const auto tmp = _backends_avail_time[cpu_backend].insert({prev_op_ft + it.second, prev_op_ft});
+    const auto tmp = _backends_avail_time[cpu_backend].emplace(prev_op_ft + it.second, prev_op_ft);
     inserted_permutations.push_back(tmp.first);
   }
   // find the hole/gap, where this op can be put or the finishing time of the last assigned op
@@ -549,7 +549,7 @@ int64_t Scheduler::predMaxEFT(const backend::Backend *backend, const model::Oper
         // Multiply operand size by 2 because size must discribe input+output size
         int64_t transfer_cost = getTime(parent_backend, backend->config()->id(), quant,
                                         operand.info().total_size() * 2);
-        transfer_st_exec_time.insert({_parents_eft.at(defs), transfer_cost});
+        transfer_st_exec_time.emplace(_parents_eft.at(defs), transfer_cost);
       }
     }
   }
index 0e170f7..5d0d5eb 100644 (file)
@@ -65,7 +65,7 @@ public:
   void assignTensor(const model::OperandIndex index, std::shared_ptr<ITensor> tensor)
   {
     assert(tensor->bufferRO() != nullptr);
-    _tensors.insert({index, tensor});
+    _tensors.emplace(index, tensor);
   }
 
   /**
index b27ab05..76e0d8c 100644 (file)
@@ -57,7 +57,7 @@ void PermutationInsertionPass::callback(const model::OperandIndex &index, model:
     assert(operand_li->def_factors().size() == 1);
     for (auto factor : operand_li->def_factors())
     {
-      factor_to_index.insert({factor, index});
+      factor_to_index.emplace(factor, index);
     }
 
     auto insert_set = operand_li->use_factors() - operand_li->def_factors();
@@ -69,7 +69,7 @@ void PermutationInsertionPass::callback(const model::OperandIndex &index, model:
                                         << index.value() << std::endl;
       const auto &permute_operation = _graph.operations().at(permute_operation_index);
       const auto permuted_operand_index = permute_operation.getOutputs().at(0);
-      factor_to_index.insert({factor, permuted_operand_index});
+      factor_to_index.emplace(factor, permuted_operand_index);
     }
   }