Favor `map::emplace` over `map::insert` for every occurrence in neurun.
Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
{
assert(_mem_mgr->tensors().size() == 0);
- _tensor_info_map.insert({ind, info});
- _apply_dim_correction_map.insert({ind, true});
+ _tensor_info_map.emplace(ind, info);
+ _apply_dim_correction_map.emplace(ind, true);
_layout = layout;
assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
{
assert(_mem_mgr->tensors().size() == 0);
- _subtensor_info_map.insert({ind, info});
- _apply_dim_correction_map.insert({ind, true});
+ _subtensor_info_map.emplace(ind, info);
+ _apply_dim_correction_map.emplace(ind, true);
assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
_first_uses_visit[ind] = false;
void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
const model::OperandInfo &info, model::Layout)
{
- _tensor_info_map.insert({ind, info});
+ _tensor_info_map.emplace(ind, info);
// TODO set the layout
}
}
// Save backend handle (avoid warning by handle lost without dlclose())
- _handle_map.insert({backend, handle});
+ _handle_map.emplace(backend, handle);
}
BackendManager::BackendManager()
}
else
{
- auto it = _measurements[backend][operation][quant].insert({op_size, time});
+ auto it = _measurements[backend][operation][quant].emplace(op_size, time);
if (!it.second)
{
// affect of the last measurement is bigger than the previous ones:
}
_parents_eft[index] = eft;
- _backends_avail_time[chosen_backend].insert({eft, eft - selected_exec_time});
+ _backends_avail_time[chosen_backend].emplace(eft, eft - selected_exec_time);
_backend_resolver->setBackend(index, chosen_backend);
VERBOSE(Scheduler::scheduleNode) << "backend for " << node.getName() << " is "
max_pred_eft = std::max(max_pred_eft, prev_op_ft + it.second);
- const auto tmp = _backends_avail_time[cpu_backend].insert({prev_op_ft + it.second, prev_op_ft});
+ const auto tmp = _backends_avail_time[cpu_backend].emplace(prev_op_ft + it.second, prev_op_ft);
inserted_permutations.push_back(tmp.first);
}
// find the hole/gap, where this op can be put or the finishing time of the last assigned op
// Multiply operand size by 2 because size must discribe input+output size
int64_t transfer_cost = getTime(parent_backend, backend->config()->id(), quant,
operand.info().total_size() * 2);
- transfer_st_exec_time.insert({_parents_eft.at(defs), transfer_cost});
+ transfer_st_exec_time.emplace(_parents_eft.at(defs), transfer_cost);
}
}
}
void assignTensor(const model::OperandIndex index, std::shared_ptr<ITensor> tensor)
{
assert(tensor->bufferRO() != nullptr);
- _tensors.insert({index, tensor});
+ _tensors.emplace(index, tensor);
}
/**
assert(operand_li->def_factors().size() == 1);
for (auto factor : operand_li->def_factors())
{
- factor_to_index.insert({factor, index});
+ factor_to_index.emplace(factor, index);
}
auto insert_set = operand_li->use_factors() - operand_li->def_factors();
<< index.value() << std::endl;
const auto &permute_operation = _graph.operations().at(permute_operation_index);
const auto permuted_operand_index = permute_operation.getOutputs().at(0);
- factor_to_index.insert({factor, permuted_operand_index});
+ factor_to_index.emplace(factor, permuted_operand_index);
}
}