From: Sanggyu Lee
Date: Mon, 8 Jan 2024 04:54:23 +0000 (+0900)
Subject: Add & to auto to make static analyzer happy
X-Git-Tag: accepted/tizen/unified/20240131.064057~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b0af54419e892853c00ab204acdee56d39c15b2b;p=platform%2Fcore%2Fml%2Fnnfw.git

Add & to auto to make static analyzer happy

It adds & to auto to make the static analyzer happy. It may also help avoid
copies of non-primitive types. In addition, some auto variables turned out to
be unused; all unused auto variables are removed as well.

ONE-DCO-1.0-Signed-off-by: Sanggyu Lee

---
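[Note for context, not part of the patch: a minimal sketch of why binding to const auto & avoids the copy that plain const auto makes for non-primitive return types. The Info struct and lookup() helper below are hypothetical stand-ins for accessors such as obj.info() or node.param(); they are not from the nnfw codebase.]

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct Info
{
  std::vector<int32_t> dims;
  std::string name;
};

// Returns a reference to an existing object, the way obj.info() or node.param() do.
const Info &lookup(const Info &info) { return info; }

int main()
{
  const Info obj{{1, 3, 224, 224}, "operand"};

  const auto copy = lookup(obj); // deduces Info: copies dims and name
  const auto &ref = lookup(obj); // deduces const Info &: binds to obj, no copy

  std::cout << copy.name << " rank=" << ref.dims.size() << std::endl;
  return 0;
}

[The reference form is also safe for accessors that return by value, e.g. ir::OperandInfo::createStaticInfo(): a temporary bound to a const lvalue reference has its lifetime extended to that of the reference.]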
diff --git a/runtime/onert/backend/cl_common/include/cl_common/BackendContext.h b/runtime/onert/backend/cl_common/include/cl_common/BackendContext.h
index 76d4039..06aafa1 100644
--- a/runtime/onert/backend/cl_common/include/cl_common/BackendContext.h
+++ b/runtime/onert/backend/cl_common/include/cl_common/BackendContext.h
@@ -121,7 +121,7 @@ protected:
       if (!tensor_builder->isRegistered(ind))
       {
         // These tensors do not exist in any operation (No use and def)
-        const auto info = obj.info();
+        const auto &info = obj.info();
         const auto layout = _data.operand_layouts.at(ind);
         // TODO Change tensor info to have permuted shape
         registerTensorInfo(ind, info, layout);
diff --git a/runtime/onert/backend/cpu/KernelGenerator.cc b/runtime/onert/backend/cpu/KernelGenerator.cc
index c927bf5..d462daf 100644
--- a/runtime/onert/backend/cpu/KernelGenerator.cc
+++ b/runtime/onert/backend/cpu/KernelGenerator.cc
@@ -1266,7 +1266,7 @@ void KernelGenerator::visit(const ir::operation::FusedBatchNorm &node)
   const auto epsilon = node.param().epsilon;
   const auto is_training = node.param().is_training;
-  const auto data_format = node.param().data_format;
+  const auto &data_format = node.param().data_format;
   auto fn = std::make_unique();
diff --git a/runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc b/runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc
index d89741c..dc9e20e 100644
--- a/runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc
+++ b/runtime/onert/backend/cpu/ops/DetectionPostProcessLayer.cc
@@ -106,7 +106,7 @@ Array decodeBoxes(const Array &raw_boxes, const Array
 decodeBoxes(const Array &raw_boxes, const Array box.y1);
   }
-  auto decoded_boxes_a_shape = decoded_boxes_a.shape();
+  const auto &decoded_boxes_a_shape = decoded_boxes_a.shape();
   return array_cast(std::move(decoded_boxes_a), decoded_boxes_a_shape);
 }
diff --git a/runtime/onert/backend/gpu_cl/ClConstantInitializer.h b/runtime/onert/backend/gpu_cl/ClConstantInitializer.h
index 95e228a..ad5b47d 100644
--- a/runtime/onert/backend/gpu_cl/ClConstantInitializer.h
+++ b/runtime/onert/backend/gpu_cl/ClConstantInitializer.h
@@ -39,7 +39,7 @@ template
 static void Init(const onert::ir::Operand &model_obj, onert::backend::ITensor &obj, const bool copy,
                  const onert::ir::Layout frontend_layout = onert::ir::Layout::UNKNOWN)
 {
-  const auto shape = model_obj.shape();
+  const auto &shape = model_obj.shape();
   assert(model_obj.data());
   obj.access([&](::onert::backend::ITensor &tensor) {
     switch (shape.rank())
diff --git a/runtime/onert/backend/gpu_cl/KernelGenerator.cc b/runtime/onert/backend/gpu_cl/KernelGenerator.cc
index a24c4f5..31d3134 100644
--- a/runtime/onert/backend/gpu_cl/KernelGenerator.cc
+++ b/runtime/onert/backend/gpu_cl/KernelGenerator.cc
@@ -166,7 +166,7 @@ absl::Status KernelGenerator::readConstTensor(
   absl::variant, tflite::gpu::Tensor> *alpha)
 {
-  const auto shape = _ctx.at(index).shape();
+  const auto &shape = _ctx.at(index).shape();
   if (CheckIfLinearConvertible(&shape))
   {
     tflite::gpu::Tensor tensor;
@@ -304,7 +304,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
   auto kernel{node.getInputs().at(ir::operation::Conv2D::KERNEL)};
   auto bias{node.getInputs().at(ir::operation::Conv2D::BIAS)};
-  const auto param = node.param();
+  const auto &param = node.param();
   tflite::gpu::OperationDef op_def;
   op_def.precision = tflite::gpu::CalculationsPrecision::F32;
@@ -375,7 +375,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
   {
     std::unique_ptr gpu_op_1;
     tflite::gpu::OperationDef op_def_1;
-    const auto shape = _ctx.at(output).shape();
+    const auto &shape = _ctx.at(output).shape();
     auto new_ind = _tensor_reg->addNewClTensor(shape);
     addClNode({input}, {new_ind}, std::move(gpu_op));
@@ -418,7 +418,7 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
   const auto stride = node.param().stride;
   const auto dilation = node.param().dilation;
-  const auto padding = node.param().padding;
+  const auto &padding = node.param().padding;
   const auto multiplier = node.param().multiplier;
diff --git a/runtime/onert/backend/gpu_cl/operand/ICLTensor.cc b/runtime/onert/backend/gpu_cl/operand/ICLTensor.cc
index ef71bbc..1e61b99 100644
--- a/runtime/onert/backend/gpu_cl/operand/ICLTensor.cc
+++ b/runtime/onert/backend/gpu_cl/operand/ICLTensor.cc
@@ -60,7 +60,7 @@ void ICLTensor::writeConvertInit(tflite::gpu::TensorObjectConverterBuilder *conv
   TensorObjectDef permute_def = input_def;
   permute_def.object_def.object_type = ToObjectType(handle()->GetStorageType());
-  auto dims = permute_def.dimensions;
+  const auto &dims = permute_def.dimensions;
   const BHWC shape(dims.b, dims.h, dims.w, dims.c);
   const TensorDescriptor desc{
     permute_def.object_def.data_type,
@@ -105,7 +105,7 @@ void ICLTensor::readConvertInit(tflite::gpu::TensorObjectConverterBuilder *conve
   permute_def.object_def.data_type = DataType::FLOAT32;
   permute_def.object_def.user_provided = true;
-  auto dims = permute_def.dimensions;
+  const auto &dims = permute_def.dimensions;
   const BHWC shape(dims.b, dims.h, dims.w, dims.c);
   const TensorDescriptor desc{
     permute_def.object_def.data_type,
diff --git a/runtime/onert/backend/ruy/KernelGenerator.cc b/runtime/onert/backend/ruy/KernelGenerator.cc
index ae7ec28..735a948 100644
--- a/runtime/onert/backend/ruy/KernelGenerator.cc
+++ b/runtime/onert/backend/ruy/KernelGenerator.cc
@@ -55,7 +55,7 @@ std::unique_ptr KernelGenerator::generate(ir::OperationI
   assert(_return_fn); // _return_fn must have been generated
   ret->append(std::move(_return_fn));
-  for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
+  for (const auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
   {
     auto portable_tensor = _tensor_reg->getPortableTensor(ind);
     if (portable_tensor)
     {
@@ -101,7 +101,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
   const auto stride = node.param().stride;
   const auto activation = node.param().activation;
-  const auto param_padding = node.param().padding;
+  const auto &param_padding = node.param().padding;
   const auto dilation = node.param().dilation;
   auto fn = std::make_unique();
diff --git a/runtime/onert/backend/xnnpack/KernelGenerator.cc b/runtime/onert/backend/xnnpack/KernelGenerator.cc
index 25f3fd2..b721491 100644
--- a/runtime/onert/backend/xnnpack/KernelGenerator.cc
+++ b/runtime/onert/backend/xnnpack/KernelGenerator.cc
@@ -102,7 +102,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
   const auto stride = node.param().stride;
   const auto activation = node.param().activation;
-  const auto param_padding = node.param().padding;
+  const auto &param_padding = node.param().padding;
   const auto dilation = node.param().dilation;
   auto fn = std::make_unique(_external_context);
@@ -142,7 +142,7 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
   const auto ker_width = ker_shape.dim(2);
   const auto dilation_width = node.param().dilation.width_factor;
   const auto dilation_height = node.param().dilation.height_factor;
-  const auto param_padding = node.param().padding;
+  const auto &param_padding = node.param().padding;
   const auto padding = ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width,
                                             ker_height, dilation_width, dilation_height);
   const auto multiplier = node.param().multiplier;
diff --git a/runtime/onert/core/include/backend/basic/BackendContextHelpers.h b/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
index 9992ca1..7588d42 100644
--- a/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
+++ b/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
@@ -63,7 +63,7 @@ template void planTensors(const T_BackendContext &ct
     if (!tensor_builder->isRegistered(ind))
     {
       // These tensors do not exist in any (No use and def)
-      const auto info = obj.info();
+      const auto &info = obj.info();
       // NOTE Currently we only support NHWC tensors for cpu-common tensors.
       // There is no way to get the layout info from the backend context for now.
       // When we support NCHW tensors as well, we also need to change tensor info to be
diff --git a/runtime/onert/core/src/backend/basic/TensorBuilder.cc b/runtime/onert/core/src/backend/basic/TensorBuilder.cc
index f9d8387..4912af1 100644
--- a/runtime/onert/core/src/backend/basic/TensorBuilder.cc
+++ b/runtime/onert/core/src/backend/basic/TensorBuilder.cc
@@ -62,7 +62,7 @@ void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::Op
 void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
 {
   assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
-  const auto tensor_info = _tensor_info_map.at(ind);
+  const auto &tensor_info = _tensor_info_map.at(ind);
   if (!_tensor_reg->getNativeTensor(ind)->is_dynamic())
   {
diff --git a/runtime/onert/core/src/backend/builtin/TensorBuilder.cc b/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
index fefae40..a2f7af3 100644
--- a/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
+++ b/runtime/onert/core/src/backend/builtin/TensorBuilder.cc
@@ -57,7 +57,7 @@ void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
   if (_tensor_info_map.find(ind) == _tensor_info_map.end()) // Do not proceed for user tensors
     return;
-  const auto tensor_info = _tensor_info_map.at(ind);
+  const auto &tensor_info = _tensor_info_map.at(ind);
   if (!nativeOwnTensorAt(ind)->is_dynamic())
   {
diff --git a/runtime/onert/core/src/compiler/HEScheduler.cc b/runtime/onert/core/src/compiler/HEScheduler.cc
index f662ef5..56e2208 100644
--- a/runtime/onert/core/src/compiler/HEScheduler.cc
+++ b/runtime/onert/core/src/compiler/HEScheduler.cc
@@ -409,7 +409,7 @@ int64_t HEScheduler::DFSChildrenMaxRank(const ir::OperationIndex &index)
 int64_t HEScheduler::backendAvailableTime(const backend::Backend *backend,
                                           const int64_t &starting_time, const int64_t &time_amount)
 {
-  const auto backend_times = _backends_avail_time.at(backend);
+  const auto &backend_times = _backends_avail_time.at(backend);
   // finishing and starting times of an op, that will come after current op
   auto next_op_fst = backend_times.upper_bound(starting_time);
   // finishing time of an op, that will come before current op
diff --git a/runtime/onert/core/src/compiler/ShapeValidator.cc b/runtime/onert/core/src/compiler/ShapeValidator.cc
index 3e940f0..5c25ea1 100644
--- a/runtime/onert/core/src/compiler/ShapeValidator.cc
+++ b/runtime/onert/core/src/compiler/ShapeValidator.cc
@@ -227,9 +227,9 @@ void ShapeValidator::visit(const ir::operation::Reduce &node)
   if (operands.at(output_index).info().isDynamic())
     return;
-  const auto input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
-  const auto input_shape = operands.at(input_index).shape();
-  const auto output_shape = operands.at(output_index).shape();
+  const auto &input_index{node.getInputs().at(ir::operation::Reduce::Input::INPUT)};
+  const auto &input_shape = operands.at(input_index).shape();
+  const auto &output_shape = operands.at(output_index).shape();
   OP_REQUIRES(input_shape.rank() <= 4);
   OP_REQUIRES(output_shape.rank() <= input_shape.rank());
@@ -516,9 +516,9 @@ void ShapeValidator::visit(const ir::operation::Gather &node)
   const auto ifm_index{node.getInputs().at(ir::operation::Gather::Input::INPUT)};
   const auto indices_index{node.getInputs().at(ir::operation::Gather::Input::INDICES)};
-  const auto ifm_shape = operands.at(ifm_index).shape();
-  const auto indices_shape = operands.at(indices_index).shape();
-  const auto ofm_shape = operands.at(ofm_index).shape();
+  const auto &ifm_shape = operands.at(ifm_index).shape();
+  const auto &indices_shape = operands.at(indices_index).shape();
+  const auto &ofm_shape = operands.at(ofm_index).shape();
   OP_REQUIRES(ifm_shape.rank() <= 4);
   OP_REQUIRES(indices_shape.rank() <= 3);
@@ -566,7 +566,7 @@ void ShapeValidator::visit(const ir::operation::Pack &node)
   const auto output_rank = static_cast(output_shape.rank());
   const auto input1_index{node.getInputs().at(0)};
-  const auto input_shape = operands.at(input1_index).shape();
+  const auto &input_shape = operands.at(input1_index).shape();
   OP_REQUIRES(axis >= -output_rank && axis < output_rank);
   for (const auto &index : node.getInputs())
diff --git a/runtime/onert/core/src/compiler/StaticShapeInferer.cc b/runtime/onert/core/src/compiler/StaticShapeInferer.cc
index a25b326..68cff7e 100644
--- a/runtime/onert/core/src/compiler/StaticShapeInferer.cc
+++ b/runtime/onert/core/src/compiler/StaticShapeInferer.cc
@@ -524,11 +524,11 @@ void StaticShapeInferer::visit(const ir::operation::Fill &op)
   assert(dims_buf);
   const auto &dims_shape = shape.info().shape();
-  auto new_shape = ((dims_type == ir::DataType::INT32)
-                      ? shape_inference::inferFillShape(
-                          dims_shape, reinterpret_cast(dims_buf))
-                      : shape_inference::inferFillShape(
-                          dims_shape, reinterpret_cast(dims_buf)));
+  const auto &new_shape = ((dims_type == ir::DataType::INT32)
+                             ? shape_inference::inferFillShape(
+                                 dims_shape, reinterpret_cast(dims_buf))
+                             : shape_inference::inferFillShape(
+                                 dims_shape, reinterpret_cast(dims_buf)));
   output.info().shape(new_shape);
 }
@@ -1088,8 +1088,8 @@ void StaticShapeInferer::visit(const ir::operation::SpaceToBatchND &op)
   const auto output_index = op.getOutputs().at(0);
   const auto input_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::INPUT)};
-  const auto block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
-  const auto padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
+  const auto &block_shape_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::BLOCK_SIZE)};
+  const auto &padding_idx{op.getInputs().at(ir::operation::SpaceToBatchND::Input::PADDINGS)};
   ir::Operand &output = operands.at(output_index);
   const auto &input = operands.at(input_idx);
@@ -1103,9 +1103,9 @@ void StaticShapeInferer::visit(const ir::operation::SpaceToBatchND &op)
     return;
   }
-  auto input_shape = input.info().shape();
-  auto block_shape_shape = block_shape.info().shape();
-  auto padding_shape = padding.info().shape();
+  const auto &input_shape = input.info().shape();
+  const auto &block_shape_shape = block_shape.info().shape();
+  const auto &padding_shape = padding.info().shape();
   auto block_shape_data = reinterpret_cast(block_shape.data()->base());
   auto padding_data = reinterpret_cast(padding.data()->base());
@@ -1325,7 +1325,7 @@ void StaticShapeInferer::visit(const ir::operation::While &op)
   auto body_input_observer = _subg_input_observers.at(op.param().body_subg_index).get();
   auto cond_input_observer = _subg_input_observers.at(op.param().cond_subg_index).get();
   // re-sizing input shapes of body subgraph
-  const auto inputs = op.getInputs();
+  const auto &inputs = op.getInputs();
   std::vector inputs_info;
   const auto &graph = _lowered_subg->graph();
   for (size_t i = 0; i < inputs.size(); ++i)
@@ -1401,9 +1401,7 @@ void StaticShapeInferer::visit(const ir::operation::Bulk &op)
   const auto output_idx = op.getOutputs().at(0);
   ir::Operand &output = operands.at(output_idx);
-  auto cur_input_shape = input.info().shape();
-  auto origin_input_shape = op.param().origin_input_shapes[0];
-  auto cur_output_shape = output.info().shape();
+  const auto &cur_input_shape = input.info().shape();
   auto origin_output_shape = op.param().origin_output_shapes[0];
   // TODO: more check for valid batch request
diff --git a/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc b/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc
index 39eb803..1657c0c 100644
--- a/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc
+++ b/runtime/onert/core/src/compiler/pass/PermutationInsertionPass.cc
@@ -87,7 +87,7 @@ void PermutationInsertionPass::callback(const ir::OperandIndex &index, ir::Opera
   const auto op_layout = op_li->layout();
   const backend::Backend *backend = op_li->backend();
   assert(backend);
-  auto use_node_inputs = operation.getInputs();
+  const auto &use_node_inputs = operation.getInputs();
   assert(use_node_inputs.contains(index));
   auto new_index = factor_to_index.at({backend, op_layout});
diff --git a/runtime/onert/core/src/exec/DynamicShapeInferer.cc b/runtime/onert/core/src/exec/DynamicShapeInferer.cc
index 78b21cf..4cbf2fe 100644
--- a/runtime/onert/core/src/exec/DynamicShapeInferer.cc
+++ b/runtime/onert/core/src/exec/DynamicShapeInferer.cc
@@ -423,11 +423,11 @@ void DynamicShapeInferer::visit(const ir::operation::Fill &op)
   assert(dims_buf);
   const auto &dims_shape = shape->getShape();
-  auto output_shape = ((dims_type == ir::DataType::INT32)
-                         ? shape_inference::inferFillShape(
-                             dims_shape, reinterpret_cast(dims_buf))
-                         : shape_inference::inferFillShape(
-                             dims_shape, reinterpret_cast(dims_buf)));
+  const auto &output_shape = ((dims_type == ir::DataType::INT32)
+                                ? shape_inference::inferFillShape(
+                                    dims_shape, reinterpret_cast(dims_buf))
+                                : shape_inference::inferFillShape(
+                                    dims_shape, reinterpret_cast(dims_buf)));
   output->applyShape(output_shape);
   assert(output->buffer() != nullptr);
diff --git a/runtime/onert/core/src/exec/Execution.cc b/runtime/onert/core/src/exec/Execution.cc
index 1384c9f..f51bed8 100644
--- a/runtime/onert/core/src/exec/Execution.cc
+++ b/runtime/onert/core/src/exec/Execution.cc
@@ -48,7 +48,7 @@ void Execution::changeInputShape(const ir::IOIndex &index, const ir::Shape &new_
 void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length,
                          ir::Layout layout)
 {
-  const auto info = _executors->inputInfo(index);
+  const auto &info = _executors->inputInfo(index);
   // TODO handle when (!buffer && length != 0) : setting the input as an optional tensor
@@ -88,7 +88,7 @@ void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, con
 // TODO Remove default parameter
 void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout)
 {
-  const auto info = _executors->outputInfo(index);
+  const auto &info = _executors->outputInfo(index);
   if (length < info.total_size())
   {
@@ -102,7 +102,7 @@ void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length,
 void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type,
                           const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout)
 {
-  auto info = ir::OperandInfo::createStaticInfo(shape, type);
+  const auto &info = ir::OperandInfo::createStaticInfo(shape, type);
   if (length < info.total_size())
   {
diff --git a/runtime/onert/core/src/exec/ExecutorBase.cc b/runtime/onert/core/src/exec/ExecutorBase.cc
index ad00734..0bc088b 100644
--- a/runtime/onert/core/src/exec/ExecutorBase.cc
+++ b/runtime/onert/core/src/exec/ExecutorBase.cc
@@ -66,8 +66,8 @@ void ExecutorBase::execute(const std::vector &inputs
     assert(input_tensor != nullptr);
     if (input != nullptr)
     {
-      const auto orig_input_shape = input_tensor->orig_info().shape();
-      const auto changed_input_shape =
+      const auto &orig_input_shape = input_tensor->orig_info().shape();
+      const auto &changed_input_shape =
         convertShape(input->getShape(), input->layout(), input_tensor->orig_layout());
       if (input_tensor->get_info().shape() != changed_input_shape)
       {
diff --git a/runtime/onert/core/src/ir/Graph.cc b/runtime/onert/core/src/ir/Graph.cc
index ef0f988..306572c 100644
--- a/runtime/onert/core/src/ir/Graph.cc
+++ b/runtime/onert/core/src/ir/Graph.cc
@@ -168,7 +168,7 @@ void Graph::verify(void) const
 void Graph::initializeUseDef()
 {
   operations().iterate([&](const OperationIndex &index, const IOperation &node) -> void {
-    auto outputs = node.getOutputs();
+    const auto &outputs = node.getOutputs();
     for (auto &&output : outputs | ir::Remove::UNDEFINED)
     {
       operands().at(output).setDef(index);
diff --git a/runtime/onert/core/src/ir/OperationValidator.cc b/runtime/onert/core/src/ir/OperationValidator.cc
index cf7323d..09f773c 100644
--- a/runtime/onert/core/src/ir/OperationValidator.cc
+++ b/runtime/onert/core/src/ir/OperationValidator.cc
@@ -213,7 +213,7 @@ void OperationValidator::visit(const operation::DepthToSpace &node)
 void OperationValidator::visit(const operation::DetectionPostProcess &node)
 {
-  auto param = node.param();
+  const auto &param = node.param();
   // FIXME: number of classes should be 1 for now.
   OP_REQUIRES(param.num_classes == 1);
diff --git a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
index 21c7cdd..2265e99 100644
--- a/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
+++ b/runtime/onert/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
@@ -64,7 +64,7 @@ bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType
 {
   try
   {
-    const auto operand_type = _execution->primary_subgraph().operands().at(index).typeInfo();
+    const auto &operand_type = _execution->primary_subgraph().operands().at(index).typeInfo();
     const auto typeInfo = NNAPIConvert::getTypeInfo(type);
     if (operand_type != typeInfo)
@@ -111,7 +111,7 @@ bool ANeuralNetworksExecution::IsOptionalInput(const onert::ir::OperandIndex ind
 bool ANeuralNetworksExecution::hasUnspecifiedDims(const onert::ir::OperandIndex index) noexcept
 {
-  const auto operand_shape = _execution->primary_subgraph().operands().at(index).shape();
+  const auto &operand_shape = _execution->primary_subgraph().operands().at(index).shape();
   return operand_shape.hasUnspecifiedDims();
 }
@@ -138,7 +138,7 @@ bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOpe
   onert::ir::IOIndex input_index{index};
   const auto operand_index = getInputOperandIndex(index);
-  const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
+  const auto &type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
   const auto shape = (type != nullptr) ? NNAPIConvert::getShape(type)
                        : _execution->primary_subgraph().operands().at(operand_index).shape();
@@ -171,7 +171,6 @@ bool ANeuralNetworksExecution::setOptionalInput(uint32_t index,
   onert::ir::IOIndex input_index{index};
   const auto operand_index = getInputOperandIndex(index);
-  const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
   const auto shape = (type != nullptr) ? NNAPIConvert::getShape(type)
                        : _execution->primary_subgraph().operands().at(operand_index).shape();
@@ -206,7 +205,7 @@ bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOp
   onert::ir::IOIndex output_index{index};
   const auto operand_index = getOutputOperandIndex(index);
-  const auto type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
+  const auto &type_info = _execution->primary_subgraph().operands().at(operand_index).typeInfo();
   const auto shape = (type != nullptr) ? NNAPIConvert::getShape(type)
                        : _execution->primary_subgraph().operands().at(operand_index).shape();