Fix Coverity major defects
tags: accepted/tizen/unified/20240308.174145, accepted/tizen/unified/x/20240311.100104
author    Chunseok Lee <chunseok.lee@samsung.com>  Thu, 7 Mar 2024 07:55:55 +0000 (16:55 +0900)
committer Chunseok Lee <chunseok.lee@samsung.com>  Fri, 8 Mar 2024 02:14:41 +0000 (11:55 +0900)
- List of fixed defects (all reported by the AUTO_CAUSES_COPY checker; a minimal illustration of the pattern follows the table):

|  # | Severity | CID     | Triage    | Checker          | File:Line                                                                                              | Status |
|  6 | Major    | 1738991 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/onert/core/src/dumper/dot/OperandNode.cc:38           | R |
|  7 | Major    | 1739127 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/libs/benchmark/src/Result.cpp:164                     | R |
|  9 | Major    | 1740270 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/onert/core/src/compiler/StaticShapeInferer.cc:806     | R |
| 11 | Major    | 1741349 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/onert/backend/cpu/KernelGenerator.cc:632              | R |
| 12 | Major    | 1742302 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/libs/benchmark/src/Result.cpp:160                     | R |
| 13 | Major    | 1742935 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/compute/cker/include/cker/operation/Helper/MatmulBCast.h:66   | R |
| 14 | Major    | 1743063 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/onert/backend/cpu/KernelGenerator.cc:310              | R |
| 17 | Major    | 1744612 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/onert/backend/gpu_cl/KernelGenerator.cc:113           | R |
| 19 | Major    | 1745118 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/onert/backend/gpu_cl/KernelGenerator.cc:517           | R |
| 23 | Major    | 1747394 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/compute/cker/include/cker/operation/Helper/MatmulBCast.h:65   | R |
| 25 | Major    | 1748390 | Undecided | AUTO_CAUSES_COPY | /home/abuild/rpmbuild/BUILD/nnfw-1.25.0/runtime/libs/benchmark/src/Result.cpp:169                     | R |

compute/cker/include/cker/operation/Helper/MatmulBCast.h
runtime/libs/benchmark/src/Result.cpp
runtime/onert/backend/cpu/KernelGenerator.cc
runtime/onert/backend/gpu_cl/KernelGenerator.cc
runtime/onert/core/src/compiler/StaticShapeInferer.cc
runtime/onert/core/src/dumper/dot/OperandNode.cc

diff --git a/compute/cker/include/cker/operation/Helper/MatmulBCast.h b/compute/cker/include/cker/operation/Helper/MatmulBCast.h
index b80ccc0..b7d6394 100644
@@ -62,13 +62,13 @@ public:
     if (!_batch_bcast->IsValid())
       return;
 
-    auto x_reshaped = _batch_bcast->x_reshape();
-    auto y_reshaped = _batch_bcast->y_reshape();
+    const auto &x_reshaped = _batch_bcast->x_reshape();
+    const auto &y_reshaped = _batch_bcast->y_reshape();
     auto output_shape = _batch_bcast->output_shape();
 
     _x_batch_size = std::accumulate(x_reshaped.cbegin(), x_reshaped.cend(), INT32_C(1),
                                     std::multiplies<int32_t>());
-    _y_batch_size = std::accumulate(x_reshaped.cbegin(), x_reshaped.cend(), INT32_C(1),
+    _y_batch_size = std::accumulate(y_reshaped.cbegin(), y_reshaped.cend(), INT32_C(1),
                                     std::multiplies<int32_t>());
     _output_shape.ReplaceWith(output_shape.size(), output_shape.data());
     _output_batch_size = _output_shape.FlatSize();
diff --git a/runtime/libs/benchmark/src/Result.cpp b/runtime/libs/benchmark/src/Result.cpp
index 04925f4..8c1e2d2 100644
@@ -157,16 +157,16 @@ namespace benchmark
 
 Result::Result(const Phases &phases)
 {
-  const auto option = phases.option();
+  const auto &option = phases.option();
   {
     for (int i = PhaseEnum::MODEL_LOAD; i <= PhaseEnum::PREPARE; ++i)
     {
-      auto phase = phases.at(gPhaseStrings[i]);
+      const auto &phase = phases.at(gPhaseStrings[i]);
       time[i][FigureType::MEAN] = averageTimeMs(phase);
     }
 
     int i = PhaseEnum::EXECUTE;
-    auto exec_phase = phases.at(gPhaseStrings[i]);
+    const auto &exec_phase = phases.at(gPhaseStrings[i]);
     time[i][FigureType::MEAN] = averageTimeMs(exec_phase);
     time[i][FigureType::MAX] = maxTimeMs(exec_phase);
     time[i][FigureType::MIN] = minTimeMs(exec_phase);
diff --git a/runtime/onert/backend/cpu/KernelGenerator.cc b/runtime/onert/backend/cpu/KernelGenerator.cc
index d462daf..dff54c1 100644
@@ -307,7 +307,7 @@ void KernelGenerator::visit(const ir::operation::Conv2D &node)
 
   const auto stride = node.param().stride;
   const auto activation = node.param().activation;
-  const auto param_padding = node.param().padding;
+  const auto &param_padding = node.param().padding;
   const auto dilation = node.param().dilation;
   auto fn = std::make_unique<ops::ConvolutionLayer>();
 
@@ -629,7 +629,7 @@ void KernelGenerator::visit(const ir::operation::Einsum &node)
   for (const auto &ifm_idx : node.getInputs())
     input_tensors.emplace_back(_tensor_reg->getPortableTensor(ifm_idx));
 
-  const auto equation = node.param().equation;
+  const auto &equation = node.param().equation;
 
   auto fn = std::make_unique<ops::EinsumLayer>();
 
diff --git a/runtime/onert/backend/gpu_cl/KernelGenerator.cc b/runtime/onert/backend/gpu_cl/KernelGenerator.cc
index 31d3134..de8d3b4 100644
@@ -110,7 +110,7 @@ void KernelGenerator::get_operation(FunctionMap &Functions)
 absl::Status KernelGenerator::readConstTensor(const ir::OperandIndex &index,
                                               tflite::gpu::TensorOrScalar *param)
 {
-  const auto shape = _ctx.at(index).shape();
+  const auto &shape = _ctx.at(index).shape();
   if (shape.rank() == 0 && shape.num_elements() == 1)
   {
     tflite::gpu::Tensor<tflite::gpu::Scalar, tflite::gpu::DataType::FLOAT32> tensor;
@@ -514,7 +514,7 @@ void KernelGenerator::visit(const ir::operation::DepthwiseConv2D &node)
     {
       std::unique_ptr<tflite::gpu::GPUOperation> gpu_op_1;
       tflite::gpu::OperationDef op_def_1;
-      const auto shape = _ctx.at(ofm_index).shape();
+      const auto &shape = _ctx.at(ofm_index).shape();
       auto new_ind = _tensor_reg->addNewClTensor(shape);
 
       addClNode({ifm_index}, {new_ind}, std::move(gpu_op));
diff --git a/runtime/onert/core/src/compiler/StaticShapeInferer.cc b/runtime/onert/core/src/compiler/StaticShapeInferer.cc
index 68cff7e..ec5d214 100644
@@ -803,7 +803,7 @@ void StaticShapeInferer::visit(const ir::operation::Permute &op)
   // However, it is not applied here, so input/output have the same layout of frontend. Because
   // "ExecutorFactory" would convert shape of input/output accoding to the layouts when registering
   // operand info to "TensorBuilder" after calling "StaticShapeInferer"
-  const auto new_shape = input.info().shape();
+  const auto &new_shape = input.info().shape();
   output.info().shape(new_shape);
 }
 
diff --git a/runtime/onert/core/src/dumper/dot/OperandNode.cc b/runtime/onert/core/src/dumper/dot/OperandNode.cc
index 88f5254..49319d5 100644
@@ -35,7 +35,7 @@ Operand::Operand(const ir::OperandIndex &index, Type type)
   : Node{"operand" + std::to_string(index.value())}
 {
   {
-    auto type_to_shape = [](Type type) {
+    auto type_to_shape = [](Type type) -> const auto & {
       switch (type)
       {
         case Type::MODEL_INPUT: