coverity issue fix
author Inki Dae <inki.dae@samsung.com>
Tue, 25 Apr 2023 04:27:54 +0000 (13:27 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 25 Apr 2023 04:35:44 +0000 (04:35 +0000)
[Version] : 0.4.10
[Issue type] : bug fix

Fix a coverity issue - Out-of-bounds access.

NNStreamer uses a fixed tensor dimension of 16 (ML_TENSOR_RANK_LIMIT), so
correct the indim and out_dim array sizes accordingly.
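
For context, the ML API dimension getters and setters operate on a full
ml_tensor_dimension array of ML_TENSOR_RANK_LIMIT entries, so any
caller-side buffer smaller than that limit is accessed out of bounds.
A minimal sketch of the failure mode (illustrative only; the include path
and the hypothetical function are assumptions, not this backend's code):

#include <nnstreamer.h> /* ML API common header; include path is an assumption */

void sketch(ml_tensors_info_h info)
{
        unsigned int bad_dim[4];                     /* too small: the getter writes
                                                        ML_TENSOR_RANK_LIMIT entries -> OOB */
        unsigned int good_dim[ML_TENSOR_RANK_LIMIT]; /* sized by the API's own limit */

        /* Fills all ML_TENSOR_RANK_LIMIT entries of the destination array. */
        ml_tensors_info_get_tensor_dimension(info, 0, good_dim);
        (void)bad_dim;
}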

Change-Id: I9569398df8d29ed5b1b3a1a9ee84290aa2e1fee0
Signed-off-by: Inki Dae <inki.dae@samsung.com>
packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp

diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec
index ad18b74..d2ea2d9 100644
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.4.9
+Version:    0.4.10
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
diff --git a/src/inference_engine_mlapi.cpp b/src/inference_engine_mlapi.cpp
index 4f3adfc..7fbf997 100644
@@ -176,8 +176,8 @@ namespace MLAPIImpl
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                        }
 
-                       // TODO. nnstreamer needs fixed dimention with 4 for nntrainer tensor filter. Why??
-                       std::vector<unsigned int> indim(4, 1);
+                       // NNStreamer uses a fixed tensor dimension of 16.
+                       std::vector<unsigned int> indim(ML_TENSOR_RANK_LIMIT, 1);
 
                        LOGI("Input tensor(%zu) shape:", layer_idx);
 
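The resized indim vector is then handed to the ML API dimension setter
later in this function, beyond the hunk shown above. A hedged sketch of
that call pattern, with in_info as an assumed placeholder handle:

        std::vector<unsigned int> indim(ML_TENSOR_RANK_LIMIT, 1);
        // Real dimensions go into the leading entries; trailing entries keep
        // the neutral value 1 so the element count is unaffected.
        ml_tensors_info_set_tensor_dimension(in_info, layer_idx, indim.data());
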
@@ -616,7 +616,7 @@ namespace MLAPIImpl
                for (auto& output : mDesignated_outputs) {
                        inference_engine_tensor_info tensor_info;
                        ml_tensor_type_e out_type;
-                       unsigned int out_dim[MAX_TENSOR_DIMENSION_SIZE];
+                       unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
                        size_t out_size = 1;
 
                        ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
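
After the type query shown above, the loop goes on (beyond this hunk) to
read the dimensions into out_dim and fold them into out_size. A hedged
sketch of that continuation, reusing the names from the hunk and treating
a 0 entry as end-of-rank, since padding conventions differ across ML API
versions:

        ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, output.second, out_dim);
        if (ret != ML_ERROR_NONE)
                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;

        // Unused trailing entries are padded with 1 (or 0, depending on the
        // API version); stop at the first 0 so the product stays the element count.
        for (unsigned int i = 0; i < ML_TENSOR_RANK_LIMIT && out_dim[i] > 0; ++i)
                out_size *= out_dim[i];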