coverity issue fix 83/298983/1 accepted/tizen_7.0_unified tizen_7.0 accepted/tizen/7.0/unified/20230920.022050
authorInki Dae <inki.dae@samsung.com>
Tue, 25 Apr 2023 04:27:54 +0000 (13:27 +0900)
committerInki Dae <inki.dae@samsung.com>
Mon, 18 Sep 2023 04:53:45 +0000 (13:53 +0900)
[Version] : 0.4.10
[Issue type] : bug fix

Fix a coverity issue - Out-of-bounds access.

NNStreamer uses a fixed tensor dimension of 16, so correct the indims
and outdims array size.

Change-Id: I9569398df8d29ed5b1b3a1a9ee84290aa2e1fee0
Signed-off-by: Inki Dae <inki.dae@samsung.com>
packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp

index ad18b7485626d74d05404fcf32bcb42f6600fbac..d2ea2d945039d126adc8be728d22e3009194dfeb 100644 (file)
@@ -1,6 +1,6 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.4.9
+Version:    0.4.10
 Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
index 4f3adfc60d908c33fb239b1502d9803001340831..7fbf9976d0d95fd8e8d58f38014a3654edf4b3d5 100644 (file)
@@ -176,8 +176,8 @@ namespace MLAPIImpl
                                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                        }
 
-                       // TODO. nnstreamer needs fixed dimention with 4 for nntrainer tensor filter. Why??
-                       std::vector<unsigned int> indim(4, 1);
+                       // NNStreamer uses a fixed dimention with 16.
+                       std::vector<unsigned int> indim(ML_TENSOR_RANK_LIMIT, 1);
 
                        LOGI("Input tensor(%zu) shape:", layer_idx);
 
@@ -616,7 +616,7 @@ namespace MLAPIImpl
                for (auto& output : mDesignated_outputs) {
                        inference_engine_tensor_info tensor_info;
                        ml_tensor_type_e out_type;
-                       unsigned int out_dim[MAX_TENSOR_DIMENSION_SIZE];
+                       unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
                        size_t out_size = 1;
 
                        ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);