fix build error on aarch64
author    Inki Dae <inki.dae@samsung.com>
          Fri, 24 Apr 2020 03:28:01 +0000 (12:28 +0900)
committer Inki Dae <inki.dae@samsung.com>
          Fri, 24 Apr 2020 03:45:28 +0000 (12:45 +0900)
On aarch64, size_t is a 64-bit type, so make sure to use the correct
format specifier, %zu, when printing size_t values.

Change-Id: I4e22b46ada58ea520900fcd6271d7582a31347e9
Signed-off-by: Inki Dae <inki.dae@samsung.com>
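
For context, a minimal standalone C program (not part of the patch) that
reproduces the class of bug being fixed: on LP64 targets such as aarch64,
size_t is 64 bits wide while %d consumes a 32-bit int, so the old format
strings no longer matched their arguments.

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t count = 5;  /* stands in for mInputTensorBuffers.size() */

        /* Wrong on LP64: %d expects int but count is a 64-bit size_t;
         * GCC/Clang warn under -Wformat, and -Werror makes it a build error. */
        /* printf("input tensor buffers(%d) have been released.\n", count); */

        /* Correct: %zu is the conversion specifier dedicated to size_t. */
        printf("input tensor buffers(%zu) have been released.\n", count);
        return 0;
    }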
mv_inference/inference/src/Inference.cpp

index d75607d..ace5315 100755
@@ -527,7 +527,7 @@ void Inference::CleanupTensorBuffers(void)
                                delete[] (unsigned char *)tensor_buffer.buffer;
                }
 
-               LOGI("input tensor buffers(%d) have been released.", mInputTensorBuffers.size());
+               LOGI("input tensor buffers(%zu) have been released.", mInputTensorBuffers.size());
                std::vector<inference_engine_tensor_buffer>().swap(mInputTensorBuffers);
        }
 
@@ -548,7 +548,7 @@ void Inference::CleanupTensorBuffers(void)
                                delete[] (unsigned char *)tensor_buffer.buffer;
                }
 
-               LOGI("output tensor buffers(%d) have been released.", mOutputTensorBuffers.size());
+               LOGI("output tensor buffers(%zu) have been released.", mOutputTensorBuffers.size());
                std::vector<inference_engine_tensor_buffer>().swap(mOutputTensorBuffers);
        }
 
@@ -608,14 +608,14 @@ int Inference::PrepareTenosrBuffers(void)
                                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
                        }
 
-                       LOGI("Allocated input tensor buffer(size = %d, data type = %d)", tensor_info.size, tensor_info.data_type);
+                       LOGI("Allocated input tensor buffer(size = %zu, data type = %d)", tensor_info.size, tensor_info.data_type);
                        tensor_buffer.owner_is_backend = 0;
                        tensor_buffer.data_type = tensor_info.data_type;
                        mInputTensorBuffers.push_back(tensor_buffer);
                }
        }
 
-       LOGI("Input tensor buffer count is %d", mInputTensorBuffers.size());
+       LOGI("Input tensor buffer count is %zu", mInputTensorBuffers.size());
 
        // Get output tensor buffers from a backend engine if the backend engine allocated.
        ret = mBackend->GetOutputTensorBuffers(mOutputTensorBuffers);
@@ -654,7 +654,7 @@ int Inference::PrepareTenosrBuffers(void)
                                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
                        }
 
-                       LOGI("Allocated output tensor buffer(size = %d, data type = %d)", tensor_info.size, tensor_info.data_type);
+                       LOGI("Allocated output tensor buffer(size = %zu, data type = %d)", tensor_info.size, tensor_info.data_type);
 
                        tensor_buffer.owner_is_backend = 0;
                        tensor_buffer.data_type = tensor_info.data_type;
@@ -662,7 +662,7 @@ int Inference::PrepareTenosrBuffers(void)
                }
        }
 
-       LOGI("Output tensor buffer count is %d", mOutputTensorBuffers.size());
+       LOGI("Output tensor buffer count is %zu", mOutputTensorBuffers.size());
 
        return MEDIA_VISION_ERROR_NONE;
 }
@@ -925,7 +925,7 @@ int Inference::Run(std::vector<mv_source_h> &mvSources, std::vector<mv_rectangle
                cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer)(cvRoi).clone();
        }
 
-       LOGE("Size: w:%d, h:%d", cvSource.size().width, cvSource.size().height);
+       LOGE("Size: w:%zu, h:%zu", cvSource.size().width, cvSource.size().height);
 
        if (mCh != 1 && mCh != 3) {
                LOGE("Channel not supported.");
@@ -1038,10 +1038,10 @@ int Inference::GetObjectDetectionResults(ObjectDetectionResults *detectionResult
        // a model may apply post-process but others may not.
        // Thus, those cases should be handled separately.
        std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
-       LOGI("inferDimInfo size: %d", outputData.dimInfo.size());
+       LOGI("inferDimInfo size: %zu", outputData.dimInfo.size());
 
        std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
-       LOGI("inferResults size: %d", inferResults.size());
+       LOGI("inferResults size: %zu", inferResults.size());
 
        float* boxes = nullptr;
        float* classes = nullptr;
@@ -1139,10 +1139,10 @@ int Inference::GetFaceDetectionResults(FaceDetectionResults *detectionResults)
        // a model may apply post-process but others may not.
        // Thus, those cases should be handled separately.
        std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
-       LOGI("inferDimInfo size: %d", outputData.dimInfo.size());
+       LOGI("inferDimInfo size: %zu", outputData.dimInfo.size());
 
        std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
-       LOGI("inferResults size: %d", inferResults.size());
+       LOGI("inferResults size: %zu", inferResults.size());
 
        float* boxes = nullptr;
        float* classes = nullptr;
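
These mismatches are caught at compile time when the logging function
carries a printf-style format attribute, which is what lets -Wformat (and
-Werror) turn them into the build error this commit fixes. A minimal sketch
with a hypothetical logger log_info (LOGI/LOGE are Tizen dlog macros;
whether the attribute applies there depends on the platform headers):

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical printf-style logger. The format attribute lets the
     * compiler check conversion specifiers against argument types. */
    __attribute__((format(printf, 1, 2)))
    static void log_info(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        size_t n = 3;
        log_info("count is %zu\n", n);  /* %d here would be flagged by -Wformat */
        return 0;
    }

Compiling the %d variant of the call with gcc -Wformat reproduces the
warning that, under -Werror, breaks the aarch64 build.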