delete[] (unsigned char *)tensor_buffer.buffer;
}
- LOGI("input tensor buffers(%d) have been released.", mInputTensorBuffers.size());
+ LOGI("input tensor buffers(%zu) have been released.", mInputTensorBuffers.size());
std::vector<inference_engine_tensor_buffer>().swap(mInputTensorBuffers);
}
delete[] (unsigned char *)tensor_buffer.buffer;
}
- LOGI("output tensor buffers(%d) have been released.", mOutputTensorBuffers.size());
+ LOGI("output tensor buffers(%zu) have been released.", mOutputTensorBuffers.size());
std::vector<inference_engine_tensor_buffer>().swap(mOutputTensorBuffers);
}
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
}
- LOGI("Allocated input tensor buffer(size = %d, data type = %d)", tensor_info.size, tensor_info.data_type);
+ LOGI("Allocated input tensor buffer(size = %zu, data type = %d)", tensor_info.size, tensor_info.data_type);
tensor_buffer.owner_is_backend = 0;
tensor_buffer.data_type = tensor_info.data_type;
mInputTensorBuffers.push_back(tensor_buffer);
}
}
- LOGI("Input tensor buffer count is %d", mInputTensorBuffers.size());
+ LOGI("Input tensor buffer count is %zu", mInputTensorBuffers.size());
// Get output tensor buffers from a backend engine if the backend engine allocated.
ret = mBackend->GetOutputTensorBuffers(mOutputTensorBuffers);
return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
}
- LOGI("Allocated output tensor buffer(size = %d, data type = %d)", tensor_info.size, tensor_info.data_type);
+ LOGI("Allocated output tensor buffer(size = %zu, data type = %d)", tensor_info.size, tensor_info.data_type);
tensor_buffer.owner_is_backend = 0;
tensor_buffer.data_type = tensor_info.data_type;
}
}
- LOGI("Output tensor buffer count is %d", mOutputTensorBuffers.size());
+ LOGI("Output tensor buffer count is %zu", mOutputTensorBuffers.size());
return MEDIA_VISION_ERROR_NONE;
}
cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer)(cvRoi).clone();
}
LOGE("Size: w:%d, h:%d", cvSource.size().width, cvSource.size().height);
if (mCh != 1 && mCh != 3) {
LOGE("Channel not supported.");
// a model may apply post-process but others may not.
// Thus, those cases should be hanlded separately.
std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- LOGI("inferDimInfo size: %d", outputData.dimInfo.size());
+ LOGI("inferDimInfo size: %zu", outputData.dimInfo.size());
std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
- LOGI("inferResults size: %d", inferResults.size());
+ LOGI("inferResults size: %zu", inferResults.size());
float* boxes = nullptr;
float* classes = nullptr;
// a model may apply post-process but others may not.
// Thus, those cases should be hanlded separately.
std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
- LOGI("inferDimInfo size: %d", outputData.dimInfo.size());
+ LOGI("inferDimInfo size: %zu", outputData.dimInfo.size());
std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
- LOGI("inferResults size: %d", inferResults.size());
+ LOGI("inferResults size: %zu", inferResults.size());
float* boxes = nullptr;
float* classes = nullptr;