try {
	// Allocate the backing store for one tensor. tensor_info.size is the
	// element count (not a byte count), so each branch news an array of the
	// matching element type. The trailing "()" value-initializes every
	// element to zero, so callers never observe uninitialized memory.
	if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
		tensor_buffer.buffer = new float[tensor_info.size]();
	else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64)
		tensor_buffer.buffer = new long long[tensor_info.size]();
	else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64)
		tensor_buffer.buffer = new unsigned long long[tensor_info.size]();
	else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32)
		tensor_buffer.buffer = new unsigned int[tensor_info.size]();
	else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8)
		// NOTE(review): UINT8 is allocated as plain char, not unsigned char.
		// Same size/alignment, so this is harmless, but it is inconsistent
		// with the other unsigned branches — confirm the matching delete[]
		// site before changing the element type.
		tensor_buffer.buffer = new char[tensor_info.size]();
	else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16)
		tensor_buffer.buffer = new unsigned short[tensor_info.size]();
	// NOTE(review): there is no final else — for any other data_type the
	// buffer is left untouched. Presumably a caller or later check rejects
	// unknown types; verify, or a null/stale buffer could escape here.
} catch (const std::bad_alloc &e) {
	// new[] signals exhaustion via std::bad_alloc; translate it into the
	// module's error-code convention instead of letting it propagate.
	LOGE("Fail to allocate tensor buffer.(%s)", e.what());
	return MEDIA_VISION_ERROR_OUT_OF_MEMORY;