LOGI("ENTER");
if (!mInputTensorBuffers.empty()) {
- mInputTensorBuffers.clear();
+ mInputTensorBuffers.release();
}
if (!mOutputTensorBuffers.empty()) {
- mOutputTensorBuffers.clear();
+ mOutputTensorBuffers.release();
}
LOGI("LEAVE");
// then allocate the buffers at here.
if (mInputTensorBuffers.empty()) {
for(auto& layer : mInputLayerProperty.layers) {
- const inference_engine_tensor_info& tensor_info = layer.second;
inference_engine_tensor_buffer tensor_buffer;
- if (tensor_info.data_type ==
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
- tensor_buffer.buffer = new float[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else if (tensor_info.data_type ==
- INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- tensor_buffer.buffer = new unsigned char[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else if (tensor_info.data_type ==
- INFERENCE_TENSOR_DATA_TYPE_UINT16) {
- tensor_buffer.buffer = new unsigned short[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else {
- LOGE("Invalid input tensor data type.");
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
- if (tensor_buffer.buffer == NULL) {
- LOGE("Fail to allocate input tensor buffer.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ ret = mInputTensorBuffers.allocate(tensor_buffer, layer.second);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to allocate tensor buffer.");
+ mInputTensorBuffers.release();
+ return ret;
}
- LOGI("Allocated input tensor buffer(size = %zu, data type = %d)",
- tensor_info.size, tensor_info.data_type);
- tensor_buffer.owner_is_backend = 0;
- tensor_buffer.data_type = tensor_info.data_type;
mInputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
}
}
// then allocate the buffers at here.
if (mOutputTensorBuffers.empty()) {
for (auto& layer : mOutputLayerProperty.layers) {
- const inference_engine_tensor_info& tensor_info = layer.second;
inference_engine_tensor_buffer tensor_buffer;
- if (tensor_info.data_type ==
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
- tensor_buffer.buffer = new float[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else if (tensor_info.data_type ==
- INFERENCE_TENSOR_DATA_TYPE_INT64) {
- tensor_buffer.buffer = new long long[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else if (tensor_info.data_type ==
- INFERENCE_TENSOR_DATA_TYPE_UINT32) {
- tensor_buffer.buffer = new unsigned int[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else if (tensor_info.data_type ==
- INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- tensor_buffer.buffer = new char[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else if (tensor_info.data_type ==
- INFERENCE_TENSOR_DATA_TYPE_UINT16) {
- tensor_buffer.buffer = new unsigned short[tensor_info.size];
- tensor_buffer.size = tensor_info.size;
- } else {
- LOGE("Invalid output tensor data type.");
- CleanupTensorBuffers();
- return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- }
- if (tensor_buffer.buffer == NULL) {
- LOGE("Fail to allocate output tensor buffer.");
- CleanupTensorBuffers();
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ ret = mOutputTensorBuffers.allocate(tensor_buffer, layer.second);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to allocate tensor buffer.");
+ mInputTensorBuffers.release();
+ mOutputTensorBuffers.release();
+ return ret;
}
- LOGI("Allocated output tensor buffer(size = %zu, data type = %d)",
- tensor_info.size, tensor_info.data_type);
-
- tensor_buffer.owner_is_backend = 0;
- tensor_buffer.data_type = tensor_info.data_type;
mOutputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
}
}
return getTensorBuffer(name) != nullptr;
}
- void TensorBuffer::clear()
+ int TensorBuffer::allocate(inference_engine_tensor_buffer& tensor_buffer,
+ const inference_engine_tensor_info& tensor_info)
+ {
+ if (tensor_info.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
+ tensor_info.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
+ LOGE("Invalid tensor data type.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ try {
+ if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32)
+ tensor_buffer.buffer = new float[tensor_info.size];
+ else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64)
+ tensor_buffer.buffer = new long long[tensor_info.size];
+ else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT64)
+ tensor_buffer.buffer = new unsigned long long[tensor_info.size];
+ else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT32)
+ tensor_buffer.buffer = new unsigned int[tensor_info.size];
+ else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8)
+ tensor_buffer.buffer = new unsigned char[tensor_info.size];
+ else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16)
+ tensor_buffer.buffer = new unsigned short[tensor_info.size];
+ else {
+ LOGE("Unsupported tensor data type.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+ } catch (const std::bad_alloc& e) {
+ LOGE("Fail to allocate tensor buffer.(%s)", e.what());
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ tensor_buffer.size = tensor_info.size;
+
+ LOGI("Allocated tensor buffer(size = %zu, data type = %d)",
+ tensor_info.size, tensor_info.data_type);
+ tensor_buffer.owner_is_backend = 0;
+ tensor_buffer.data_type = tensor_info.data_type;
+
+ return MEDIA_VISION_ERROR_NONE;
+ }
+
+ void TensorBuffer::release()
{
for (auto& tensorBuffer : mTensorBuffer) {
auto& tBuffer = tensorBuffer.second;
bool TensorBuffer::setTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer)
{
- if (name.empty() ||
- buffer.buffer == nullptr) {
+ if (name.empty() || buffer.buffer == nullptr) {
LOGE("Invalid parameters: %s, %p", name.c_str(), buffer.buffer);
return false;
}