return ret;
}
- mInputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
+ mInputTensorBuffers.addTensorBuffer(layer.first, tensor_buffer);
}
}
return ret;
}
- mOutputTensorBuffers.setTensorBuffer(layer.first, tensor_buffer);
+ mOutputTensorBuffers.addTensorBuffer(layer.first, tensor_buffer);
}
}
outputData.dimInfo.push_back(tmpDimInfo);
- inference_engine_tensor_buffer* tensorBuffers =
+ inference_engine_tensor_buffer* tensorBuffer =
mOutputTensorBuffers.getTensorBuffer(layer.first);
- if (tensorBuffers == NULL) {
+ if (tensorBuffer == NULL) {
LOGE("Fail to getTensorBuffer with name %s", layer.first.c_str());
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
// Normalize output tensor data converting it to float type in case of quantized model.
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- float *new_buf = new float[tensor_info.size];
- if (new_buf == NULL) {
- LOGE("Fail to allocate a new output tensor buffer.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- auto *ori_buf = static_cast<unsigned char *>(
- tensorBuffers->buffer);
-
- for (size_t j = 0; j < tensor_info.size; j++) {
- new_buf[j] = static_cast<float>(ori_buf[j]) / 255.0f;
+ int ret = mOutputTensorBuffers.convertToFloat<unsigned char>(tensorBuffer, tensor_info);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to convert tensor data to float type.");
+ return ret;
}
-
- // replace original buffer with new one, and release origin one.
- tensorBuffers->buffer = new_buf;
-
- if (!tensorBuffers->owner_is_backend)
- delete[] ori_buf;
}
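+ // UINT16 tensor data is likewise converted to float, but without the 8-bit scaling.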
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) {
- float *new_buf = new float[tensor_info.size];
- if (new_buf == NULL) {
- LOGE("Fail to allocate a new output tensor buffer.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- auto *ori_buf =
- static_cast<short *>(tensorBuffers->buffer);
-
- for (size_t j = 0; j < tensor_info.size; j++) {
- new_buf[j] = static_cast<float>(ori_buf[j]);
+ int ret = mOutputTensorBuffers.convertToFloat<unsigned short>(tensorBuffer, tensor_info);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to convert tensor data to float type.");
+ return ret;
}
-
- // replace original buffer with new one, and release origin one.
- tensorBuffers->buffer = new_buf;
-
- if (!tensorBuffers->owner_is_backend)
- delete[] ori_buf;
}
- outputData.data.push_back(static_cast<void *>(tensorBuffers->buffer));
+ outputData.data.push_back(static_cast<void *>(tensorBuffer->buffer));
}
return MEDIA_VISION_ERROR_NONE;
return &mTensorBuffer[name];
}
- bool TensorBuffer::setTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer)
+ bool TensorBuffer::addTensorBuffer(std::string name, inference_engine_tensor_buffer& buffer)
{
if (name.empty() || buffer.buffer == nullptr) {
LOGE("Invalid parameters: %s, %p", name.c_str(), buffer.buffer);
}
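+ // Convert the given tensor buffer to FLOAT32 in place: allocate a new float
+ // buffer, copy the quantized values into it, and swap it into tensorBuffer.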
template <typename T>
+ int TensorBuffer::convertToFloat(inference_engine_tensor_buffer *tensorBuffer, const inference_engine_tensor_info& tensor_info)
+ {
+ float *new_buf = new(std::nothrow) float[tensor_info.size];
+ if (new_buf == NULL) {
+ LOGE("Fail to allocate a new output tensor buffer.");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ auto ori_buf = static_cast<T *>(tensorBuffer->buffer);
+
+ // Normalize only 8-bit quantized data to [0, 1]; wider integer types are
+ // cast to float as-is, matching the per-type paths this helper replaces.
+ for (size_t idx = 0; idx < tensor_info.size; idx++)
+ new_buf[idx] = (sizeof(T) == 1) ?
+ static_cast<float>(ori_buf[idx]) / 255.0f :
+ static_cast<float>(ori_buf[idx]);
+
+ // Replace the original buffer with the new one, releasing the original if this object owns it.
+ tensorBuffer->buffer = new_buf;
+
+ if (!tensorBuffer->owner_is_backend)
+ delete[] ori_buf;
+
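+ // The buffer now holds float data owned by this object rather than the
+ // backend, so update the tensor metadata to match.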
+ tensorBuffer->data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensorBuffer->owner_is_backend = false;
+
+ return MEDIA_VISION_ERROR_NONE;
+ }
+
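+ // Return a single element of type T from the tensor buffer registered under name.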
+ template <typename T>
T TensorBuffer::getValue(std::string name, int idx)
{
inference_engine_tensor_buffer* tBuffer =
template float TensorBuffer::getValue<float>(std::string, int);
template int TensorBuffer::getValue<int>(std::string, int);
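+ // Instantiations cover the quantized output types handled by Inference:
+ // UINT8 (unsigned char) and UINT16 (unsigned short).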
+ template int TensorBuffer::convertToFloat<unsigned char>(inference_engine_tensor_buffer*, const inference_engine_tensor_info&);
+ template int TensorBuffer::convertToFloat<unsigned short>(inference_engine_tensor_buffer*, const inference_engine_tensor_info&);
+
} /* Inference */
} /* MediaVision */