return MEDIA_VISION_ERROR_NONE;
}
+int Inference::FillOutputResult(tensor_t &outputData)
+{
+	// Copy per-tensor dimension info and data pointers from the inference
+	// engine's output buffers into outputData. Quantized (uint8) output
+	// tensors are converted in place to float values in [0, 1] so callers
+	// can treat every output tensor uniformly as float data.
+	//
+	// @param[out] outputData  Receives one dimInfo vector and one data
+	//                         pointer per output tensor.
+	// @return MEDIA_VISION_ERROR_NONE on success,
+	//         MEDIA_VISION_ERROR_OUT_OF_MEMORY if a conversion buffer
+	//         cannot be allocated.
+	for (size_t i = 0; i < mOutputLayerProperty.tensor_infos.size(); ++i) {
+		inference_engine_tensor_info tensor_info = mOutputLayerProperty.tensor_infos[i];
+
+		std::vector<int> tmpDimInfo;
+		// Use a distinct index name: the previous code shadowed the outer 'i'.
+		for (size_t d = 0; d < tensor_info.shape.size(); d++) {
+			tmpDimInfo.push_back(tensor_info.shape[d]);
+		}
+
+		outputData.dimInfo.push_back(tmpDimInfo);
+
+		// Normalize output tensor data converting it to float type in case of quantized model.
+		if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+			unsigned char *ori_buf = (unsigned char *)mOutputTensorBuffers[i].buffer;
+			// Plain operator new throws std::bad_alloc and never returns
+			// NULL, which made the error check below dead code; use the
+			// nothrow form so allocation failure is reported as an error
+			// code instead of an unhandled exception. (Requires <new>.)
+			float *new_buf = new (std::nothrow) float[tensor_info.size];
+			if (new_buf == NULL) {
+				LOGE("Fail to allocate a new output tensor buffer.");
+				return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+			}
+
+			for (size_t j = 0; j < tensor_info.size; j++) {
+				new_buf[j] = (float)ori_buf[j] / 255.0f;
+			}
+
+			// replace original buffer with new one, and release origin one.
+			// NOTE(review): this assumes the backend allocated the original
+			// buffer with new[] unsigned char — confirm the backend's
+			// allocation scheme, otherwise delete[] here is undefined
+			// behavior. The float buffer stored below must likewise be
+			// released as float[] by whoever frees mOutputTensorBuffers.
+			mOutputTensorBuffers[i].buffer = new_buf;
+			delete[] ori_buf;
+		}
+
+		outputData.data.push_back((void *)mOutputTensorBuffers[i].buffer);
+	}
+
+	return MEDIA_VISION_ERROR_NONE;
+}
+
int Inference::Bind(void)
{
LOGI("ENTER");
int Inference::GetClassficationResults(ImageClassificationResults *classificationResults)
{
tensor_t outputData;
- for (int i = 0; i < mOutputLayerProperty.tensor_infos.size(); ++i) {
- inference_engine_tensor_info tensor_info = mOutputLayerProperty.tensor_infos[i];
-
- std::vector<int> tmpDimInfo;
- for (int i = 0; i < (int)tensor_info.shape.size(); i++) {
- tmpDimInfo.push_back(tensor_info.shape[i]);
- }
-
- outputData.dimInfo.push_back(tmpDimInfo);
-
- // Normalize output tensor data converting it to float type in case of quantized model.
- if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
- unsigned char *ori_buf = (unsigned char *)mOutputTensorBuffers[i].buffer;
- float *new_buf = new float[tensor_info.size];
- if (new_buf == NULL) {
- LOGE("Fail to allocate a new output tensor buffer.");
- return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
- }
-
- for (int j = 0; j < tensor_info.size; j++) {
- new_buf[j] = (float)ori_buf[j] / 255.0f;
- }
-
- // replace original buffer with new one, and release origin one.
- mOutputTensorBuffers[i].buffer = new_buf;
- delete[] ori_buf;
- }
- outputData.data.push_back((void *)mOutputTensorBuffers[i].buffer);
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output result.");
+ return ret;
}
// Will contain top N results in ascending order.
int Inference::GetObjectDetectionResults(ObjectDetectionResults *detectionResults)
{
tensor_t outputData;
- int ret = mBackend->GetInferenceResult(outputData);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to GetObjectDetectionResults");
- return ConvertEngineErrorToVisionError(ret);
+
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output result.");
+ return ret;
}
std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
-
float* boxes = reinterpret_cast<float*>(inferResults[0]);
float* classes = reinterpret_cast<float*>(inferResults[1]);
float* scores = reinterpret_cast<float*>(inferResults[2]);
int number_of_detections = (int)(*reinterpret_cast<float*>(inferResults[3]));
+ LOGI("number_of_detections = %d", number_of_detections);
+
int left, top, right, bottom;
cv::Rect loc;
int Inference::GetFaceDetectionResults(FaceDetectionResults *detectionResults)
{
tensor_t outputData;
- int ret = mBackend->GetInferenceResult(outputData);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to GetFaceDetectionResults");
- return ConvertEngineErrorToVisionError(ret);
+
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output result.");
+ return ret;
}
std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
int Inference::GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *detectionResults)
{
tensor_t outputData;
- int ret = mBackend->GetInferenceResult(outputData);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Fail to GetFacialLandMarkDetectionResults");
- return ConvertEngineErrorToVisionError(ret);
+
+ // Get inference result and contain it to outputData.
+ int ret = FillOutputResult(outputData);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output result.");
+ return ret;
}
std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);