mWeightFile(weightFile),
mUserFile(userFile)
{
- LOGE("ENTER");
- LOGE("LEAVE");
+ LOGI("ENTER");
+ LOGI("LEAVE");
}
InferenceTFLite::~InferenceTFLite()
mDim = dim;
mInputSize = cv::Size(width, height);
- LOGE("InputSize is %d x %d\n", mInputSize.width, mInputSize.height);
+ LOGI("InputSize is %d x %d\n", mInputSize.width, mInputSize.height);
return INFERENCE_ENGINE_ERROR_NONE;
}
case INFERENCE_TARGET_CUSTOM:
case INFERENCE_TARGET_NONE:
default:
- LOGE("Not supported device type [%d], Set CPU mode", (int)type);
+ LOGW("Not supported device type [%d], Set CPU mode", (int)type);
}
return INFERENCE_ENGINE_ERROR_NONE;
ret = (userFileLength > 0) ? SetUserFile() : INFERENCE_ENGINE_ERROR_NONE;
if (ret != INFERENCE_ENGINE_ERROR_NONE)
- LOGE("Fail to read categoryList");
+ LOGW("Fail to read categoryList");
// input tensor
if (mInterpreter->inputs().size()) {
{
if (mInputAttrType == kTfLiteUInt8) {
mInputData = mInterpreter->typed_tensor<uint8_t>(mInputLayerId); //tflite
- LOGE("InputType is DT_UINT8");
+ LOGI("InputType is DT_UINT8");
}
else if (mInputAttrType == kTfLiteFloat32) {
mInputData = mInterpreter->typed_tensor<float>(mInputLayerId); //tflite
- LOGE("InputType is DT_FLOAT");
+ LOGI("InputType is DT_FLOAT");
}
else {
LOGE("Not supported");
TfLiteIntArray* dims = mInterpreter->tensor(mOutputLayerId[0])->dims;
const long count = dims->data[1];
- LOGE("dims size: %d", dims->size);
+ LOGI("dims size: %d", dims->size);
for (int k = 0; k < dims->size; ++k) {
- LOGE("%d: %d", k, dims->data[k]);
+ LOGI("%d: %d", k, dims->data[k]);
}
float *prediction = mInterpreter->typed_output_tensor<float>(0);
std::reverse(top_results.begin(), top_results.end());
int classIdx = -1;
+ results.number_of_classes = 0;
for (int idx = 0; idx < mOutputNumbers; ++idx) {
- LOGE("idx:%d", idx);
- classIdx = top_results[idx].second;
+ if (top_results[idx].first < mThreshold)
+ continue;
+ LOGI("idx:%d", idx);
LOGI("classIdx: %d", top_results[idx].second);
LOGI("classProb: %f", top_results[idx].first);
+ classIdx = top_results[idx].second;
results.indices.push_back(classIdx);
results.confidences.push_back(top_results[idx].first);
results.names.push_back(mUserListName[classIdx]);
+ results.number_of_classes++;
}
- results.number_of_classes = mOutputNumbers;
-
return INFERENCE_ENGINE_ERROR_NONE;
}
float* classes = mInterpreter->typed_tensor<float>(mOutputLayerId[1]);
float* scores = mInterpreter->typed_tensor<float>(mOutputLayerId[2]);
- int number_of_objects = 0;
int number_of_detections = (int)(*mInterpreter->typed_tensor<float>(mOutputLayerId[3]));
int left, top, right, bottom;
cv::Rect loc;
+ results.number_of_objects = 0;
for (int idx = 0; idx < number_of_detections; ++idx) {
if (scores[idx] < mThreshold)
continue;
results.confidences.push_back(scores[idx]);
results.names.push_back(mUserListName[(int)classes[idx]]);
results.locations.push_back(loc);
+ results.number_of_objects++;
- LOGE("objectClass: %d", (int)classes[idx]);
- LOGE("confidence:%f", scores[idx]);
- LOGE("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
- number_of_objects++;
+ LOGI("objectClass: %d", (int)classes[idx]);
+ LOGI("confidence:%f", scores[idx]);
+ LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
}
- results.number_of_objects = number_of_objects;
-
return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceTFLite::GetInferenceResult(FaceDetectionResults& results)
{
	// Post-processing for an SSD-style face detector. The model exposes four
	// output tensors: [0] boxes, [1] classes, [2] scores, [3] scalar count.
	float* boxes = mInterpreter->typed_tensor<float>(mOutputLayerId[0]);
	float* classes = mInterpreter->typed_tensor<float>(mOutputLayerId[1]);
	float* scores = mInterpreter->typed_tensor<float>(mOutputLayerId[2]);

	int number_of_detections = (int)(*mInterpreter->typed_tensor<float>(mOutputLayerId[3]));
	int left, top, right, bottom;
	cv::Rect loc;

	results.number_of_faces = 0;
	for (int idx = 0; idx < number_of_detections; ++idx) {
		// Skip detections below the configured confidence threshold.
		if (scores[idx] < mThreshold)
			continue;

		// Box layout appears to be [ymin, xmin, ymax, xmax] in normalized
		// coordinates (TFLite SSD convention) -- scale back to source pixels.
		left = (int)(boxes[idx*4 + 1] * mSourceSize.width);
		top = (int)(boxes[idx*4 + 0] * mSourceSize.height);
		right = (int)(boxes[idx*4 + 3] * mSourceSize.width);
		bottom = (int)(boxes[idx*4 + 2] * mSourceSize.height);

		loc.x = left;
		loc.y = top;
		loc.width = right - left + 1;
		loc.height = bottom - top + 1;

		results.confidences.push_back(scores[idx]);
		results.locations.push_back(loc);
		results.number_of_faces++;

		LOGI("confidence:%f", scores[idx]);
		LOGI("class: %f", classes[idx]);
		LOGI("left:%f, top:%f, right:%f, bottom:%f", boxes[idx*4 + 1], boxes[idx*4 + 0], boxes[idx*4 + 3], boxes[idx*4 + 2]);
		LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
	}

	return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceTFLite::GetInferenceResult(FacialLandMarkDetectionResults& results)
{
	// The landmark model emits a single flat float tensor of normalized
	// coordinates laid out as x0, y0, x1, y1, ...
	TfLiteIntArray* dims = mInterpreter->tensor(mOutputLayerId[0])->dims;
	// NOTE(review): assumes dims->data[1] is the total float count
	// (2 * number of landmarks), since the loop advances by 2 -- TODO confirm
	// against the model's output shape.
	const long number_of_detections = dims->data[1];
	float* loc = mInterpreter->typed_tensor<float>(mOutputLayerId[0]);

	cv::Point point(0,0);
	results.number_of_landmarks = 0;
	LOGI("imgW:%d, imgH:%d", mSourceSize.width, mSourceSize.height);
	// Consume (x, y) pairs and scale them back to source-image pixels.
	for (int idx = 0; idx < number_of_detections; idx+=2) {
		point.x = (int)(loc[idx] * mSourceSize.width);
		point.y = (int)(loc[idx+1] * mSourceSize.height);

		results.locations.push_back(point);
		results.number_of_landmarks++;

		LOGI("x:%d, y:%d", point.x, point.y);
	}

	return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceTFLite::GetInferenceResult(std::vector<std::vector<int>>& dimInfo, std::vector<float*>& results)