int InferenceTFLite::SetInputTensorParamNode(std::string node)
{
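+ // Keep the requested input layer name; it is resolved to a tensor index once the model has been loaded.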
+ mInputLayer = node;
return INFERENCE_ENGINE_ERROR_NONE;
}
return INFERENCE_ENGINE_ERROR_NONE;
}
-int InferenceTFLite::SetOutPutTensorParamNodes(std::string node)
+int InferenceTFLite::SetOutputTensorParamNodes(std::vector<std::string> nodes)
{
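+ // Keep the requested output layer names; they are resolved to tensor indices once the model has been loaded.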
+ mOutputLayer = nodes;
return INFERENCE_ENGINE_ERROR_NONE;
}
{
switch (type) {
case INFERENCE_TARGET_CPU:
- //mInterpreter->UseNNAPI(false);
+ mInterpreter->UseNNAPI(false);
break;
case INFERENCE_TARGET_GPU:
- //mInterpreter->UseNNAPI(true);
+ mInterpreter->UseNNAPI(true);
break;
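+ // INFERENCE_TARGET_CUSTOM is not supported by this backend; fall through to the default CPU path.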
+ case INFERENCE_TARGET_CUSTOM:
case INFERENCE_TARGET_NONE:
default:
LOGE("Not supported device type [%d], Set CPU mode", (int)type);
if (ret != INFERENCE_ENGINE_ERROR_NONE)
LOGE("Fail to read categoryList");
- mInputLayer = mInterpreter->inputs()[0];
- mOutputLayer = mInterpreter->outputs()[0];
+ // input tensor
+ if (mInterpreter->inputs().size()) {
+ mInputLayerId = mInterpreter->inputs()[0];
+ } else {
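+ // The model does not expose an input index; look the tensor up by the name set via SetInputTensorParamNode().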
+ mInputLayerId = -1;
+ for (int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
+ if (mInterpreter->tensor(idx)->name == NULL)
+ continue;
+ if (mInputLayer.compare(mInterpreter->tensor(idx)->name) == 0) {
+ mInputLayerId = idx;
+ break;
+ }
+ }
+ }
+
+ // output tensor
+ if (mInterpreter->outputs().size()) {
+ mOutputLayerId = mInterpreter->outputs();
+ } else {
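+ // The model does not expose output indices; resolve each requested output layer name to a tensor index.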
+ std::vector<std::string>::iterator iter;
+ mOutputLayerId.clear();
+ for (iter = mOutputLayer.begin(); iter != mOutputLayer.end(); ++iter) {
+ LOGI("%s", (*iter).c_str());
+ for (int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
+ if (mInterpreter->tensor(idx)->name == NULL)
+ continue;
+ if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
+ mOutputLayerId.push_back(idx);
+ break;
+ }
+ }
+ }
+ }
if (mInterpreter->AllocateTensors() != kTfLiteOk) {
LOGE("Fail to allocate tensor");
return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
}
- mInputAttrType = mInterpreter->tensor(mInputLayer)->type;
- /*
- if (mInputAttrType == kTfLiteUInt8) {
- mMatType = CV_8UC3;
- mInputData = mInterpreter->typed_tensor<uint8_t>(mInputLayer); //tflite
- LOGE("InputType is DT_UINT8");
- }
- else if (mInputAttrType == kTfLiteFloat32) {
- mMatType = CV_32FC3;
- mInputData = mInterpreter->typed_tensor<float>(mInputLayer); //tflite
- LOGE("InputType is DT_FLOAT");
- }
- else {
- LOGE("Not supported");
- }
-
- mInputBuffer = cv::Mat(mInputSize.height, mInputSize.width, mMatType, mInputData);
- */
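+ // Remember the input tensor type; the actual input buffer is bound in CreateInputLayerPassage().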
+ mInputAttrType = mInterpreter->tensor(mInputLayerId)->type;
return ret;
}
int InferenceTFLite::CreateInputLayerPassage()
{
if (mInputAttrType == kTfLiteUInt8) {
- mInputData = mInterpreter->typed_tensor<uint8_t>(mInputLayer); //tflite
+ mInputData = mInterpreter->typed_tensor<uint8_t>(mInputLayerId); //tflite
LOGE("InputType is DT_UINT8");
}
else if (mInputAttrType == kTfLiteFloat32) {
- mInputData = mInterpreter->typed_tensor<float>(mInputLayer); //tflite
+ mInputData = mInterpreter->typed_tensor<float>(mInputLayerId); //tflite
LOGE("InputType is DT_FLOAT");
}
else {
std::greater<std::pair<float, int>>> top_result_pq;
float value;
- TfLiteIntArray* dims = mInterpreter->tensor(mOutputLayer)->dims;
+ TfLiteIntArray* dims = mInterpreter->tensor(mOutputLayerId[0])->dims;
const long count = dims->data[1];
LOGE("dims size: %d", dims->size);
int InferenceTFLite::GetInferenceResult(ObjectDetectionResults& results)
{
- /**
- * NOTE ( TODO ) : ObjectDetection with TFLite is under testing.
- * When MobileNet_V1_SSD is tested,
- * It doens't work
- * - reason: current TF-lite version (v1.9.0) doens't support PostProcess OP
- */
- results.number_of_objects = 0;
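+ // SSD post-process output tensors: [0] boxes (ymin, xmin, ymax, xmax), [1] classes, [2] scores, [3] number of detections.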
+ float* boxes = mInterpreter->typed_tensor<float>(mOutputLayerId[0]);
+ float* classes = mInterpreter->typed_tensor<float>(mOutputLayerId[1]);
+ float* scores = mInterpreter->typed_tensor<float>(mOutputLayerId[2]);
+
+ int number_of_objects = 0;
+ int number_of_detections = (int)(*mInterpreter->typed_tensor<float>(mOutputLayerId[3]));
+ int left, top, right, bottom;
+ cv::Rect loc;
+
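+ // Keep detections above the confidence threshold and scale the normalized boxes to the source image size.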
+ for (int idx = 0; idx < number_of_detections; ++idx) {
+ if (scores[idx] < mThreshold)
+ continue;
+
+ left = (int)(boxes[idx*4 + 1] * mSourceSize.width);
+ top = (int)(boxes[idx*4 + 0] * mSourceSize.height);
+ right = (int)(boxes[idx*4 + 3] * mSourceSize.width);
+ bottom = (int)(boxes[idx*4 + 2] * mSourceSize.height);
+
+ loc.x = left;
+ loc.y = top;
+ loc.width = right - left + 1;
+ loc.height = bottom - top + 1;
+
+ results.indices.push_back((int)classes[idx]);
+ results.confidences.push_back(scores[idx]);
+ results.names.push_back(mUserListName[(int)classes[idx]]);
+ results.locations.push_back(loc);
+
+ LOGE("objectClass: %d", (int)classes[idx]);
+ LOGE("confidence:%f", scores[idx]);
+ LOGE("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
+ number_of_objects++;
+ }
- return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+ results.number_of_objects = number_of_objects;
+
+ return INFERENCE_ENGINE_ERROR_NONE;
}
int InferenceTFLite::GetInferenceResult(FaceDetectionResults& results)
return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
}
-
int InferenceTFLite::GetInferenceResult(std::vector<std::vector<int>>& dimInfo, std::vector<float*>& results)
{
dimInfo.clear();
results.clear();
+ TfLiteIntArray* dims = NULL;
+ std::vector<int> tmpDimInfo;
- TfLiteIntArray* dims = mInterpreter->tensor(mOutputLayer)->dims;
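+ // Collect the dimension info and the data pointer of every output tensor.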
+ for (int idx = 0; idx < mOutputLayerId.size(); ++idx) {
+ dims = mInterpreter->tensor(mOutputLayerId[idx])->dims;
- std::vector<int> tmpDimInfo;
- for (int d = 0; d < dims->size; ++d) {
- tmpDimInfo.push_back(dims->data[d]);
- }
+ tmpDimInfo.clear();
+ for (int d = 0; d < dims->size; ++d) {
+ tmpDimInfo.push_back(dims->data[d]);
+ }
- dimInfo.push_back(tmpDimInfo);
- results.push_back(mInterpreter->typed_output_tensor<float>(0));
+ dimInfo.push_back(tmpDimInfo);
+ results.push_back(mInterpreter->typed_tensor<float>(mOutputLayerId[idx]));
+ }
return INFERENCE_ENGINE_ERROR_NONE;
}
}
}
} /* TFLiteImpl */
-} /* InferenceEngineImpl */
\ No newline at end of file
+} /* InferenceEngineImpl */