int InferenceTFLite::GetOutputTensorBuffers(
std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
- void *pBuff = NULL;
+ LOGI("ENTER");
+
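+	// If no output layer info has been set yet, fill it in via SetInterpreterInfo().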
+ if (mOutputLayers.empty()) {
+ SetInterpreterInfo();
+ }
+ void *pBuff = NULL;
+	LOGI("mOutputLayers size [%zu]", mOutputLayers.size());
for (auto& layer : mOutputLayers) {
inference_engine_tensor_buffer buffer;
size_t size = 1;
buffers.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layer.first])->name, buffer));
}
+ LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
@@ ... @@
+ if (mOutputLayers.empty()) {
+ LOGI("mOutputLayers is empty. layers and tensors that mInterpreter has will be returned.");
+
+ mOutputLayers.clear();
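+		// Build tensor info for every output tensor id in mOutputLayerId.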
+ for (auto& layer : mOutputLayerId) {
+
+ std::vector<size_t> shape_nhwc;
+
+ for (int idx = 0;
+ idx < mInterpreter->tensor(layer.second)->dims->size; idx++) {
+ shape_nhwc.push_back(
+ mInterpreter->tensor(layer.second)->dims->data[idx]);
+ }
+
+ inference_engine_tensor_info tensor_info {
+ shape_nhwc, INFERENCE_TENSOR_SHAPE_NHWC,
+ INFERENCE_TENSOR_DATA_TYPE_NONE, 1
+ };
+
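+			// Map the TFLite tensor type to the corresponding inference engine data type.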
+ if (mInterpreter->tensor(layer.second)->type == kTfLiteUInt8) {
+ LOGI("type is kTfLiteUInt8");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ } else if (mInterpreter->tensor(layer.second)->type ==
+ kTfLiteFloat32) {
+ LOGI("type is kTfLiteFloat32");
+ tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ } else {
+				LOGE("Not supported tensor type [%d]", (int) mInterpreter->tensor(layer.second)->type);
+ return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
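+			// The tensor size is the element count: the product of all dimensions.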
+ for (auto& dim : tensor_info.shape) {
+ tensor_info.size *= dim;
+ }
+ mOutputLayers.insert(std::make_pair(mInterpreter->tensor(layer.second)->name, tensor_info));
+ }
+ }
+
return INFERENCE_ENGINE_ERROR_NONE;
}