{
namespace TFLiteImpl
{
- static unsigned int dummy_buffer;
-
InferenceTFLite::InferenceTFLite()
{
LOGI("ENTER");
}

InferenceTFLite::~InferenceTFLite()
{
if (mDelegate)
TfLiteGpuDelegateV2Delete(mDelegate);
-
- _constTensorIdx.clear();
}
int InferenceTFLite::SetPrivateData(void *data)
{
return INFERENCE_ENGINE_ERROR_NONE;
}
- void InferenceTFLite::addConstTensorIdx(inference_engine_tensor_buffer &tensor_buffer, const std::string &layerName)
- {
- tensor_buffer.buffer = static_cast<void *>(&dummy_buffer);
- _constTensorIdx.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layerName])->name,
- mOutputLayerId[layerName]));
- }
-
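// Illustrative note, not part of the patch: the removed helper above parks the
// tensor buffer on a static dummy word so that callers which reject null
// buffers still accept the entry, and records the tensor index in
// _constTensorIdx so the real buffer can be bound after Invoke(). Below is a
// minimal stand-alone sketch of that placeholder idea; Buffer, placeholder_word,
// pending_indices and registerPending are hypothetical names.
#include <cstddef>
#include <map>
#include <string>

struct Buffer {
	void *data;
	size_t size;
};

// Stands in for the backend's static dummy_buffer.
static unsigned int placeholder_word;

// Hypothetical registry of entries that still need a real buffer bound.
static std::map<std::string, int> pending_indices;

static void registerPending(Buffer &buf, const std::string &name, int tensor_index)
{
	buf.data = &placeholder_word; // non-null placeholder until the real buffer exists
	pending_indices[name] = tensor_index;
}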
int InferenceTFLite::GetOutputTensorBuffers(
std::map<std::string, inference_engine_tensor_buffer> &buffers)
{
SetInterpreterInfo();
}
- _constTensorIdx.clear();
-
for (auto& layer : mOutputLayers) {
inference_engine_tensor_buffer buffer;
size_t size = 1;
void *pBuff = nullptr;

switch (mInterpreter->tensor(mOutputLayerId[layer.first])->type) {
case kTfLiteUInt8:
LOGI("type is kTfLiteUInt8");
pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-
- if (pBuff == nullptr && size == 1)
- addConstTensorIdx(buffer, layer.first);
-
break;
case kTfLiteInt64:
LOGI("type is kTfLiteInt64");
pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
-
- if (pBuff == nullptr && size == 1)
- addConstTensorIdx(buffer, layer.first);
-
break;
case kTfLiteFloat32:
LOGI("type is kTfLiteFloat32");
pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
-
- if (pBuff == nullptr && size == 1)
- addConstTensorIdx(buffer, layer.first);
-
break;
default:
LOGE("Not supported");
return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
}
- // If an output tensor is a const tensor, rebind its buffer here because const tensor buffers are allocated only after Invoke().
- if (!_constTensorIdx.empty()) {
- for (auto &m : _constTensorIdx) {
- auto &dstTensor = output_buffers[m.first];
-
- if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
- dstTensor.buffer = mInterpreter->typed_tensor<uint8_t>(m.second);
- }
- if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64) {
- dstTensor.buffer = mInterpreter->typed_tensor<int64_t>(m.second);
- }
- if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
- dstTensor.buffer = mInterpreter->typed_tensor<float>(m.second);
- }
- }
-
- _constTensorIdx.clear();
- }
-
LOGI("LEAVE");
return INFERENCE_ENGINE_ERROR_NONE;
}
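// --- Illustrative sketch, not part of this backend ---
// The removed const-tensor handling works around the fact that an output
// tensor's data pointer can still be null before Invoke() has run; the backend
// therefore remembers those tensor indices and rebinds the buffers afterwards.
// The sketch below shows the same rebind-after-Invoke idea against the plain
// TensorFlow Lite C++ API. The model path and the helper name
// resolveOutputBuffers are hypothetical.
#include <cstdio>
#include <map>
#include <memory>
#include <string>

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>

// Hypothetical helper: map each output tensor name to its current data pointer.
static std::map<std::string, void *> resolveOutputBuffers(tflite::Interpreter &interpreter)
{
	std::map<std::string, void *> buffers;

	for (int idx : interpreter.outputs()) {
		TfLiteTensor *tensor = interpreter.tensor(idx);
		// data.raw stays null while the tensor's memory is not allocated yet.
		buffers[tensor->name] = tensor->data.raw;
	}

	return buffers;
}

int main()
{
	// Hypothetical model path.
	auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
	if (!model)
		return 1;

	tflite::ops::builtin::BuiltinOpResolver resolver;
	std::unique_ptr<tflite::Interpreter> interpreter;
	tflite::InterpreterBuilder(*model, resolver)(&interpreter);
	if (!interpreter || interpreter->AllocateTensors() != kTfLiteOk)
		return 1;

	// First pass: const/deferred outputs may still report a null pointer here.
	auto before = resolveOutputBuffers(*interpreter);
	std::printf("outputs resolved before Invoke: %zu\n", before.size());

	if (interpreter->Invoke() != kTfLiteOk)
		return 1;

	// Second pass after Invoke(): every output now has a valid buffer, which is
	// why the backend rebinds the _constTensorIdx entries at this point.
	auto after = resolveOutputBuffers(*interpreter);
	for (auto &entry : after)
		std::printf("output %s -> %p\n", entry.first.c_str(), entry.second);

	return 0;
}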
const std::vector<int>& buffer);
int FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
std::map<std::string, int>& layerId);
- void addConstTensorIdx(inference_engine_tensor_buffer &tensor_buffer, const std::string &layerName);
std::unique_ptr<tflite::Interpreter> mInterpreter;
std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
std::map<std::string, inference_engine_tensor_info> mInputLayers;
std::map<std::string, inference_engine_tensor_info> mOutputLayers;
- std::map<std::string, int> _constTensorIdx;
std::map<std::string, int> mInputLayerId;
std::map<std::string, int> mOutputLayerId;