From: Inki Dae
Date: Tue, 2 Jul 2024 04:19:33 +0000 (+0900)
Subject: consider const type for output tensor
X-Git-Tag: accepted/tizen/unified/20240705.082047^0
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e0040fa06a75fba8d5c587bb7db98ab247935c7d;p=platform%2Fcore%2Fmultimedia%2Finference-engine-tflite.git

consider const type for output tensor

Consider const type for output tensor. If an output tensor of a tflite
model is const then its tensor buffer isn't allocated by
mInterpreter->AllocateTensors(), so it returns an error.

This patch adds _constTensorIdx, which manages the const tensor info.
If an output tensor is const then its tensor buffer is pointed to
dummy_buffer in Load() and is then replaced in Run() with the const
tensor buffer of the tflite engine, because the const tensor buffer is
allocated only after Invoke().

Change-Id: I1b7722d4189f757275b90bcab6344075e60d5341
Signed-off-by: Inki Dae
---

diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index edb27c2..5c3eae9 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -30,6 +30,8 @@ namespace InferenceEngineImpl
 {
 namespace TFLiteImpl
 {
+	static unsigned int dummy_buffer;
+
 	InferenceTFLite::InferenceTFLite()
 	{
 		LOGI("ENTER");
@@ -40,6 +42,8 @@ namespace TFLiteImpl
 	{
 		if (mDelegate)
 			TfLiteGpuDelegateV2Delete(mDelegate);
+
+		_constTensorIdx.clear();
 	}
 
 	int InferenceTFLite::SetPrivateData(void *data)
@@ -181,6 +185,13 @@ namespace TFLiteImpl
 		return INFERENCE_ENGINE_ERROR_NONE;
 	}
 
+	void InferenceTFLite::addConstTensorIdx(inference_engine_tensor_buffer &tensor_buffer, const std::string &layerName)
+	{
+		tensor_buffer.buffer = static_cast<void *>(&dummy_buffer);
+		_constTensorIdx.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layerName])->name,
+											  mOutputLayerId[layerName]));
+	}
+
 	int InferenceTFLite::GetOutputTensorBuffers(
 			std::map<std::string, inference_engine_tensor_buffer> &buffers)
 	{
@@ -190,6 +201,8 @@ namespace TFLiteImpl
 			SetInterpreterInfo();
 		}
 
+		_constTensorIdx.clear();
+
 		for (auto& layer : mOutputLayers) {
 			inference_engine_tensor_buffer buffer;
 			size_t size = 1;
@@ -202,16 +215,28 @@ namespace TFLiteImpl
 				LOGI("type is kTfLiteUInt8");
 				pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
 				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
+
+				if (pBuff == nullptr && size == 1)
+					addConstTensorIdx(buffer, layer.first);
+
 				break;
 			case kTfLiteInt64:
 				LOGI("type is kTfLiteInt64");
 				pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
 				buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
+
+				if (pBuff == nullptr && size == 1)
+					addConstTensorIdx(buffer, layer.first);
+
 				break;
 			case kTfLiteFloat32:
 				LOGI("type is kTfLiteFloat32");
 				pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
 				buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
+
+				if (pBuff == nullptr && size == 1)
+					addConstTensorIdx(buffer, layer.first);
+
 				break;
 			default:
 				LOGE("Not supported");
@@ -353,6 +378,25 @@ namespace TFLiteImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
 
+		// If the output tensor is const then set the const buffer here because it is allocated only after Invoke().
+		if (!_constTensorIdx.empty()) {
+			for (auto &m : _constTensorIdx) {
+				auto &dstTensor = output_buffers[m.first.c_str()];
+
+				if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+					dstTensor.buffer = mInterpreter->typed_tensor<uint8_t>(m.second);
+				}
+				if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64) {
+					dstTensor.buffer = mInterpreter->typed_tensor<int64_t>(m.second);
+				}
+				if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
+					dstTensor.buffer = mInterpreter->typed_tensor<float>(m.second);
+				}
+			}
+
+			_constTensorIdx.clear();
+		}
+
 		LOGI("LEAVE");
 		return INFERENCE_ENGINE_ERROR_NONE;
 	}
diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 8009022..4ab5d66 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -90,12 +90,14 @@ namespace TFLiteImpl
 				const std::vector& buffer);
 		int FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
 				std::map<std::string, int>& layerId);
+		void addConstTensorIdx(inference_engine_tensor_buffer &tensor_buffer, const std::string &layerName);
 
 		std::unique_ptr<tflite::Interpreter> mInterpreter;
 		std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
 
 		std::map<std::string, inference_engine_tensor_info> mInputLayers;
 		std::map<std::string, inference_engine_tensor_info> mOutputLayers;
+		std::map<std::string, int> _constTensorIdx;
 
 		std::map<std::string, int> mInputLayerId;
 		std::map<std::string, int> mOutputLayerId;
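
For reference, the idea the patch relies on can be shown with a minimal standalone sketch: if mInterpreter->typed_tensor() returns nullptr for an output tensor before Invoke(), hand out a placeholder buffer and fetch the real pointer once Invoke() has run. The sketch below is not part of the patch; the model path "model.tflite", the float-only output handling and the outputBuffers map are assumptions made only for illustration.

#include <cstdio>
#include <map>
#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

// Placeholder the outputs can point to until the real buffer exists,
// mirroring dummy_buffer in the patch.
static unsigned int dummy_buffer;

int main()
{
	// "model.tflite" is a hypothetical path used only for illustration.
	auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
	if (!model)
		return 1;

	tflite::ops::builtin::BuiltinOpResolver resolver;
	std::unique_ptr<tflite::Interpreter> interpreter;
	tflite::InterpreterBuilder(*model, resolver)(&interpreter);
	if (!interpreter || interpreter->AllocateTensors() != kTfLiteOk)
		return 1;

	// Before Invoke(): an output whose buffer was not produced by
	// AllocateTensors() comes back as nullptr, so point it at the dummy
	// (the role addConstTensorIdx() plays in GetOutputTensorBuffers()).
	std::map<int, void *> outputBuffers;
	for (int idx : interpreter->outputs()) {
		void *buf = interpreter->typed_tensor<float>(idx);  // assuming float outputs
		outputBuffers[idx] = buf ? buf : static_cast<void *>(&dummy_buffer);
	}

	if (interpreter->Invoke() != kTfLiteOk)
		return 1;

	// After Invoke(): swap the dummy pointers for the now-valid tensor
	// buffers, as the loop added to Run() does for _constTensorIdx entries.
	for (auto &entry : outputBuffers)
		if (entry.second == &dummy_buffer)
			entry.second = interpreter->typed_tensor<float>(entry.first);

	printf("collected %zu output buffers\n", outputBuffers.size());
	return 0;
}

Inside the backend, this two-phase handling is what addConstTensorIdx() records during GetOutputTensorBuffers() and what the new loop in Run() resolves per data type once the const tensor buffer exists.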