From: Kwanghoon Son <k.son@samsung.com>
Date: Mon, 12 Aug 2024 04:01:49 +0000 (+0000)
Subject: Add dynamic tensor mode
X-Git-Tag: accepted/tizen/unified/20240820.163517^0
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=025a79b2c1b07c77be133dc67f528f8e0fcf9e9e;p=platform%2Fcore%2Fmultimedia%2Finference-engine-tflite.git

Add dynamic tensor mode

If dynamic tensor, memcpy buffer.

Change-Id: I6d4bb96bc12e286ab3da15f07c3fc3e8900c519d
Signed-off-by: Kwanghoon Son <k.son@samsung.com>
---

diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index edb27c2..ee52348 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -143,6 +143,14 @@ namespace TFLiteImpl
 			return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
 		}
 
+		// find dynamic tensor
+		mIsDynamicTensorMode = false;
+		for (size_t i = 0; i < mInterpreter->subgraphs_size(); i++)
+			if (mInterpreter->subgraph(i)->HasDynamicTensors()) {
+				LOGI("Dynamic tensor mode is enabled");
+				mIsDynamicTensorMode = true;
+			}
+
 		return ret;
 	}
 
@@ -154,6 +162,8 @@ namespace TFLiteImpl
 		if (mInputLayers.empty()) {
 			SetInterpreterInfo();
 		}
+		if (mIsDynamicTensorMode)
+			return INFERENCE_ENGINE_ERROR_NONE;
 
 		for (auto& layer : mInputLayers) {
 			size_t size = 1;
@@ -190,6 +200,9 @@ namespace TFLiteImpl
 			SetInterpreterInfo();
 		}
 
+		if (mIsDynamicTensorMode)
+			return INFERENCE_ENGINE_ERROR_NONE;
+
 		for (auto& layer : mOutputLayers) {
 			inference_engine_tensor_buffer buffer;
 			size_t size = 1;
@@ -346,6 +359,29 @@ namespace TFLiteImpl
 							 std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
 	{
 		LOGI("ENTER");
+		if (mIsDynamicTensorMode)
+			for (auto &input_buffer : input_buffers) {
+				void *pBuff;
+				switch (mInterpreter->tensor(mInputLayerId[input_buffer.first])->type) {
+				case kTfLiteUInt8:
+					LOGI("type is kTfLiteUInt8");
+					pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[input_buffer.first]));
+					break;
+				case kTfLiteInt64:
+					LOGI("type is kTfLiteInt64");
+					pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mInputLayerId[input_buffer.first]));
+					break;
+				case kTfLiteFloat32:
+					LOGI("type is kTfLiteFloat32");
+					pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[input_buffer.first]));
+					break;
+				default:
+					LOGE("Not supported");
+					return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+				}
+				memcpy(pBuff, input_buffer.second.buffer, input_buffer.second.size);
+			}
+
 		TfLiteStatus status = mInterpreter->Invoke();
 
 		if (status != kTfLiteOk) {
@@ -353,6 +389,29 @@ namespace TFLiteImpl
 			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
 		}
 
+		if (mIsDynamicTensorMode)
+			for (auto &output_buffer : output_buffers) {
+				void *pBuff;
+				switch (mInterpreter->tensor(mOutputLayerId[output_buffer.first])->type) {
+				case kTfLiteUInt8:
+					LOGI("type is kTfLiteUInt8");
+					pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[output_buffer.first]));
+					break;
+				case kTfLiteInt64:
+					LOGI("type is kTfLiteInt64");
+					pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[output_buffer.first]));
+					break;
+				case kTfLiteFloat32:
+					LOGI("type is kTfLiteFloat32");
+					pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[output_buffer.first]));
+					break;
+				default:
+					LOGE("Not supported");
+					return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+				}
+				memcpy(output_buffer.second.buffer, pBuff, output_buffer.second.size);
+			}
+
 		LOGI("LEAVE");
 
 		return INFERENCE_ENGINE_ERROR_NONE;
	}

diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 8009022..1f366a6 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -105,6 +105,7 @@ namespace TFLiteImpl
 		int mTargetTypes { INFERENCE_TARGET_NONE };
 
 		TfLiteDelegate *mDelegate {};
+		bool mIsDynamicTensorMode;
 	};
 
 } /* InferenceEngineImpl */