Add dynamic tensor mode
Author:     Kwanghoon Son <k.son@samsung.com>
AuthorDate: Mon, 12 Aug 2024 04:01:49 +0000 (04:01 +0000)
Commit:     Kwanghoon Son <kwangson@yahoo.com>
CommitDate: Wed, 14 Aug 2024 03:19:54 +0000 (12:19 +0900)
If the model contains dynamic tensors, copy input and output data with
memcpy instead of exposing the interpreter's tensor buffers directly.

Change-Id: I6d4bb96bc12e286ab3da15f07c3fc3e8900c519d
Signed-off-by: Kwanghoon Son <k.son@samsung.com>
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

diff --git a/src/inference_engine_tflite.cpp b/src/inference_engine_tflite.cpp
index edb27c2fa7e8c939638d46465ac21e501617cd8a..ee52348234c262d4178717e840c8ad88759e6855 100644
--- a/src/inference_engine_tflite.cpp
+++ b/src/inference_engine_tflite.cpp
@@ -143,6 +143,14 @@ namespace TFLiteImpl
                        return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
                }
 
+               // find dynamic tensor
+               mIsDynamicTensorMode = false;
+               for (size_t i = 0; i < mInterpreter->subgraphs_size(); i++)
+                       if (mInterpreter->subgraph(i)->HasDynamicTensors()) {
+                               LOGI("Dynamic tensor mode is enabled");
+                               mIsDynamicTensorMode = true;
+                       }
+
                return ret;
        }
 
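The hunk above uses the stock TensorFlow Lite C++ API
(Interpreter::subgraphs_size(), Interpreter::subgraph(),
Subgraph::HasDynamicTensors()). A tensor is dynamic when its shape depends on
runtime values, so its storage may only be allocated or reallocated during
Invoke(). A minimal standalone sketch of the same detection, assuming TFLite
2.x headers (HasAnyDynamicTensor is a hypothetical helper name, not part of
the commit):

    #include <tensorflow/lite/interpreter.h>

    // Returns true if any subgraph holds a tensor whose shape is only
    // resolved at Invoke() time (a "dynamic" tensor).
    static bool HasAnyDynamicTensor(tflite::Interpreter &interpreter)
    {
            for (int i = 0; i < static_cast<int>(interpreter.subgraphs_size()); ++i)
                    if (interpreter.subgraph(i)->HasDynamicTensors())
                            return true;
            return false;
    }
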
@@ -154,6 +162,8 @@ namespace TFLiteImpl
                if (mInputLayers.empty()) {
                        SetInterpreterInfo();
                }
+               if (mIsDynamicTensorMode)
+                       return INFERENCE_ENGINE_ERROR_NONE;
 
                for (auto& layer : mInputLayers) {
                        size_t size = 1;
@@ -190,6 +200,9 @@ namespace TFLiteImpl
                        SetInterpreterInfo();
                }
 
+               if (mIsDynamicTensorMode)
+                       return INFERENCE_ENGINE_ERROR_NONE;
+
                for (auto& layer : mOutputLayers) {
                        inference_engine_tensor_buffer buffer;
                        size_t size = 1;
@@ -346,6 +359,29 @@ namespace TFLiteImpl
                        std::map<std::string, inference_engine_tensor_buffer> &output_buffers)
        {
                LOGI("ENTER");
+               if (mIsDynamicTensorMode)
+                       for (auto &input_buffer : input_buffers) {
+                               void *pBuff;
+                               switch (mInterpreter->tensor(mInputLayerId[input_buffer.first])->type) {
+                               case kTfLiteUInt8:
+                                       LOGI("type is kTfLiteUInt8");
+                                       pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[input_buffer.first]));
+                                       break;
+                               case kTfLiteInt64:
+                                       LOGI("type is kTfLiteInt64");
+                                       pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mInputLayerId[input_buffer.first]));
+                                       break;
+                               case kTfLiteFloat32:
+                                       LOGI("type is kTfLiteFloat32");
+                                       pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mInputLayerId[input_buffer.first]));
+                                       break;
+                               default:
+                                       LOGE("Not supported");
+                                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+                               }
+                               memcpy(pBuff, input_buffer.second.buffer, input_buffer.second.size);
+                       }
+
                TfLiteStatus status = mInterpreter->Invoke();
 
                if (status != kTfLiteOk) {
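The input path above and the output path below repeat the same type switch. A
possible shared helper (a sketch only; GetTensorBuffer is a hypothetical name,
not part of the commit): every typed_tensor<T>() branch resolves to the same
underlying storage exposed by TfLiteTensor::data.data, so the switch can
collapse to a supported-type check:

    #include <tensorflow/lite/interpreter.h>

    // Resolve the raw buffer of a tensor, or nullptr for unsupported element
    // types (the caller maps nullptr to NOT_SUPPORTED_FORMAT).
    static void *GetTensorBuffer(tflite::Interpreter &interpreter, int tensor_idx)
    {
            TfLiteTensor *tensor = interpreter.tensor(tensor_idx);
            switch (tensor->type) {
            case kTfLiteUInt8:
            case kTfLiteInt64:
            case kTfLiteFloat32:
                    // typed_tensor<T>() for each of these types returns this
                    // same storage, just cast to T*.
                    return tensor->data.data;
            default:
                    return nullptr;
            }
    }

Checking the caller-supplied size against tensor->bytes before each memcpy
would additionally guard against copying past the tensor's storage.
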
@@ -353,6 +389,29 @@ namespace TFLiteImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
+               if (mIsDynamicTensorMode)
+                       for (auto &output_buffer : output_buffers) {
+                               void *pBuff;
+                               switch (mInterpreter->tensor(mOutputLayerId[output_buffer.first])->type) {
+                               case kTfLiteUInt8:
+                                       LOGI("type is kTfLiteUInt8");
+                                       pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[output_buffer.first]));
+                                       break;
+                               case kTfLiteInt64:
+                                       LOGI("type is kTfLiteInt64");
+                                       pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[output_buffer.first]));
+                                       break;
+                               case kTfLiteFloat32:
+                                       LOGI("type is kTfLiteFloat32");
+                                       pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[output_buffer.first]));
+                                       break;
+                               default:
+                                       LOGE("Not supported");
+                                       return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+                               }
+                               memcpy(output_buffer.second.buffer, pBuff, output_buffer.second.size);
+                       }
+
                LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
        }
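
For context, a caller-side sketch of the flow this commit supports, assuming
the stock TFLite 2.x C++ API and a single-input, single-output float32 model
at a placeholder path. With dynamic tensors the output size is only final
after Invoke(), which is why the engine copies buffers instead of handing out
tensor pointers up front:

    #include <cstring>
    #include <memory>
    #include <vector>
    #include <tensorflow/lite/interpreter.h>
    #include <tensorflow/lite/kernels/register.h>
    #include <tensorflow/lite/model.h>

    int main()
    {
            // "model.tflite" is a placeholder path.
            auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
            if (!model)
                    return 1;
            tflite::ops::builtin::BuiltinOpResolver resolver;
            std::unique_ptr<tflite::Interpreter> interpreter;
            if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk)
                    return 1;
            if (interpreter->AllocateTensors() != kTfLiteOk)
                    return 1;

            // Copy the input in. With dynamic tensors, do not cache this
            // pointer across Invoke() calls; the storage can be reallocated.
            TfLiteTensor *in = interpreter->input_tensor(0);
            std::vector<float> input(in->bytes / sizeof(float), 0.5f);
            std::memcpy(in->data.data, input.data(), in->bytes);

            if (interpreter->Invoke() != kTfLiteOk)
                    return 1;

            // The output tensor's size is only final after Invoke().
            const TfLiteTensor *out = interpreter->output_tensor(0);
            std::vector<float> output(out->bytes / sizeof(float));
            std::memcpy(output.data(), out->data.data, out->bytes);
            return 0;
    }
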
diff --git a/src/inference_engine_tflite_private.h b/src/inference_engine_tflite_private.h
index 800902201bfd436cb3518330fa52190e8d1cd08a..1f366a6b0583fe3a8ae47f8c74a9bfd625db3fc4 100644
--- a/src/inference_engine_tflite_private.h
+++ b/src/inference_engine_tflite_private.h
@@ -105,6 +105,7 @@ namespace TFLiteImpl
                int mTargetTypes { INFERENCE_TARGET_NONE };
 
                TfLiteDelegate *mDelegate {};
+               bool mIsDynamicTensorMode { false };
        };
 
 } /* InferenceEngineImpl */