Revert "consider const type for output tensor" 20/315720/1
authorKwanghoon Son <k.son@samsung.com>
Wed, 7 Aug 2024 03:42:51 +0000 (03:42 +0000)
committerKwanghoon Son <k.son@samsung.com>
Wed, 7 Aug 2024 03:47:10 +0000 (03:47 +0000)
It turns out there is no const-type tensor; what exists is a dynamic
tensor type. When a tensor is dynamic, its buffer address may change
not only for outputs but also for inputs, so substituting a dummy
buffer does not solve the problem.
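
As a rough illustration of the problem (a sketch only, not part of this
change; the interpreter setup is assumed to match this backend), an
output pointer taken before Invoke() cannot be cached when the tensor
is dynamic and has to be re-read from the interpreter afterwards:

#include <tensorflow/lite/interpreter.h>

// Hypothetical helper: returns the output data pointer that is valid
// after inference. 'output_tensor_index' is assumed to come from
// interpreter->outputs().
float *RunAndGetOutput(tflite::Interpreter *interpreter, int output_tensor_index)
{
	// Pointer taken before Invoke(); for a dynamic tensor it may be
	// null or become stale once the tensor is reallocated.
	float *before = interpreter->typed_tensor<float>(output_tensor_index);
	(void)before;

	if (interpreter->Invoke() != kTfLiteOk)
		return nullptr;

	// Re-query after Invoke(); this is the only pointer guaranteed
	// to be valid for a dynamic-sized tensor.
	return interpreter->typed_tensor<float>(output_tensor_index);
}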

TensorFlow already warns about dynamic tensors:

WARNING: Attempting to use a delegate that only supports static-sized tensors with a graph that has dynamic-sized tensors (tensor#19 is a dynamic-sized tensor).
INFO: Failed to apply the default TensorFlow Lite delegate indexed at 0 because of incompatibility between runtime and delegate. Ignoring the error, and continuing anyway.
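
For reference, a dynamic tensor such as tensor#19 above can be
recognized by its allocation type. This is a hedged sketch against the
public TfLiteTensor fields, not code from this patch:

#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/c/common.h>

// Hypothetical check: a tensor with kTfLiteDynamic allocation may be
// reallocated during Invoke(), which is what the warning refers to.
bool IsDynamicTensor(const tflite::Interpreter *interpreter, int tensor_index)
{
	const TfLiteTensor *t = interpreter->tensor(tensor_index);
	return t != nullptr && t->allocation_type == kTfLiteDynamic;
}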

This reverts commit e0040fa06a75fba8d5c587bb7db98ab247935c7d.

Change-Id: If89df39cde39a70521562acd6949f6bf1f748c4c

src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

index 5c3eae94aa26dbc428ca77657dd11097aac0596c..edb27c2fa7e8c939638d46465ac21e501617cd8a 100644 (file)
@@ -30,8 +30,6 @@ namespace InferenceEngineImpl
 {
 namespace TFLiteImpl
 {
-       static unsigned int dummy_buffer;
-
        InferenceTFLite::InferenceTFLite()
        {
                LOGI("ENTER");
@@ -42,8 +40,6 @@ namespace TFLiteImpl
        {
                if (mDelegate)
                        TfLiteGpuDelegateV2Delete(mDelegate);
-
-               _constTensorIdx.clear();
        }
 
        int InferenceTFLite::SetPrivateData(void *data)
@@ -185,13 +181,6 @@ namespace TFLiteImpl
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
-       void InferenceTFLite::addConstTensorIdx(inference_engine_tensor_buffer &tensor_buffer, const std::string &layerName)
-       {
-               tensor_buffer.buffer = static_cast<void *>(&dummy_buffer);
-               _constTensorIdx.insert(std::make_pair(mInterpreter->tensor(mOutputLayerId[layerName])->name,
-                                                                                         mOutputLayerId[layerName]));
-       }
-
        int InferenceTFLite::GetOutputTensorBuffers(
                        std::map<std::string, inference_engine_tensor_buffer> &buffers)
        {
@@ -201,8 +190,6 @@ namespace TFLiteImpl
                        SetInterpreterInfo();
                }
 
-               _constTensorIdx.clear();
-
                for (auto& layer : mOutputLayers) {
                        inference_engine_tensor_buffer buffer;
                        size_t size = 1;
@@ -215,28 +202,16 @@ namespace TFLiteImpl
                                LOGI("type is kTfLiteUInt8");
                                pBuff = static_cast<void *>(mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1 };
-
-                               if (pBuff == nullptr && size == 1)
-                                       addConstTensorIdx(buffer, layer.first);
-
                                break;
                        case kTfLiteInt64:
                                LOGI("type is kTfLiteInt64");
                                pBuff = static_cast<void *>(mInterpreter->typed_tensor<int64_t>(mOutputLayerId[layer.first]));
                                buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_INT64, size * 8, 1};
-
-                               if (pBuff == nullptr && size == 1)
-                                       addConstTensorIdx(buffer, layer.first);
-
                                break;
                        case kTfLiteFloat32:
                                LOGI("type is kTfLiteFloat32");
                                pBuff = static_cast<void *>(mInterpreter->typed_tensor<float>(mOutputLayerId[layer.first]));
                                buffer = { pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1 };
-
-                               if (pBuff == nullptr && size == 1)
-                                       addConstTensorIdx(buffer, layer.first);
-
                                break;
                        default:
                                LOGE("Not supported");
@@ -378,25 +353,6 @@ namespace TFLiteImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               // If output tensor is const type then set the const buffer because the const buffer is allocated after invoke.
-               if (!_constTensorIdx.empty()) {
-                       for (auto &m : _constTensorIdx) {
-                               auto &dstTensor = output_buffers[m.first.c_str()];
-
-                               if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-                                       dstTensor.buffer = mInterpreter->typed_tensor<uint8_t>(m.second);
-                               }
-                               if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64) {
-                                       dstTensor.buffer = mInterpreter->typed_tensor<uint64_t>(m.second);
-                               }
-                               if (dstTensor.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
-                                       dstTensor.buffer = mInterpreter->typed_tensor<float>(m.second);
-                               }
-                       }
-
-                       _constTensorIdx.clear();
-               }
-
                LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
        }
index 4ab5d660329d572aea2035ac657eeaf030c209fd..800902201bfd436cb3518330fa52190e8d1cd08a 100644 (file)
@@ -90,14 +90,12 @@ namespace TFLiteImpl
                                const std::vector<int>& buffer);
                int FillLayer(std::map<std::string, inference_engine_tensor_info>& layers,
                                std::map<std::string, int>& layerId);
-               void addConstTensorIdx(inference_engine_tensor_buffer &tensor_buffer, const std::string &layerName);
 
                std::unique_ptr<tflite::Interpreter> mInterpreter;
                std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
 
                std::map<std::string, inference_engine_tensor_info> mInputLayers;
                std::map<std::string, inference_engine_tensor_info> mOutputLayers;
-               std::map<std::string, int> _constTensorIdx;
 
                std::map<std::string, int> mInputLayerId;
                std::map<std::string, int> mOutputLayerId;