Support UINT8 tensor type and compute tensor buffer sizes
author:    Hyunsoo Park <hance.park@samsung.com>
           Thu, 2 Apr 2020 09:16:04 +0000 (18:16 +0900)
committer: Hyunsoo Park <hance.park@samsung.com>
           Thu, 2 Apr 2020 09:16:04 +0000 (18:16 +0900)
Change-Id: Idc7776c9123abd665f96204120864801657e2d54
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
src/inference_engine_tflite.cpp

index 920f74eec3bb18bc852640abff9fbe819c78ce7f..1e8aa2a2bc2f31c9a0a65598e21f1e7ac29a8973 100644
@@ -151,18 +151,21 @@ int InferenceTFLite::GetInputTensorBuffers(std::vector<inference_engine_tensor_b
     void *pBuff = NULL;
 
     for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx) {
-
+        size_t size = 1;
         inference_engine_tensor_buffer buffer;
-
+        for (std::vector<int>::iterator iter = mInputTensorInfo[idx].shape.begin();
+                iter != mInputTensorInfo[idx].shape.end(); ++iter) {
+            size *= (*iter);
+        }
         if (mInputAttrType[idx] == kTfLiteUInt8) {
             mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[idx]));
             pBuff = mInputData.back();
-            buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, 0, 1};
+            buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1};
         }
         else if (mInputAttrType[idx] == kTfLiteFloat32) {
-             mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
+            mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
             pBuff = mInputData.back();
-            buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, 0, 1};
+            buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
         }
         else {
             LOGE("Not supported");
@@ -180,9 +183,25 @@ int InferenceTFLite::GetOutputTensorBuffers(std::vector<inference_engine_tensor_
 
     for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) {
         inference_engine_tensor_buffer buffer;
+        size_t size = 1;
+        for (int idx2 = 0; idx2 < mInterpreter->tensor(mOutputLayerId[idx])->dims->size; ++idx2) {
+            size *= mInterpreter->tensor(mOutputLayerId[idx])->dims->data[idx2];
+        }
 
-        pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]);
-        buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, 0, 1};
+        if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteUInt8) {
+            LOGI("type is kTfLiteUInt8");
+            pBuff = (void*)mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[idx]);
+            buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1};
+        }
+        else if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteFloat32) {
+            LOGI("type is kTfLiteFloat32");
+            pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]);
+            buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
+        }
+        else {
+            LOGE("Not supported");
+            return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+        }
 
         buffers.push_back(buffer);
     }
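
Note that the output path, unlike the input path (which keys off the cached mInputAttrType), queries the interpreter's tensor for its element type directly, so quantized kTfLiteUInt8 outputs are now exposed as raw bytes instead of being read through a float pointer. A consumer of such a buffer would typically dequantize it with the tensor's quantization parameters; a hedged sketch of that step, assuming a valid TfLiteTensor* and the standard scale/zero_point fields TFLite keeps in tensor->params (Dequantize itself is illustrative, not part of this patch):

    #include <cstdint>
    #include <vector>
    #include "tensorflow/lite/c/common.h"

    // Sketch: map a kTfLiteUInt8 output buffer back to real values.
    // count is the element count computed from the tensor's dims above.
    std::vector<float> Dequantize(const TfLiteTensor* tensor, size_t count) {
        const uint8_t* data = tensor->data.uint8;           // raw quantized bytes
        const float scale = tensor->params.scale;           // quantization scale
        const int zero_point = tensor->params.zero_point;   // quantization offset
        std::vector<float> out(count);
        for (size_t i = 0; i < count; ++i)
            out[i] = scale * (static_cast<int>(data[i]) - zero_point);
        return out;
    }
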
@@ -229,7 +248,6 @@ int InferenceTFLite::GetOutputLayerProperty(inference_engine_layer_property &pro
 
         std::vector<int> shape_nhwc;
        for (int idx = 0; idx < mInterpreter->tensor((*iter))->dims->size; idx++) {
-            LOGI("mInterpreter->tensor((*iter))->dims[%d]= [%d]", idx, mInterpreter->tensor((*iter))->dims->data[idx]);
             shape_nhwc.push_back(mInterpreter->tensor((*iter))->dims->data[idx]);
         }
 
@@ -282,11 +300,14 @@ int InferenceTFLite::SetInputLayerProperty(inference_engine_layer_property &prop
     mInputLayer = property.layer_names;
     mInputTensorInfo = property.tensor_infos;
 
+    LOGI("LEAVE");
+
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
 int InferenceTFLite::SetOutputLayerProperty(inference_engine_layer_property &property)
 {
+    LOGI("ENTER");
     std::vector<std::string>::iterator iter;
     for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
         std::string name = *iter;
@@ -298,6 +319,8 @@ int InferenceTFLite::SetOutputLayerProperty(inference_engine_layer_property &pro
 
     mOutputLayer = property.layer_names;
 
+    LOGI("LEAVE");
+
     return INFERENCE_ENGINE_ERROR_NONE;
 }