Change enumeration values to new ones sandbox/hance.park/dev submit/tizen/20200423.063253 submit/tizen/20200423.064104 submit/tizen/20200423.083755 submit/tizen/20200424.035635 submit/tizen/20200424.041307 submit/tizen/20200424.055009
authorInki Dae <inki.dae@samsung.com>
Fri, 17 Apr 2020 07:52:48 +0000 (16:52 +0900)
committerInki Dae <inki.dae@samsung.com>
Fri, 17 Apr 2020 07:52:48 +0000 (16:52 +0900)
Some enumeration values of inference-engine-interface have been
updated, so change them accordingly.

Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_tflite.cpp

index c7c4781ec9de16dadc744163a7e76b8fe709df5c..ae643af3e4285e27e4338055f9ccb137faea9742 100644 (file)
@@ -160,12 +160,12 @@ int InferenceTFLite::GetInputTensorBuffers(std::vector<inference_engine_tensor_b
         if (mInputAttrType[idx] == kTfLiteUInt8) {
             mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[idx]));
             pBuff = mInputData.back();
-            buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1};
+            buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1};
         }
         else if (mInputAttrType[idx] == kTfLiteFloat32) {
             mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
             pBuff = mInputData.back();
-            buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
+            buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
         }
         else {
             LOGE("Not supported");
@@ -191,12 +191,12 @@ int InferenceTFLite::GetOutputTensorBuffers(std::vector<inference_engine_tensor_
         if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteUInt8) {
             LOGI("type is kTfLiteUInt8");
             pBuff = (void*)mInterpreter->typed_tensor<uint8_t>(mOutputLayerId[idx]);
-            buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, size, 1};
+            buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_UINT8, size, 1};
         }
         else if (mInterpreter->tensor(mOutputLayerId[idx])->type == kTfLiteFloat32) {
             LOGI("type is kTfLiteFloat32");
             pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]);
-            buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
+            buffer = {pBuff, INFERENCE_TENSOR_DATA_TYPE_FLOAT32, size * 4, 1};
         }
         else {
             LOGE("Not supported");
@@ -253,14 +253,14 @@ int InferenceTFLite::GetOutputLayerProperty(inference_engine_layer_property &pro
 
         //tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
         tensor_info.shape = shape_nhwc;
-        tensor_info.shape_type = TENSOR_SHAPE_NHWC;
+        tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NHWC;
         if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
             LOGI("type is kTfLiteUInt8");
-            tensor_info.data_type = TENSOR_DATA_TYPE_UINT8;
+            tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
         }
         else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
             LOGI("type is kTfLiteFloat32");
-            tensor_info.data_type = TENSOR_DATA_TYPE_FLOAT32;
+            tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
         }
         else {
             LOGE("Not supported");