Refactoring code according to refactored inference-engine-interface 93/229493/1
author Hyunsoo Park <hance.park@samsung.com>
Wed, 1 Apr 2020 09:55:16 +0000 (18:55 +0900)
committer Hyunsoo Park <hance.park@samsung.com>
Wed, 1 Apr 2020 09:55:16 +0000 (18:55 +0900)
Change-Id: I73da801ac99a22ee5b0abb03752da76bdbc0117f
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
src/inference_engine_tflite.cpp
src/inference_engine_tflite_private.h

index b65c70c8664b39ee1252a563d7cad79763b87e5f..920f74eec3bb18bc852640abff9fbe819c78ce7f 100644 (file)
 namespace InferenceEngineImpl {
 namespace TFLiteImpl {
 
-InferenceTFLite::InferenceTFLite(std::string protoFile, std::string weightFile) :
-    mInputLayerId(0),
-    mInputAttrType(kTfLiteNoType),
-    mInputData(nullptr),
-    mConfigFile(protoFile),
-    mWeightFile(weightFile)
+InferenceTFLite::InferenceTFLite(void)
 {
     LOGI("ENTER");
     LOGI("LEAVE");
@@ -45,54 +40,23 @@ InferenceTFLite::~InferenceTFLite()
     ;
 }
 
-int InferenceTFLite::SetInputTensorParam()
+int InferenceTFLite::SetTargetDevices(int types)
 {
-    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
-}
-
-int InferenceTFLite::SetInputTensorParamNode(std::string node)
-{
-    mInputLayer = node;
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::SetOutputTensorParam()
-{
-    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
-}
+    LOGI("ENTER");
 
-int InferenceTFLite::SetOutputTensorParamNodes(std::vector<std::string> nodes)
-{
-    mOutputLayer = nodes;
-    return INFERENCE_ENGINE_ERROR_NONE;
-}
-
-int InferenceTFLite::SetTargetDevice(inference_target_type_e type)
-{
-    switch (type) {
-    case INFERENCE_TARGET_CPU:
-        mInterpreter->UseNNAPI(false);
-        break;
-    case INFERENCE_TARGET_GPU:
-        mInterpreter->UseNNAPI(true);
-        break;
-    case INFERENCE_TARGET_CUSTOM:
-    case INFERENCE_TARGET_NONE:
-       default:
-               LOGW("Not supported device type [%d], Set CPU mode", (int)type);
-       }
+    mTargetTypes = types;
 
+    LOGI("LEAVE");
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceTFLite::Load()
+int InferenceTFLite::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
 {
     int ret = INFERENCE_ENGINE_ERROR_NONE;
 
-    if (access(mWeightFile.c_str(), F_OK)) {
-        LOGE("weightFilePath in [%s] ", mWeightFile.c_str());
-        return INFERENCE_ENGINE_ERROR_INVALID_PATH;
-    }
+    mWeightFile = model_paths.back();
+
+    LOGI("mWeightFile.c_str() result [%s]", mWeightFile.c_str());
 
     mFlatBuffModel = tflite::FlatBufferModel::BuildFromFile(mWeightFile.c_str());
     if (!mFlatBuffModel) {
@@ -110,19 +74,39 @@ int InferenceTFLite::Load()
         return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
     }
 
+
+    LOGI("Inferece targets are: [%d]", mTargetTypes);
+
+    switch (mTargetTypes) {
+    case INFERENCE_TARGET_CPU:
+        mInterpreter->UseNNAPI(false);
+        break;
+    case INFERENCE_TARGET_GPU:
+        mInterpreter->UseNNAPI(true);
+        break;
+    case INFERENCE_TARGET_CUSTOM:
+    case INFERENCE_TARGET_NONE:
+    default:
+        LOGW("Not supported device type [%d], Set CPU mode", (int)mTargetTypes);
+    }
+
     mInterpreter->SetNumThreads(MV_INFERENCE_TFLITE_MAX_THREAD_NUM);
 
     // input tensor
     if (mInterpreter->inputs().size()) {
-        mInputLayerId = mInterpreter->inputs()[0];
+        mInputLayerId = mInterpreter->inputs();
     } else {
-        mInputLayerId = -1;
-        for (int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
-            if (mInterpreter->tensor(idx)->name == NULL)
-                continue;
-            if (mInputLayer.compare(mInterpreter->tensor(idx)->name) == 0) {
-                mInputLayerId = idx;
-                break;
+        std::vector<std::string>::iterator iter;
+        mInputLayerId.clear();
+        for (iter = mInputLayer.begin(); iter != mInputLayer.end(); ++iter) {
+            LOGI("mInputLayer list [%s]", (*iter).c_str());
+            for (unsigned int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
+                if (mInterpreter->tensor(idx)->name == NULL)
+                    continue;
+                if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
+                    mInputLayerId.push_back(idx);
+                    break;
+                }
             }
         }
     }
@@ -134,8 +118,8 @@ int InferenceTFLite::Load()
         std::vector<std::string>::iterator iter;
         mOutputLayerId.clear();
         for (iter = mOutputLayer.begin(); iter != mOutputLayer.end(); ++iter) {
-            LOGI("%s", (*iter).c_str());
-            for (int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
+            LOGI("mOutputLayer list [%s]", (*iter).c_str());
+            for (unsigned int idx = 0; idx < mInterpreter->tensors_size(); ++idx) {
                 if (mInterpreter->tensor(idx)->name == NULL)
                     continue;
                 if ((*iter).compare(mInterpreter->tensor(idx)->name) == 0) {
@@ -151,97 +135,197 @@ int InferenceTFLite::Load()
         return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
     }
 
-    mInputAttrType = mInterpreter->tensor(mInputLayerId)->type;
+    for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx ) {
+        mInputAttrType.push_back(mInterpreter->tensor(mInputLayerId[idx])->type);
+    }
 
     return ret;
 }
 
-int InferenceTFLite::CreateInputLayerPassage()
+int InferenceTFLite::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
 {
-    if (mInputAttrType == kTfLiteUInt8) {
-        mInputData = mInterpreter->typed_tensor<uint8_t>(mInputLayerId); //tflite
-        LOGI("InputType is DT_UINT8");
-    }
-    else if (mInputAttrType == kTfLiteFloat32) {
-        mInputData = mInterpreter->typed_tensor<float>(mInputLayerId); //tflite
-        LOGI("InputType is DT_FLOAT");
-    }
-    else {
-        LOGE("Not supported");
-        return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+    LOGI("ENTER");
+
+    mInputData.clear();
+
+    void *pBuff = NULL;
+
+    for (unsigned int idx = 0; idx < mInputLayerId.size(); ++idx ) {
+
+        inference_engine_tensor_buffer buffer;
+
+        if (mInputAttrType[idx] == kTfLiteUInt8) {
+            mInputData.push_back(mInterpreter->typed_tensor<uint8_t>(mInputLayerId[idx]));
+            pBuff = mInputData.back();
+            buffer = {pBuff, TENSOR_DATA_TYPE_UINT8, 0, 1};
+        }
+        else if (mInputAttrType[idx] == kTfLiteFloat32) {
+            mInputData.push_back(mInterpreter->typed_tensor<float>(mInputLayerId[idx]));
+            pBuff = mInputData.back();
+            buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, 0, 1};
+        }
+        else {
+            LOGE("Not supported");
+            return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+        }
+        buffers.push_back(buffer);
     }
 
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceTFLite::GetInputLayerAttrType()
+int InferenceTFLite::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
 {
-    return mInputAttrType;
-}
+    void *pBuff = NULL;
 
-void * InferenceTFLite::GetInputDataPtr()
-{
-    return mInputData;
+    for (unsigned int idx = 0; idx < mOutputLayerId.size(); ++idx) {
+        inference_engine_tensor_buffer buffer;
+
+        pBuff = (void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]);
+        buffer = {pBuff, TENSOR_DATA_TYPE_FLOAT32, 0, 1};
+
+        buffers.push_back(buffer);
+    }
+    return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceTFLite::SetInputDataBuffer(tensor_t data)
+int InferenceTFLite::GetInputLayerProperty(inference_engine_layer_property &property)
 {
+    LOGI("ENTER");
+
+    if (mInputLayer.empty()) {
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    property.layer_names = mInputLayer;
+    property.tensor_infos = mInputTensorInfo;
+
+    LOGI("LEAVE");
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceTFLite::Run()
+int InferenceTFLite::GetOutputLayerProperty(inference_engine_layer_property &property)
 {
     LOGI("ENTER");
-    TfLiteStatus status = mInterpreter->Invoke();
 
-    if (status != kTfLiteOk) {
-        LOGE("Fail to invoke with kTfLiteError");
+    if (mOutputLayer.empty()) {
+        LOGI("mOutputLayer is empty");
         return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
     }
 
+    std::vector<inference_engine_tensor_info>().swap(mOutputTensorInfo);
+    int idx = 0;
+    for (std::vector<int>::iterator iter = mOutputLayerId.begin(); iter != mOutputLayerId.end(); ++iter, ++idx) {
+        LOGI("output layer ID: %d", (*iter));
+        if((*iter) < 0) {
+            LOGE("Invalid output layer");
+            return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+        }
+        inference_engine_tensor_info tensor_info;
+
+        LOGI("mInterpreter->tensor((*iter))->dims name[%s]", mInterpreter->tensor((*iter))->name);
+        LOGI("mInterpreter->tensor((*iter))->dims size[%d]", mInterpreter->tensor((*iter))->dims->size);
+        LOGI("mInterpreter->tensor((*iter))->dims type[%d]", mInterpreter->tensor((*iter))->type);
+
+        std::vector<int> shape_nhwc;
+        for (int d = 0; d < mInterpreter->tensor((*iter))->dims->size; ++d) {
+            LOGI("mInterpreter->tensor((*iter))->dims[%d] = [%d]", d, mInterpreter->tensor((*iter))->dims->data[d]);
+            shape_nhwc.push_back(mInterpreter->tensor((*iter))->dims->data[d]);
+        }
+
+        //tflite only supports NHWC (https://www.tensorflow.org/lite/guide/ops_compatibility).
+        tensor_info.shape = shape_nhwc;
+        tensor_info.shape_type = TENSOR_SHAPE_NHWC;
+        if (mInterpreter->tensor((*iter))->type == kTfLiteUInt8) {
+            LOGI("type is kTfLiteUInt8");
+            tensor_info.data_type = TENSOR_DATA_TYPE_UINT8;
+        }
+        else if (mInterpreter->tensor((*iter))->type == kTfLiteFloat32) {
+            LOGI("type is kTfLiteFloat32");
+            tensor_info.data_type = TENSOR_DATA_TYPE_FLOAT32;
+        }
+        else {
+            LOGE("Not supported");
+            return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+        }
+        tensor_info.size = 1;
+        for (std::vector<int>::iterator iter2 = tensor_info.shape.begin();
+            iter2 != tensor_info.shape.end(); ++iter2) {
+                tensor_info.size *= (*iter2);
+        }
+        mOutputTensorInfo.push_back(tensor_info);
+    }
+
+    property.layer_names = mOutputLayer;
+    property.tensor_infos = mOutputTensorInfo;
+
     LOGI("LEAVE");
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceTFLite::Run(std::vector<float> tensor)
+int InferenceTFLite::SetInputLayerProperty(inference_engine_layer_property &property)
 {
     LOGI("ENTER");
-    int dataIdx = 0;
-    float * inputData = static_cast<float*>(mInputData);
-    for( std::vector<float>::iterator iter = tensor.begin();
-        iter != tensor.end(); ++iter) {
-            inputData[dataIdx] = *iter;
-            dataIdx++;
+
+    std::vector<std::string>::iterator iter;
+    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+        std::string name = *iter;
+        LOGI("input layer name = %s", name.c_str());
     }
 
-    TfLiteStatus status = mInterpreter->Invoke();
+    mInputLayer.clear();
+    std::vector<std::string>().swap(mInputLayer);
 
-    if (status != kTfLiteOk) {
-        LOGE("Fail to invoke with kTfLiteError");
-        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    mInputTensorInfo.clear();
+    std::vector<inference_engine_tensor_info>().swap(mInputTensorInfo);
+
+    mInputLayer = property.layer_names;
+    mInputTensorInfo = property.tensor_infos;
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int InferenceTFLite::SetOutputLayerProperty(inference_engine_layer_property &property)
+{
+    std::vector<std::string>::iterator iter;
+    for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) {
+        std::string name = *iter;
+        LOGI("output layer name = %s", name.c_str());
     }
 
-    LOGI("LEAVE");
+    mOutputLayer.clear();
+    std::vector<std::string>().swap(mOutputLayer);
+
+    mOutputLayer = property.layer_names;
+
     return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int InferenceTFLite::GetInferenceResult(tensor_t& results)
+int InferenceTFLite::GetBackendCapacity(inference_engine_capacity *capacity)
 {
     LOGI("ENTER");
 
-    TfLiteIntArray* dims = NULL;
-    std::vector<int> tmpDimInfo;
+    if (capacity == NULL) {
+        LOGE("Bad pointer.");
+        return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+    }
 
-    for (int idx = 0; idx < mOutputLayerId.size(); ++idx) {
-        dims = mInterpreter->tensor(mOutputLayerId[idx])->dims;
+    capacity->supported_accel_devices = INFERENCE_TARGET_CPU;
 
-        tmpDimInfo.clear();
-        for (int d = 0; d < dims->size; ++d) {
-            tmpDimInfo.push_back(dims->data[d]);
-        }
+    LOGI("LEAVE");
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
 
-        results.dimInfo.push_back(tmpDimInfo);
-        results.data.push_back((void*)mInterpreter->typed_tensor<float>(mOutputLayerId[idx]));
+int InferenceTFLite::Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+                        std::vector<inference_engine_tensor_buffer> &output_buffers)
+{
+    LOGI("ENTER");
+    TfLiteStatus status = mInterpreter->Invoke();
+
+    if (status != kTfLiteOk) {
+        LOGE("Fail to invoke with kTfLiteError");
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
     }
 
     LOGI("LEAVE");
@@ -250,9 +334,9 @@ int InferenceTFLite::GetInferenceResult(tensor_t& results)
 
 extern "C"
 {
-class IInferenceEngineCommon* EngineCommonInit(std::string protoFile, std::string weightFile)
+class IInferenceEngineCommon* EngineCommonInit(void)
 {
-    InferenceTFLite *engine = new InferenceTFLite(protoFile, weightFile);
+    InferenceTFLite *engine = new InferenceTFLite();
     return engine;
 }
 
index 2c388f7d82df42db8335304beb01e10c69c22024..1ab36c1c394e2e3bb274265060b3c5fd4e0749cc 100644 (file)
@@ -46,57 +46,50 @@ namespace TFLiteImpl {
 
 class InferenceTFLite : public IInferenceEngineCommon {
 public:
-    InferenceTFLite(std::string protoFile,
-                    std::string weightFile);
-
+    InferenceTFLite();
     ~InferenceTFLite();
 
-    // InputTensor
-    int SetInputTensorParam() override;
-
-    int SetInputTensorParamNode(std::string node = "input") override;
-
-    // Output Tensor Params
-    int SetOutputTensorParam() override;
-
-    int SetOutputTensorParamNodes(std::vector<std::string> nodes) override;
+    int SetTargetDevices(int types) override;
 
-    int SetTargetDevice(inference_target_type_e type) override;
+    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) override;
 
-    // Load and Run
-    int Load() override;
+    int GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
 
-    int CreateInputLayerPassage() override;
+    int GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers) override;
 
-    int GetInputLayerAttrType() override;
+    int GetInputLayerProperty(inference_engine_layer_property &property) override;
 
-    void * GetInputDataPtr() override;
+    int GetOutputLayerProperty(inference_engine_layer_property &property) override;
 
-    int SetInputDataBuffer(tensor_t data) override;
+    int SetInputLayerProperty(inference_engine_layer_property &property) override;
 
-    int Run() override;
+    int SetOutputLayerProperty(inference_engine_layer_property &property) override;
 
-    int Run(std::vector<float> tensor) override;
-
-    int GetInferenceResult(tensor_t& results);
+    int GetBackendCapacity(inference_engine_capacity *capacity) override;
 
+    int Run(std::vector<inference_engine_tensor_buffer> &input_buffers,
+            std::vector<inference_engine_tensor_buffer> &output_buffers) override;
     
 private:
     std::unique_ptr<tflite::Interpreter> mInterpreter;
     std::unique_ptr<tflite::FlatBufferModel> mFlatBuffModel;
+    std::vector<void *> mInputData;
 
-    std::string mInputLayer;
+    std::vector<std::string> mInputLayer; /**< Input layer name */
     std::vector<std::string> mOutputLayer; /**< Output layer name */
 
-    int mInputLayerId;
-    std::vector<int> mOutputLayerId;
+    std::vector<inference_engine_tensor_info> mInputTensorInfo;
+    std::vector<inference_engine_tensor_info> mOutputTensorInfo;
 
-    TfLiteType mInputAttrType;
+    std::vector<int> mInputLayerId;
+    std::vector<int> mOutputLayerId;
 
-    void *mInputData;
+    std::vector<TfLiteType> mInputAttrType;
+    std::vector<TfLiteType> mOutputAttrType;
 
     std::string mConfigFile;
     std::string mWeightFile;
+    int mTargetTypes;
 };
 
 } /* InferenceEngineImpl */
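
For reference, the sketch below shows how a caller might drive the refactored backend after this patch. It is a minimal, hypothetical example and not part of the commit: the header name "inference_engine_common.h", the model-format value INFERENCE_MODEL_TFLITE, the layer names and the model path are assumptions; only the member functions and structures used here (SetTargetDevices, SetInputLayerProperty, SetOutputLayerProperty, Load, GetInputTensorBuffers, GetOutputTensorBuffers, Run, inference_engine_layer_property, inference_engine_tensor_buffer) come from the diff above.

// Hypothetical caller of the refactored TFLite backend (sketch only).
#include <string>
#include <vector>
#include "inference_engine_common.h" // assumed header declaring IInferenceEngineCommon and the common types

int RunMobilenetOnce(IInferenceEngineCommon *engine)
{
    // The target device is stored first and applied later inside Load().
    engine->SetTargetDevices(INFERENCE_TARGET_CPU);

    // Layer names must be set before Load() so it can resolve the tensor ids.
    inference_engine_layer_property input_prop;
    input_prop.layer_names = { "input" };                              // assumed layer name
    engine->SetInputLayerProperty(input_prop);

    inference_engine_layer_property output_prop;
    output_prop.layer_names = { "MobilenetV1/Predictions/Reshape_1" }; // assumed layer name
    engine->SetOutputLayerProperty(output_prop);

    // The backend reads only the last path entry as the .tflite weight file.
    std::vector<std::string> model_paths = { "/usr/share/mobilenet.tflite" }; // assumed path
    int ret = engine->Load(model_paths, INFERENCE_MODEL_TFLITE);             // enum value assumed
    if (ret != INFERENCE_ENGINE_ERROR_NONE)
        return ret;

    // The returned buffers alias the interpreter's own input/output tensors.
    std::vector<inference_engine_tensor_buffer> inputs, outputs;
    engine->GetInputTensorBuffers(inputs);
    engine->GetOutputTensorBuffers(outputs);

    // ... write preprocessed input data through inputs[0] here ...

    return engine->Run(inputs, outputs);
}

The engine instance itself would come from the C entry point EngineCommonInit() shown at the end of inference_engine_tflite.cpp, typically resolved by the common layer when it loads the backend library.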