Support SNPE 08/272508/2
author     Tae-Young Chung <ty83.chung@samsung.com>
Fri, 18 Mar 2022 05:00:31 +0000 (14:00 +0900)
committer  Tae-Young Chung <ty83.chung@samsung.com>
Mon, 21 Mar 2022 06:00:13 +0000 (15:00 +0900)
[Version] 0.0.2-0
[Issue type] new feature

SNPE is supported when _DA_RVC_65 is defined; that define is provided by
inference-engine-interface (004cfa27e471d484e4ccdc68abb3a8072001bf05).
A minimal usage sketch is included after the commit list below.
This commit is based on the following commits:

commit 13237cebbc83c2e4fb02a9bb15df2aefadb09c9e
Author: Inki Dae <inki.dae@samsung.com>
Date:   Tue Jan 11 20:29:44 2022 +0900

    add custom device capacity for SNPE engine
------------------------
commit db16f45efe816a4e7521085836059b308e1f4c12
Author: Inki Dae <inki.dae@samsung.com>
Date:   Wed Dec 29 18:23:42 2021 +0900

    src: use ml_single_open_full api
------------------------
commit 58bc5dba631b40f9e7784c3e9713162078696b0d
Author: Seungbae Shin <seungbae.shin@samsung.com>
Date:   Thu Nov 25 12:30:02 2021 +0900

    Refactoring InferenceMLAPI::Load()
------------------------
commit 99114d5faa27998c3781eb7e5bfbab8f43400082
Author: Inki Dae <inki.dae@samsung.com>
Date:   Wed Nov 24 18:11:54 2021 +0900

    add SNPE tensor filter support
------------------------
commit 8f5f3b5c447fa0889a9f03d68c8a9df7c0c24bba
Author: Inki Dae <inki.dae@samsung.com>
Date:   Tue Jun 8 12:49:48 2021 +0900

    Consider user-given property info first
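
For reference, here is a minimal standalone sketch (not part of this patch) of what
the new SNPE path in InferenceMLAPI::Load() boils down to. The header name and the
model path are assumptions for illustration, and the RUNTIME value would normally
come from GetCustomProp():

    #include <nnstreamer-single.h>

    static int open_snpe_model(ml_single_h *single)
    {
            /* Input/output tensor info are left NULL so the tensor filter
             * derives them from the model, as the new Load() does. */
            return ml_single_open_full(single, "/path/to/model.dlc" /* assumed */,
                                       NULL, NULL,
                                       ML_NNFW_TYPE_SNPE, ML_NNFW_HW_ANY,
                                       "RUNTIME:DSP" /* or RUNTIME:CPU / RUNTIME:GPU */);
    }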

Change-Id: I4c0979ec4efc5dbca3b37f12f686f30ca09a46be
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp
src/inference_engine_mlapi_private.h

index 1ac601ccfd3d5e1fc0838967f2e81c9bc0e031ab..3411032ee0ee9d7951f534e338579cd47dcbddf6 100644 (file)
@@ -1,7 +1,7 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.0.1
-Release:    2
+Version:    0.0.2
+Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
 ExclusiveArch: %{arm} aarch64
index 397a029007b3625e4a9645b279582438eda63b36..51c9b43f474e1539617cc91642554e6f0932a442 100644 (file)
@@ -87,8 +87,13 @@ namespace MLAPIImpl
                inference_backend_type_e type =
                                *(static_cast<inference_backend_type_e *>(data));
 
-               if (INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type ||
-                               INFERENCE_BACKEND_OPENCV == type) {
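+               // With _DA_RVC_65, INFERENCE_BACKEND_NPU_SNPE is additionally accepted; it is handled as a custom backend type.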
+               if ((INFERENCE_BACKEND_NONE >= type || INFERENCE_BACKEND_MAX <= type ||
+#ifdef _DA_RVC_65
+                       INFERENCE_BACKEND_OPENCV == type) &&
+                       INFERENCE_BACKEND_NPU_SNPE != type) {
+#else
+                       INFERENCE_BACKEND_OPENCV == type) {
+#endif
                        LOGE("Invalid backend type.(%d)", type);
                        return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                }
@@ -137,6 +142,229 @@ namespace MLAPIImpl
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
+#ifdef _DA_RVC_65
+       int InferenceMLAPI::CreateMLAPITensorInfo(ml_tensors_info_h& tensor_info,
+                                                                                         inference_engine_layer_property& layer_property)
+       {
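+               // Translate the given inference_engine_layer_property into an ml_tensors_info_h (count, type and dimension per tensor).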
+               if (layer_property.layers.empty()) {
+                       LOGE("input or output property is empty.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               int err = ml_tensors_info_create(&tensor_info);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to create tensor info(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               err = ml_tensors_info_set_count(tensor_info, layer_property.layers.size());
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to set tensor count(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               size_t layer_idx = 0;
+
+               for (auto& iter : layer_property.layers) {
+                       inference_engine_tensor_info& info = iter.second;
+
+                       int tensor_type = 0;
+
+                       try {
+                               tensor_type = ConvertTensorTypeToMLAPI(info.data_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), info.data_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+
+                       err = ml_tensors_info_set_tensor_type(tensor_info, layer_idx, static_cast<ml_tensor_type_e>(tensor_type));
+                       if (err != ML_ERROR_NONE) {
+                               LOGE("Failed to set tensor type(%d).", err);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       // TODO. nnstreamer needs a fixed dimension of 4 for the nntrainer tensor filter. Why?
+                       std::vector<unsigned int> indim(4, 1);
+
+                       LOGI("Input tensor(%zu) shape:", layer_idx);
+
+                       std::copy(info.shape.begin(), info.shape.end(), indim.begin());
+
+                       for (auto& shape_value : indim)
+                               LOGI("%u", shape_value);
+
+                       err = ml_tensors_info_set_tensor_dimension(tensor_info, layer_idx, indim.data());
+                       if (err != ML_ERROR_NONE) {
+                               LOGE("Failed to set tensor dimension(%d).", err);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+
+                       layer_idx++;
+               }
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> InferenceMLAPI::GetNNFWInfo()
+       {
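+               // Map the backend plugin type to the matching NNStreamer tensor filter type and preferred hardware.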
+               switch (mPluginType) {
+               case INFERENCE_BACKEND_NPU_VIVANTE:
+                       LOGI("Vivante tensor filter will be used.");
+                       return std::make_tuple(ML_NNFW_TYPE_VIVANTE, ML_NNFW_HW_ANY);
+
+               case INFERENCE_BACKEND_NPU_SNPE:
+                       LOGI("SNPE tensor filter will be used.");
+                       return std::make_tuple(ML_NNFW_TYPE_SNPE, ML_NNFW_HW_ANY);
+
+               case INFERENCE_BACKEND_ONE:
+                       LOGI("NNFW tensor filter will be used.");
+
+                       if (mTargetDevice == INFERENCE_TARGET_CPU) {
+                               LOGI("Target device is NEON.");
+                               return std::make_tuple(ML_NNFW_TYPE_NNFW, ML_NNFW_HW_CPU_NEON);
+                       } else if (mTargetDevice == INFERENCE_TARGET_GPU) {
+                               LOGI("Target device is GPU.");
+                               return std::make_tuple(ML_NNFW_TYPE_NNFW, ML_NNFW_HW_GPU);
+                       }
+
+                       LOGE("Invalid inference target device type.");
+                       throw std::invalid_argument("invalid target device type.");
+
+               case INFERENCE_BACKEND_ARMNN:
+                       LOGI("ARMNN tensor filter will be used.");
+                       return std::make_tuple(ML_NNFW_TYPE_ARMNN, ML_NNFW_HW_ANY);
+
+               case INFERENCE_BACKEND_TFLITE:
+                       LOGI("TFLITE tensor filter will be used.");
+                       return std::make_tuple(ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+
+               default:
+                       LOGE("Invalid plugin type.");
+                       throw std::invalid_argument("invalid plugin type.");
+               }
+       }
+
+       bool InferenceMLAPI::IsFileReadable(const std::string& path)
+       {
+               LOGI("ENTER");
+
+               if (access(path.c_str(), R_OK) == -1) {
+                       LOGE("file [%s] is not readable, errno(%d)", path.c_str(), errno);
+                       return false;
+               }
+               LOGI("LEAVE");
+
+               return true;
+       }
+
+       std::string InferenceMLAPI::GetModelPath(const std::vector<std::string>& model_paths)
+       {
+               switch (mPluginType) {
+               case INFERENCE_BACKEND_NPU_VIVANTE:
+                       if (!IsFileReadable(model_paths[0]) ||
+                               !IsFileReadable(model_paths[1]))
+                               throw std::runtime_error("invalid path");
+
+                       // The ML Single API expects model_paths to be given as
+                       // "so library file path,nb model file path" or vice versa.
+                       return model_paths[0] + "," + model_paths[1];
+
+               case INFERENCE_BACKEND_NPU_SNPE:
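+                       // SNPE takes a single model file (a .dlc container).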
+                       if (!IsFileReadable(model_paths[0]))
+                               throw std::runtime_error("invalid path");
+                       return model_paths[0];
+
+               case INFERENCE_BACKEND_ONE:
+                       /* fall through */
+               case INFERENCE_BACKEND_ARMNN:
+                       /* fall through */
+               case INFERENCE_BACKEND_TFLITE:
+                       if (!IsFileReadable(model_paths[0]))
+                               throw std::runtime_error("invalid path");
+                       return model_paths[0];
+
+               default:
+                       throw std::runtime_error("should not reach here");
+               }
+       }
+
+       const char* InferenceMLAPI::GetCustomProp()
+       {
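+               // For SNPE, the target device is forwarded to the tensor filter as a RUNTIME custom property (CPU/GPU/DSP).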
+               if (mPluginType != INFERENCE_BACKEND_NPU_SNPE)
+                       return "";
+
+               return mTargetDevice == INFERENCE_TARGET_CPU ? "RUNTIME:CPU" :
+                                  mTargetDevice == INFERENCE_TARGET_GPU ? "RUNTIME:GPU" : "RUNTIME:DSP";
+       }
+
+       int InferenceMLAPI::Load(std::vector<std::string> model_paths,
+                                                        inference_model_format_e model_format)
+       {
+               LOGI("ENTER");
+
+               std::string model_str;
+
+               ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_ANY;
+               ml_nnfw_hw_e nnfw_hw = ML_NNFW_HW_ANY;
+
+               try {
+                       std::tie(nnfw_type, nnfw_hw) = GetNNFWInfo();
+                       model_str = GetModelPath(model_paths);
+               } catch (const std::invalid_argument& ex) {
+                       LOGE("Failed to get NNFW info (%s).", ex.what());
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               } catch (const std::runtime_error& ex) {
+                       LOGE("Failed to get model path (%s).", ex.what());
+                       return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+               }
+
+               LOGI("Model name = %s", model_str.c_str());
+
+               ml_tensors_info_h in_info = NULL, out_info = NULL;
+
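+               // in_info and out_info are left NULL here; the actual tensor information is queried from the opened model below.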
+               int err = ml_single_open_full(&mSingle, model_str.c_str(), in_info, out_info,
+                                                                nnfw_type, nnfw_hw, GetCustomProp());
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_single_open_full(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               if (mInputInfoHandle) {
+                       ml_tensors_info_destroy(mInputInfoHandle);
+                       mInputInfoHandle = NULL;
+               }
+
+               err = ml_single_get_input_info(mSingle, &mInputInfoHandle);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_single_get_input_info(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               if (mOutputInfoHandle) {
+                       ml_tensors_info_destroy(mOutputInfoHandle);
+                       mOutputInfoHandle = NULL;
+               }
+
+               err = ml_single_get_output_info(mSingle, &mOutputInfoHandle);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to request ml_single_get_output_info(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               err = UpdateTensorsInfo();
+               if (err != INFERENCE_ENGINE_ERROR_NONE) {
+                       ml_single_close(mSingle);
+                       mSingle = NULL;
+               }
+
+               LOGI("LEAVE");
+
+               return err;
+       }
+#else
+
        int InferenceMLAPI::Load(std::vector<std::string> model_paths,
                                                         inference_model_format_e model_format)
        {
@@ -230,6 +458,7 @@ namespace MLAPIImpl
 
                return err;
        }
+#endif
 
        int InferenceMLAPI::GetInputTensorBuffers(
                        std::map<std::string, inference_engine_tensor_buffer> &buffers)
@@ -275,12 +504,21 @@ namespace MLAPIImpl
                        }
 
                        LOGI("input tensor type = %d", in_type);
-
+#ifdef _DA_RVC_65
+                       int type = 0;
+
+                       try {
+                               type = ConvertTensorTypeToInternal(in_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), in_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+#else
                        int type = ConvertTensorType(in_type);
                        if (type == -1) {
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                        }
-
+#endif
                        in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
                        in_buffer.owner_is_backend = 1;
 
@@ -337,11 +575,21 @@ namespace MLAPIImpl
 
                        LOGI("output tensor type = %d", out_type);
 
+#ifdef _DA_RVC_65
+                       int type = 0;
+
+                       try {
+                               type = ConvertTensorTypeToInternal(out_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), out_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+#else
                        int type = ConvertTensorType(out_type);
                        if (type == -1) {
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                        }
-
+#endif
                        out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
                        out_buffer.owner_is_backend = 1;
 
@@ -376,11 +624,21 @@ namespace MLAPIImpl
 
                        LOGI("input tensor type = %d", in_type);
 
+#ifdef _DA_RVC_65
+                       int type = 0;
+
+                       try {
+                               type = ConvertTensorTypeToInternal(in_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), in_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+#else
                        int type = ConvertTensorType(in_type);
                        if (type == -1) {
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                        }
-
+#endif
                        ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, input.second, in_dim);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
@@ -401,7 +659,9 @@ namespace MLAPIImpl
 
                        tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
                        tensor_info.size = in_size;
-
+#ifdef _DA_RVC_65
+                       tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
+#endif
                        property.layers.insert(std::make_pair(input.first, tensor_info));
 
                        // TODO. Compare tensor info from engine to one from a given property.
@@ -435,11 +695,21 @@ namespace MLAPIImpl
 
                        LOGI("output tensor type = %d", out_type);
 
+#ifdef _DA_RVC_65
+                       int type = 0;
+
+                       try {
+                               type = ConvertTensorTypeToInternal(out_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), out_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+#else
                        int type = ConvertTensorType(out_type);
                        if (type == -1) {
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                        }
-
+#endif
                        ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, output.second, out_dim);
                        if (ret != ML_ERROR_NONE) {
                                LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).",
@@ -475,7 +745,9 @@ namespace MLAPIImpl
 
                        tensor_info.data_type = static_cast<inference_tensor_data_type_e>(type);
                        tensor_info.size = out_size;
-
+#ifdef _DA_RVC_65
+                       tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
+#endif
                        property.layers.insert(std::make_pair(output.first, tensor_info));
 
                        // TODO. Compare tensor info from engine to one from a given property.
@@ -542,7 +814,12 @@ namespace MLAPIImpl
                }
 
                // TODO. flag supported accel device types according to a given ML Single API of nnstreamer backend.
+#ifdef _DA_RVC_65
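+               // SNPE, like Vivante, is exposed through the custom accel device type.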
+               if (mPluginType == INFERENCE_BACKEND_NPU_VIVANTE ||
+                       mPluginType == INFERENCE_BACKEND_NPU_SNPE) {
+#else
                if (mPluginType == INFERENCE_BACKEND_NPU_VIVANTE) {
+#endif
                        capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
                } else {
                        capacity->supported_accel_devices = INFERENCE_TARGET_GPU |
@@ -565,6 +842,176 @@ namespace MLAPIImpl
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
+#ifdef _DA_RVC_65
+       int InferenceMLAPI::ConvertTensorTypeToInternal(int tensor_type)
+       {
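+               // Unlike the legacy ConvertTensorType(), unsupported types throw std::invalid_argument instead of returning -1.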
+               LOGI("ENTER");
+
+               int converted_type = 0;
+
+               switch (tensor_type) {
+               case ML_TENSOR_TYPE_FLOAT32:
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+                       break;
+               case ML_TENSOR_TYPE_UINT8:
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+                       break;
+               case ML_TENSOR_TYPE_UINT16:
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT16;
+                       break;
+               case ML_TENSOR_TYPE_INT64:
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
+                       break;
+               case ML_TENSOR_TYPE_UINT64:
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT64;
+                       break;
+               default:
+                       throw std::invalid_argument("invalid tensor type.");
+               }
+
+               LOGI("LEAVE");
+
+               return converted_type;
+       }
+
+       int InferenceMLAPI::ConvertTensorTypeToMLAPI(int tensor_type)
+       {
+               LOGI("ENTER");
+
+               int converted_type = 0;
+
+               switch (tensor_type) {
+               case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
+                       converted_type = ML_TENSOR_TYPE_FLOAT32;
+                       break;
+               case INFERENCE_TENSOR_DATA_TYPE_UINT8:
+                       converted_type = ML_TENSOR_TYPE_UINT8;
+                       break;
+               case INFERENCE_TENSOR_DATA_TYPE_UINT16:
+                       converted_type = ML_TENSOR_TYPE_UINT16;
+                       break;
+               case INFERENCE_TENSOR_DATA_TYPE_INT64:
+                       converted_type = ML_TENSOR_TYPE_INT64;
+                       break;
+               case INFERENCE_TENSOR_DATA_TYPE_UINT64:
+                       converted_type = ML_TENSOR_TYPE_UINT64;
+                       break;
+               default:
+                       throw std::invalid_argument("invalid tensor type.");
+               }
+
+               LOGI("LEAVE");
+
+               return converted_type;
+       }
+
+       int InferenceMLAPI::UpdateTensorsInfo()
+       {
+               LOGI("ENTER");
+
+               if (!mSingle) {
+                       LOGE("Invalid state, single-shot handle is not initialized.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               unsigned int input_tensor_cnt = 0;
+
+               // If user-given input layer information exists then use it.
+               if (!mInputProperty.layers.empty()) {
+                       for (auto& iter : mInputProperty.layers) {
+                               LOGI("index:%u with name %s", input_tensor_cnt, iter.first.c_str());
+                               mDesignated_inputs.insert(std::make_pair(iter.first, input_tensor_cnt));
+                               input_tensor_cnt++;
+                       }
+               // Otherwise, request input layer information to tensor filter.
+               } else {
+                       int ret = ml_tensors_info_get_count(mInputInfoHandle, &input_tensor_cnt);
+                       if (ret != ML_ERROR_NONE || !input_tensor_cnt) {
+                               LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       for (unsigned int index = 0; index < input_tensor_cnt; ++index) {
+                               char *in_name = NULL;
+                               ret = ml_tensors_info_get_tensor_name(mInputInfoHandle, index, &in_name);
+                               LOGI("index:%d with name %s", index, in_name);
+                               if (ret != ML_ERROR_NONE) {
+                                       LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).", ret);
+                                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                               }
+
+                               if (in_name == NULL)
+                                       continue;
+
+                               mDesignated_inputs.insert(std::make_pair(std::string(in_name), index));
+                               free(in_name);
+                       }
+               }
+
+               LOGI("input tensor count = %u", input_tensor_cnt);
+
+               unsigned int output_tensor_cnt = 0;
+
+               // If user-given output layer information exists then use it.
+               if (!mOutputProperty.layers.empty()) {
+                       unsigned int index = 0;
+                       for (auto& iter : mOutputProperty.layers) {
+                               LOGI("index:%u with name %s", index, iter.first.c_str());
+                               mDesignated_outputs.insert(std::make_pair(iter.first, index));
+                               index++;
+                       }
+
+                       output_tensor_cnt = index;
+
+                       for (index = 0; index < output_tensor_cnt; ++index) {
+                               char *out_name = NULL;
+
+                               int ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
+                               LOGI("index:%u with name %s", index, out_name);
+                               if (ret != ML_ERROR_NONE) {
+                                       LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+                                                ret);
+                                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                               }
+
+                               if (out_name == NULL)
+                                       continue;
+
+                               free(out_name);
+                       }
+               // Otherwise, request output layer information to tensor filter.
+               } else {
+                       int ret = ml_tensors_info_get_count(mOutputInfoHandle, &output_tensor_cnt);
+                       if (ret != ML_ERROR_NONE || output_tensor_cnt == 0) {
+                               LOGE("Failed to request ml_tensors_info_get_count(%d).", ret);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       for (unsigned int index = 0; index < output_tensor_cnt; ++index) {
+                               char *out_name = NULL;
+
+                               ret = ml_tensors_info_get_tensor_name(mOutputInfoHandle, index, &out_name);
+                               LOGI("index:%u with name %s", index, out_name);
+                               if (ret != ML_ERROR_NONE) {
+                                       LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).",
+                                                ret);
+                                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                               }
+
+                               if (out_name == NULL)
+                                       continue;
+
+                               mDesignated_outputs.insert(std::make_pair(std::string(out_name), index));
+                               free(out_name);
+                       }
+               }
+
+               LOGI("output tensor count = %u", output_tensor_cnt);
+
+               LOGI("LEAVE");
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+#else
        int InferenceMLAPI::ConvertTensorType(int tensor_type)
        {
                LOGI("ENTER");
@@ -680,6 +1127,7 @@ namespace MLAPIImpl
                LOGI("LEAVE");
                return INFERENCE_ENGINE_ERROR_NONE;
        }
+#endif
 
        int InferenceMLAPI::Run(
                        std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
index fe39594cc62a6623aec05dd8ac0c0de517d2bbcb..df5b3174f799b1af0d07b6687eb61bb0a01f344f 100644 (file)
@@ -22,6 +22,9 @@
 
 #include <memory>
 #include <dlog.h>
+#ifdef _DA_RVC_65
+#include <tuple>
+#endif
 
 #ifdef LOG_TAG
 #undef LOG_TAG
@@ -78,7 +81,18 @@ namespace MLAPIImpl
                int CheckTensorBuffers(
                                std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
                                std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
+#ifdef _DA_RVC_65
+               int ConvertTensorTypeToInternal(int tensor_type);
+               int ConvertTensorTypeToMLAPI(int tensor_type);
+               int CreateMLAPITensorInfo(ml_tensors_info_h& tensor_info,
+                                                                 inference_engine_layer_property& layer_property);
+               bool IsFileReadable(const std::string& path);
+               std::tuple<ml_nnfw_type_e, ml_nnfw_hw_e> GetNNFWInfo();
+               std::string GetModelPath(const std::vector<std::string>& model_paths);
+               const char* GetCustomProp();
+#else
                int ConvertTensorType(int tensor_type);
+#endif
                int UpdateTensorsInfo();
 
                int mPluginType;