fix NNTrainer inference issue 66/266466/6
authorInki Dae <inki.dae@samsung.com>
Fri, 12 Nov 2021 09:41:41 +0000 (18:41 +0900)
committerInki Dae <inki.dae@samsung.com>
Wed, 17 Nov 2021 10:57:59 +0000 (19:57 +0900)
[Version] : 0.2.0-0
[Issue type] : bug fix

Fixed NNTrainer inference issue.

The NNTrainer backend needs ML tensor info for the input and output tensors
when loading a given model. So this patch creates the info before
requesting the model load, and then passes them to the ml_single_open
function as its arguments.
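
For reference, a minimal sketch of the call sequence this patch sets up,
independent of the backend code: one FLOAT32 tensor is described on each
side and both info handles are passed to ml_single_open(). The model path,
the 1x1x1x1 shape, the helper name, and the ML_NNFW_TYPE_ANY/ML_NNFW_HW_ANY
enums are placeholders rather than the values the backend actually uses,
header names may differ by platform, and error handling is shortened.

    #include <nnstreamer.h>
    #include <nnstreamer-single.h>

    static int open_model_with_tensor_info(const char *model_path, ml_single_h *single)
    {
            ml_tensors_info_h in_info = NULL, out_info = NULL;
            ml_tensor_dimension dim = { 1, 1, 1, 1 };

            /* Describe a single FLOAT32 input tensor. */
            ml_tensors_info_create(&in_info);
            ml_tensors_info_set_count(in_info, 1);
            ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_FLOAT32);
            ml_tensors_info_set_tensor_dimension(in_info, 0, dim);

            /* Describe a single FLOAT32 output tensor. */
            ml_tensors_info_create(&out_info);
            ml_tensors_info_set_count(out_info, 1);
            ml_tensors_info_set_tensor_type(out_info, 0, ML_TENSOR_TYPE_FLOAT32);
            ml_tensors_info_set_tensor_dimension(out_info, 0, dim);

            /* Pass both info handles at open time instead of NULL. */
            int err = ml_single_open(single, model_path, in_info, out_info,
                                     ML_NNFW_TYPE_ANY, ML_NNFW_HW_ANY);

            ml_tensors_info_destroy(in_info);
            ml_tensors_info_destroy(out_info);

            return err;
    }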

Change-Id: I4e444b7b1d87c37249ddf2ac8b7c56aa7119602a
Signed-off-by: Inki Dae <inki.dae@samsung.com>
packaging/inference-engine-mlapi.spec
src/inference_engine_mlapi.cpp
src/inference_engine_mlapi_private.h

index bc4c0e1..717393b 100644 (file)
@@ -1,7 +1,7 @@
 Name:       inference-engine-mlapi
 Summary:    ML Single API backend of NNStreamer for MediaVision
-Version:    0.1.2
-Release:    2
+Version:    0.2.0
+Release:    0
 Group:      Multimedia/Libraries
 License:    Apache-2.0
 ExclusiveArch: %{arm} aarch64
index 0848741..b5e823e 100644 (file)
@@ -137,6 +137,68 @@ namespace MLAPIImpl
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
+       int InferenceMLAPI::CreateMLAPITensorInfo(ml_tensors_info_h& tensor_info,
+                                                                                         inference_engine_layer_property& layer_property)
+       {
+               if (layer_property.layers.empty()) {
+                       LOGE("input or output property is empty.");
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               int err = ml_tensors_info_create(&tensor_info);
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to create tensor info(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               err = ml_tensors_info_set_count(tensor_info, layer_property.layers.size());
+               if (err != ML_ERROR_NONE) {
+                       LOGE("Failed to set tensor count(%d).", err);
+                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+               }
+
+               size_t layer_idx = 0;
+
+               for (auto& iter : layer_property.layers) {
+                       inference_engine_tensor_info& info = iter.second;
+
+                       int tensor_type = 0;
+
+                       try {
+                               tensor_type = ConvertTensorTypeToMLAPI(info.data_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), info.data_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+
+                       err = ml_tensors_info_set_tensor_type(tensor_info, layer_idx, static_cast<ml_tensor_type_e>(tensor_type));
+                       if (err != ML_ERROR_NONE) {
+                               LOGE("Failed to set tensor type(%d).", err);
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       // TODO. nnstreamer needs a fixed dimension of 4 for the nntrainer tensor filter. Why??
+                       std::vector<unsigned int> indim(4, 1);
+
+                       LOGI("Tensor(%zu) shape:", layer_idx);
+
+                       std::copy(info.shape.begin(), info.shape.end(), indim.begin());
+
+                       for (auto& shape_value : indim)
+                               LOGI("%u", shape_value);
+
+                       err = ml_tensors_info_set_tensor_dimension(tensor_info, layer_idx, indim.data());
+                       if (err != ML_ERROR_NONE) {
+                               LOGE("Failed to set tensor dimension(%d).", err);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       }
+
+                       layer_idx++;
+               }
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
        int InferenceMLAPI::Load(std::vector<std::string> model_paths,
                                                         inference_model_format_e model_format)
        {
@@ -214,10 +276,21 @@ namespace MLAPIImpl
 
                LOGI("Model name = %s", model_str.c_str());
 
-               // TODO. create ml_tensor_info for input and output tensor and pass
-               //               them as parameters of ml_single_open function.
+               ml_tensors_info_h in_info = NULL, out_info = NULL;
+
+               // In case of the nntrainer tensor filter, input and output tensor
+               //               information is needed to load a given model.
+               if (mPluginType == INFERENCE_BACKEND_NNTRAINER) {
+                       int ret = CreateMLAPITensorInfo(in_info, mInputProperty);
+                       if (ret != INFERENCE_ENGINE_ERROR_NONE)
+                               return ret;
+
+                       ret = CreateMLAPITensorInfo(out_info, mOutputProperty);
+                       if (ret != INFERENCE_ENGINE_ERROR_NONE)
+                               return ret;
+               }
 
-               int err = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL,
+               int err = ml_single_open(&mSingle, model_str.c_str(), in_info, out_info,
                                                                 nnfw_type, nnfw_hw);
                if (err != ML_ERROR_NONE) {
                        LOGE("Failed to request ml_single_open(%d).", err);
@@ -302,9 +375,13 @@ namespace MLAPIImpl
 
                        LOGI("input tensor type = %d", in_type);
 
-                       int type = ConvertTensorType(in_type);
-                       if (type == -1) {
-                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+                       int type = 0;
+
+                       try {
+                               type = ConvertTensorTypeToInternal(in_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), in_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                        }
 
                        in_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
@@ -363,9 +440,13 @@ namespace MLAPIImpl
 
                        LOGI("output tensor type = %d", out_type);
 
-                       int type = ConvertTensorType(out_type);
-                       if (type == -1) {
-                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+                       int type = 0;
+
+                       try {
+                               type = ConvertTensorTypeToInternal(out_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), out_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                        }
 
                        out_buffer.data_type = static_cast<inference_tensor_data_type_e>(type);
@@ -402,9 +483,13 @@ namespace MLAPIImpl
 
                        LOGI("input tensor type = %d", in_type);
 
-                       int type = ConvertTensorType(in_type);
-                       if (type == -1) {
-                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+                       int type = 0;
+
+                       try {
+                               type = ConvertTensorTypeToInternal(in_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), in_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                        }
 
                        ret = ml_tensors_info_get_tensor_dimension(mInputInfoHandle, input.second, in_dim);
@@ -461,9 +546,13 @@ namespace MLAPIImpl
 
                        LOGI("output tensor type = %d", out_type);
 
-                       int type = ConvertTensorType(out_type);
-                       if (type == -1) {
-                               return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+                       int type = 0;
+
+                       try {
+                               type = ConvertTensorTypeToInternal(out_type);
+                       } catch (const std::invalid_argument& ex) {
+                               LOGE("Error (%s) (%d)", ex.what(), out_type);
+                               return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
                        }
 
                        ret = ml_tensors_info_get_tensor_dimension(mOutputInfoHandle, output.second, out_dim);
@@ -591,29 +680,66 @@ namespace MLAPIImpl
                return INFERENCE_ENGINE_ERROR_NONE;
        }
 
-       int InferenceMLAPI::ConvertTensorType(int tensor_type)
+       int InferenceMLAPI::ConvertTensorTypeToInternal(int tensor_type)
        {
                LOGI("ENTER");
 
+               int converted_type = 0;
+
                switch (tensor_type) {
                case ML_TENSOR_TYPE_FLOAT32:
-                       return INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+                       break;
                case ML_TENSOR_TYPE_UINT8:
-                       return INFERENCE_TENSOR_DATA_TYPE_UINT8;
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+                       break;
                case ML_TENSOR_TYPE_UINT16:
-                       return INFERENCE_TENSOR_DATA_TYPE_UINT16;
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT16;
+                       break;
                case ML_TENSOR_TYPE_INT64:
-                       return INFERENCE_TENSOR_DATA_TYPE_INT64;
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_INT64;
+                       break;
                case ML_TENSOR_TYPE_UINT64:
-                       return INFERENCE_TENSOR_DATA_TYPE_UINT64;
+                       converted_type = INFERENCE_TENSOR_DATA_TYPE_UINT64;
+                       break;
                default:
-                       LOGE("Tensor type(%d) is invalid.", tensor_type);
-                       return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+                       throw std::invalid_argument("invalid tensor type.");
+               }
+
+               LOGI("LEAVE");
+
+               return converted_type;
+       }
+
+       int InferenceMLAPI::ConvertTensorTypeToMLAPI(int tensor_type)
+       {
+               LOGI("ENTER");
+
+               int converted_type = 0;
+
+               switch (tensor_type) {
+               case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
+                       converted_type = ML_TENSOR_TYPE_FLOAT32;
+                       break;
+               case INFERENCE_TENSOR_DATA_TYPE_UINT8:
+                       converted_type = ML_TENSOR_TYPE_UINT8;
+                       break;
+               case INFERENCE_TENSOR_DATA_TYPE_UINT16:
+                       converted_type = ML_TENSOR_TYPE_UINT16;
+                       break;
+               case INFERENCE_TENSOR_DATA_TYPE_INT64:
+                       converted_type = ML_TENSOR_TYPE_INT64;
+                       break;
+               case INFERENCE_TENSOR_DATA_TYPE_UINT64:
+                       converted_type = ML_TENSOR_TYPE_UINT64;
+                       break;
+               default:
+                       throw std::invalid_argument("invalid tensor type.");
                }
 
                LOGI("LEAVE");
 
-               return -1;
+               return converted_type;
        }
 
        int InferenceMLAPI::UpdateTensorsInfo()
index fe39594..d2eacb9 100644 (file)
@@ -78,8 +78,11 @@ namespace MLAPIImpl
                int CheckTensorBuffers(
                                std::map<std::string, inference_engine_tensor_buffer> &input_buffers,
                                std::map<std::string, inference_engine_tensor_buffer> &output_buffers);
-               int ConvertTensorType(int tensor_type);
+               int ConvertTensorTypeToInternal(int tensor_type);
+               int ConvertTensorTypeToMLAPI(int tensor_type);
                int UpdateTensorsInfo();
+               int CreateMLAPITensorInfo(ml_tensors_info_h& tensor_info,
+                                                                 inference_engine_layer_property& layer_property);
 
                int mPluginType;
                int mTargetDevice;