mv_machine_learning: code refactoring
author Inki Dae <inki.dae@samsung.com>
Thu, 26 May 2022 06:22:58 +0000 (15:22 +0900)
committer Inki Dae <inki.dae@samsung.com>
Wed, 20 Jul 2022 05:16:57 +0000 (14:16 +0900)
[Version] : 0.23.2-0
[Issue type] : code refactoring

Refactored the code as follows:
- Make ConfigureBackendType private to the Inference class and stop
  keeping the backend type in the Inference class. Instead, pass the
  backend type to the Bind function directly (see the usage sketch
  below).
- Rename ConfigureBackendType to CheckBackendType. This function now
  only checks whether a given backend type is valid when Bind is
  called.
- Add some error-handling code.

This is just one step toward further code refactoring.

Change-Id: Id81ba442656823540c45be00bc398d961662b6b0
Signed-off-by: Inki Dae <inki.dae@samsung.com>
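
Below is a minimal caller-side sketch of the new flow, mirroring the
FaceRecognition::Initialize() changes in this patch. Bind(int, int),
ConfigureModelFiles() and Load() are the APIs touched here; the helper
name BindAndLoad, the include path, and the mediavision::inference
namespace are assumptions for illustration only.

    #include <memory>
    #include <string>

    #include "Inference.h" // assumed header location in this tree;
                           // also brings in MEDIA_VISION_ERROR_NONE

    using mediavision::inference::Inference; // namespace assumed

    // Hypothetical helper showing the post-refactoring call pattern.
    static int BindAndLoad(int backend_type, int device_type,
                           const std::string& model_path)
    {
        auto engine = std::make_unique<Inference>();

        // Backend and device types now go straight to Bind();
        // Inference no longer caches them in mConfig.mBackedType.
        int ret = engine->Bind(backend_type, device_type);
        if (ret != MEDIA_VISION_ERROR_NONE)
            return ret;

        engine->ConfigureModelFiles("", model_path, "");

        // Load() is now checked instead of being called unchecked.
        return engine->Load();
    }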
mv_machine_learning/face_recognition/src/face_recognition.cpp
mv_machine_learning/inference/include/Inference.h
mv_machine_learning/inference/src/Inference.cpp
mv_machine_learning/inference/src/mv_inference_open.cpp
packaging/capi-media-vision.spec

index 8ff447f355cff038142f0363f7c176517078139d..5e6816ace87e9787b8434533a49f07517f6fd5fb 100644 (file)
@@ -223,9 +223,10 @@ int FaceRecognition::Initialize()
 
        // Initialize inference engine object for backbone model.
        _backbone = make_unique<Inference>();
-       _backbone->ConfigureBackendType(_config.backbone_engine_backend_type);
-       _backbone->ConfigureTargetTypes(static_cast<int>(_config.backbone_target_device_type), true);
-       _backbone->Bind();
+
+       int ret = _backbone->Bind(_config.backbone_engine_backend_type, _config.backbone_target_device_type);
+       if (ret != MEDIA_VISION_ERROR_NONE)
+               return ret;
 
        // Tensor order is NCHW.
        vector<model_layer_info>& input_layer_info = GetBackboneInputLayerInfo();
@@ -236,17 +237,21 @@ int FaceRecognition::Initialize()
        _backbone->ConfigureInputInfo(width, height, 1, ch, 127.5f, 127.5f, MV_INFERENCE_DATA_FLOAT32, input_layer_names);
        _backbone->ConfigureOutputInfo(output_layer_names, output_tensor_info);
        _backbone->ConfigureModelFiles("", _face_net_info->GetModelFilePath(), "");
-       _backbone->Load();
+
+       ret = _backbone->Load();
+       if (ret != MEDIA_VISION_ERROR_NONE)
+               return ret;
 
        _training_model = make_unique<SimpleShot>(_config.training_engine_backend_type,
                                                                                                _config.training_target_device_type,
                                                                                                _config.internal_model_file_path);
 
        _internal = make_unique<Inference>();
-       _internal->ConfigureBackendType(_config.inference_engine_backend_type);
-       _internal->ConfigureTargetTypes(_config.inference_target_device_type, true);
-       _internal->ConfigureModelFiles("", _config.internal_model_file_path, "");
-       _internal->Bind();
+
+       ret = _internal->Bind(_config.inference_engine_backend_type, _config.inference_target_device_type);
+       if (ret != MEDIA_VISION_ERROR_NONE)
+               return ret;
+
 
        _initialized = true;
 
@@ -445,6 +450,8 @@ int FaceRecognition::RecognizeFace(mv_source_h img_src, vector<float>& out_vec,
                output_tensor_info[0].shape[0] = _label_manager->GetMaxLabel();
                _internal->ConfigureOutputInfo(output_layers, output_tensor_info);
 
+               _internal->ConfigureModelFiles("", _config.internal_model_file_path, "");
+
                // Load the trained internal model.
                ret = _internal->Load();
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
index cb82af1e64b142d3e4245a92f84231912c77c601..5db209236f029eaae9782d376215f522e92160f2 100644 (file)
@@ -105,8 +105,6 @@ namespace inference
 
                mv_inference_data_type_e mDataType; /**< Data type of a input tensor */
 
-               mv_inference_backend_type_e mBackedType; /**< Backed type of model files */
-
                int mTargetTypes; /**< Target type to run inference */
 
                double mConfidenceThresHold; /**< Confidence threshold value */
@@ -160,13 +158,6 @@ namespace inference
                void ConfigureOutputInfo(std::vector<std::string> names,
                                                                 std::vector<inference_engine_tensor_info>& tensors_info);
 
-               /**
-                * @brief   Configure inference backend type.
-                *
-                * @since_tizen 6.0
-                */
-               int ConfigureBackendType(const mv_inference_backend_type_e backendType);
-
                /**
                 * @brief   Configure a inference target device type such as CPU, GPU or NPU. (only one type can be set)
                 * @details Internally, a given device type will be converted to new type.
@@ -221,7 +212,7 @@ namespace inference
                 * @retval #MEDIA_VISION_ERROR_NONE Successful
                 * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
                 */
-               int Bind();
+               int Bind(int backend_type, int device_type);
 
                /**
                 * @brief   Load model files
@@ -354,6 +345,7 @@ namespace inference
 
        private:
                void CheckSupportedInferenceBackend();
+               int CheckBackendType(const mv_inference_backend_type_e backendType);
                bool IsTargetDeviceSupported(const int targetDevices);
                int ConvertEngineErrorToVisionError(int error);
                int ConvertTargetTypes(int given_types);
index 24b6b4bb161a735c8884210a470758a72c2e9d45..e2c9e1940ac88e1184818adad09f8789b0d15011 100755 (executable)
@@ -52,7 +52,6 @@ namespace inference
                        mWeightFilePath(),
                        mUserFilePath(),
                        mDataType(MV_INFERENCE_DATA_FLOAT32),
-                       mBackedType(MV_INFERENCE_BACKEND_NONE),
                        mTargetTypes(MV_INFERENCE_TARGET_DEVICE_CPU),
                        mConfidenceThresHold(),
                        mMeanValue(),
@@ -505,8 +504,7 @@ namespace inference
                LOGI("LEAVE");
        }
 
-       int Inference::ConfigureBackendType(
-                       const mv_inference_backend_type_e backendType)
+       int Inference::CheckBackendType(const mv_inference_backend_type_e backendType)
        {
                // Check if a given backend type is valid or not.
                if (backendType <= MV_INFERENCE_BACKEND_NONE ||
@@ -524,8 +522,6 @@ namespace inference
 
                LOGI("backend engine : %d", backendType);
 
-               mConfig.mBackedType = backendType;
-
                return MEDIA_VISION_ERROR_NONE;
        }
 
@@ -805,25 +801,23 @@ namespace inference
                return MEDIA_VISION_ERROR_NONE;
        }
 
-       int Inference::Bind(void)
+       int Inference::Bind(int backend_type, int device_type)
        {
                LOGI("ENTER");
 
-               if (mConfig.mBackedType <= MV_INFERENCE_BACKEND_NONE ||
-                       mConfig.mBackedType >= MV_INFERENCE_BACKEND_MAX) {
-                       LOGE("NOT SUPPORTED BACKEND %d", mConfig.mBackedType);
-                       return MEDIA_VISION_ERROR_INVALID_OPERATION;
-               }
+               int ret = CheckBackendType(static_cast<mv_inference_backend_type_e>(backend_type));
+               if (ret != MEDIA_VISION_ERROR_NONE)
+                       return ret;
 
-               std::string backendName = mSupportedInferenceBackend[mConfig.mBackedType].first;
+               std::string backendName = mSupportedInferenceBackend[backend_type].first;
                LOGI("backend string name: %s", backendName.c_str());
 
                inference_engine_config config = {
                        .backend_name = backendName,
-                       .backend_type = mConfig.mBackedType,
+                       .backend_type = backend_type,
                        // As a default, Target device is CPU. If user defined desired device type in json file
                        // then the device type will be set by Load callback.
-                       .target_devices = mConfig.mTargetTypes,
+                       .target_devices = device_type,
                };
 
                // Create a backend class object.
@@ -839,7 +833,7 @@ namespace inference
                        return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
                }
 
-               int ret = MEDIA_VISION_ERROR_NONE;
+               ret = MEDIA_VISION_ERROR_NONE;
 
                // Load configuration file if a given backend type is mlapi.
                if (config.backend_type == MV_INFERENCE_BACKEND_MLAPI) {
@@ -852,7 +846,7 @@ namespace inference
                // Bind a backend library.
                ret = mBackend->BindBackend(&config);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-                       LOGE("Fail to bind backend library.(%d)", mConfig.mBackedType);
+                       LOGE("Fail to bind backend library.(%d)", ret);
                        return MEDIA_VISION_ERROR_INVALID_OPERATION;
                }
 
index a84151a3818eb7f6cc6e1e84d2cbbb40621bd1c3..fbdb8194be9c856e933d4546053b83a7b8d334fa 100644 (file)
@@ -347,13 +347,6 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
                goto out_of_function;
        }
 
-       ret = pInfer->ConfigureBackendType(
-                       (mv_inference_backend_type_e) backendType);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to configure a backend type.");
-               goto out_of_function;
-       }
-
        ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &targetTypes);
        if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -383,7 +376,7 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
        // Create a inference-engine-common class object and load its corresponding library.
        // Ps. Inference engine gets a capability from a given backend by Bind call
        // so access to mBackendCapacity should be done after Bind.
-       ret = pInfer->Bind();
+       ret = pInfer->Bind(backendType, targetTypes);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to bind a backend engine.");
                goto out_of_function;
index 6c430e25222b2010bcfa38f49cb263c1bdef4956..a772b9d9e459d17f2f364a3793c706012c90266f 100644 (file)
@@ -1,6 +1,6 @@
 Name:        capi-media-vision
 Summary:     Media Vision library for Tizen Native API
-Version:     0.23.1
+Version:     0.23.2
 Release:     0
 Group:       Multimedia/Framework
 License:     Apache-2.0 and BSD-3-Clause